From 6d3f74a01c64f28b12a5322dc268a12950393c7e Mon Sep 17 00:00:00 2001 From: Kevin Heis Date: Thu, 9 Oct 2025 09:19:30 -0700 Subject: [PATCH 1/4] Migrate 6 files from JavaScript to TypeScript (#57885) --- ...-processing.js => spotlight-processing.ts} | 58 ++++++++---- .../middleware/context/whats-new-changelog.ts | 2 +- .../lib/process-learning-tracks.ts | 2 +- ...roduct-groups.js => get-product-groups.ts} | 90 +++++++++++++----- .../middleware/secret-scanning.ts | 2 +- ...{versions-schema.js => versions-schema.ts} | 17 +++- ...eases.js => enterprise-server-releases.ts} | 46 ++++++--- ...versions.js => get-applicable-versions.ts} | 93 ++++++++++++------- ...hort-versions.js => use-short-versions.ts} | 48 +++++++--- 9 files changed, 251 insertions(+), 107 deletions(-) rename src/fixtures/tests/{spotlight-processing.js => spotlight-processing.ts} (73%) rename src/products/tests/{get-product-groups.js => get-product-groups.ts} (64%) rename src/tests/helpers/schemas/{versions-schema.js => versions-schema.ts} (92%) rename src/versions/lib/{enterprise-server-releases.js => enterprise-server-releases.ts} (81%) rename src/versions/lib/{get-applicable-versions.js => get-applicable-versions.ts} (57%) rename src/versions/scripts/{use-short-versions.js => use-short-versions.ts} (88%) diff --git a/src/fixtures/tests/spotlight-processing.js b/src/fixtures/tests/spotlight-processing.ts similarity index 73% rename from src/fixtures/tests/spotlight-processing.js rename to src/fixtures/tests/spotlight-processing.ts index 66b92e5e4b98..1716a402eb84 100644 --- a/src/fixtures/tests/spotlight-processing.js +++ b/src/fixtures/tests/spotlight-processing.ts @@ -1,7 +1,26 @@ import { describe, expect, test } from 'vitest' +interface TocItem { + title: string + intro: string + fullPath?: string +} + +interface SpotlightItem { + article: string + image: string +} + +interface ProcessedSpotlightItem { + article: string + title: string + description: string + url: string + image: string +} + // Mock data to simulate tocItems and spotlight configurations -const mockTocItems = [ +const mockTocItems: TocItem[] = [ { title: 'Test Debug Article', intro: 'A test article for debugging functionality.', @@ -20,19 +39,22 @@ const mockTocItems = [ ] // Helper function to simulate the spotlight processing logic from CategoryLanding -function processSpotlight(spotlight, tocItems) { - const findArticleData = (articlePath) => { - const cleanPath = articlePath.startsWith('/') ? articlePath.slice(1) : articlePath +function processSpotlight( + spotlight: SpotlightItem[] | undefined, + tocItems: TocItem[], +): ProcessedSpotlightItem[] { + const findArticleData = (articlePath: string): TocItem | undefined => { + const cleanPath: string = articlePath.startsWith('/') ? 
articlePath.slice(1) : articlePath return tocItems.find( - (item) => + (item: TocItem) => item.fullPath?.endsWith(cleanPath) || item.fullPath?.includes(cleanPath.split('/').pop() || ''), ) } return ( - spotlight?.map((spotlightItem) => { - const articleData = findArticleData(spotlightItem.article) + spotlight?.map((spotlightItem: SpotlightItem): ProcessedSpotlightItem => { + const articleData: TocItem | undefined = findArticleData(spotlightItem.article) return { article: spotlightItem.article, title: articleData?.title || 'Unknown Article', @@ -46,7 +68,7 @@ function processSpotlight(spotlight, tocItems) { describe('spotlight processing logic', () => { test('processes spotlight object items correctly', () => { - const spotlight = [ + const spotlight: SpotlightItem[] = [ { article: '/debugging-errors/test-debug-article', image: '/assets/images/test-debugging.png', @@ -57,7 +79,7 @@ describe('spotlight processing logic', () => { }, ] - const result = processSpotlight(spotlight, mockTocItems) + const result: ProcessedSpotlightItem[] = processSpotlight(spotlight, mockTocItems) expect(result).toHaveLength(2) expect(result[0]).toEqual({ @@ -77,7 +99,7 @@ describe('spotlight processing logic', () => { }) test('processes multiple spotlight items with different images', () => { - const spotlight = [ + const spotlight: SpotlightItem[] = [ { article: '/debugging-errors/test-debug-article', image: '/assets/images/debugging.png', @@ -92,7 +114,7 @@ describe('spotlight processing logic', () => { }, ] - const result = processSpotlight(spotlight, mockTocItems) + const result: ProcessedSpotlightItem[] = processSpotlight(spotlight, mockTocItems) expect(result).toHaveLength(3) expect(result[0].image).toBe('/assets/images/debugging.png') @@ -102,13 +124,13 @@ describe('spotlight processing logic', () => { }) test('finds articles by filename when full path does not match', () => { - const spotlight = [ + const spotlight: SpotlightItem[] = [ { article: 'test-debug-article', image: '/assets/images/debug.png', }, ] - const result = processSpotlight(spotlight, mockTocItems) + const result: ProcessedSpotlightItem[] = processSpotlight(spotlight, mockTocItems) expect(result[0].title).toBe('Test Debug Article') expect(result[0].url).toBe('/en/category/debugging-errors/test-debug-article') @@ -116,7 +138,7 @@ describe('spotlight processing logic', () => { }) test('handles articles not found in tocItems', () => { - const spotlight = [ + const spotlight: SpotlightItem[] = [ { article: '/completely/nonexistent/path', image: '/assets/images/missing1.png', @@ -127,7 +149,7 @@ describe('spotlight processing logic', () => { }, ] - const result = processSpotlight(spotlight, mockTocItems) + const result: ProcessedSpotlightItem[] = processSpotlight(spotlight, mockTocItems) expect(result).toHaveLength(2) expect(result[0]).toEqual({ @@ -147,13 +169,13 @@ describe('spotlight processing logic', () => { }) test('handles empty spotlight array', () => { - const spotlight = [] - const result = processSpotlight(spotlight, mockTocItems) + const spotlight: SpotlightItem[] = [] + const result: ProcessedSpotlightItem[] = processSpotlight(spotlight, mockTocItems) expect(result).toEqual([]) }) test('handles undefined spotlight', () => { - const result = processSpotlight(undefined, mockTocItems) + const result: ProcessedSpotlightItem[] = processSpotlight(undefined, mockTocItems) expect(result).toEqual([]) }) }) diff --git a/src/frame/middleware/context/whats-new-changelog.ts b/src/frame/middleware/context/whats-new-changelog.ts index 
b0ec238d10c0..726130be00dc 100644 --- a/src/frame/middleware/context/whats-new-changelog.ts +++ b/src/frame/middleware/context/whats-new-changelog.ts @@ -19,7 +19,7 @@ export default async function whatsNewChangelog( const changelogVersions = getApplicableVersions(req.context.page.changelog.versions) // If the current version is not included, do not display a changelog. - if (!changelogVersions.includes(req.context.currentVersion)) { + if (!req.context.currentVersion || !changelogVersions.includes(req.context.currentVersion)) { return next() } } diff --git a/src/learning-track/lib/process-learning-tracks.ts b/src/learning-track/lib/process-learning-tracks.ts index 97a5fee58259..24a961f4a4d6 100644 --- a/src/learning-track/lib/process-learning-tracks.ts +++ b/src/learning-track/lib/process-learning-tracks.ts @@ -82,7 +82,7 @@ export default async function processLearningTracks( const trackVersions = getApplicableVersions(track.versions) // If the current version is not included, do not display the track. - if (!trackVersions.includes(context.currentVersion)) { + if (!context.currentVersion || !trackVersions.includes(context.currentVersion)) { continue } } diff --git a/src/products/tests/get-product-groups.js b/src/products/tests/get-product-groups.ts similarity index 64% rename from src/products/tests/get-product-groups.js rename to src/products/tests/get-product-groups.ts index 77bc45840276..511f3be86722 100644 --- a/src/products/tests/get-product-groups.js +++ b/src/products/tests/get-product-groups.ts @@ -6,16 +6,33 @@ import { getLocalizedGroupNames, } from '@/products/lib/get-product-groups' +// Mock data interface for tests - uses required name to match library expectations +interface MockProductGroupData { + name: string + octicon?: string + children: string[] +} + +// Mock data for testing edge cases with optional fields +interface PartialProductGroupData { + name?: string + octicon?: string + children: string[] +} + describe('get-product-groups helper functions', () => { describe('createOcticonToNameMap', () => { test('creates correct mapping from childGroups', () => { - const mockChildGroups = [ + const mockChildGroups: MockProductGroupData[] = [ { name: 'Get started', octicon: 'RocketIcon', children: ['get-started'] }, { name: 'GitHub Copilot', octicon: 'CopilotIcon', children: ['copilot'] }, { name: 'Security', octicon: 'ShieldLockIcon', children: ['code-security'] }, ] - const octiconToName = createOcticonToNameMap(mockChildGroups) + // Using any to cast mock data structure to match library's expected ProductGroupData type + const octiconToName: { [key: string]: string } = createOcticonToNameMap( + mockChildGroups as any, + ) expect(octiconToName['RocketIcon']).toBe('Get started') expect(octiconToName['CopilotIcon']).toBe('GitHub Copilot') @@ -24,14 +41,17 @@ describe('get-product-groups helper functions', () => { }) test('handles missing octicon or name gracefully', () => { - const mockChildGroups = [ + const mockChildGroups: PartialProductGroupData[] = [ { name: 'Valid Group', octicon: 'RocketIcon', children: [] }, { octicon: 'MissingNameIcon', children: [] }, // missing name { name: 'Missing Octicon', children: [] }, // missing octicon { name: '', octicon: 'EmptyNameIcon', children: [] }, // empty name ] - const octiconToName = createOcticonToNameMap(mockChildGroups) + // Using any to test edge cases with partial/missing fields that wouldn't normally pass strict typing + const octiconToName: { [key: string]: string } = createOcticonToNameMap( + mockChildGroups as any, + ) 
expect(octiconToName['RocketIcon']).toBe('Valid Group') expect(octiconToName['MissingNameIcon']).toBeUndefined() @@ -42,19 +62,23 @@ describe('get-product-groups helper functions', () => { describe('mapEnglishToLocalizedNames', () => { test('maps English names to localized names using octicon as key', () => { - const englishGroups = [ + const englishGroups: MockProductGroupData[] = [ { name: 'Get started', octicon: 'RocketIcon', children: [] }, { name: 'Security', octicon: 'ShieldLockIcon', children: [] }, { name: 'GitHub Copilot', octicon: 'CopilotIcon', children: [] }, ] - const localizedByOcticon = { + const localizedByOcticon: { [key: string]: string } = { RocketIcon: 'Empezar', ShieldLockIcon: 'Seguridad', CopilotIcon: 'GitHub Copilot', // Some names stay the same } - const nameMap = mapEnglishToLocalizedNames(englishGroups, localizedByOcticon) + // Using any to cast mock data structure to match library's expected ProductGroupData type + const nameMap: { [key: string]: string } = mapEnglishToLocalizedNames( + englishGroups as any, + localizedByOcticon, + ) expect(nameMap['Get started']).toBe('Empezar') expect(nameMap['Security']).toBe('Seguridad') @@ -63,18 +87,22 @@ describe('get-product-groups helper functions', () => { }) test('handles missing translations gracefully', () => { - const englishGroups = [ + const englishGroups: MockProductGroupData[] = [ { name: 'Get started', octicon: 'RocketIcon', children: [] }, { name: 'Missing Translation', octicon: 'MissingIcon', children: [] }, { name: 'No Octicon', children: [] }, ] - const localizedByOcticon = { + const localizedByOcticon: { [key: string]: string } = { RocketIcon: 'Empezar', // MissingIcon is not in the localized map } - const nameMap = mapEnglishToLocalizedNames(englishGroups, localizedByOcticon) + // Using any to cast mock data structure to match library's expected ProductGroupData type + const nameMap: { [key: string]: string } = mapEnglishToLocalizedNames( + englishGroups as any, + localizedByOcticon, + ) expect(nameMap['Get started']).toBe('Empezar') expect(nameMap['Missing Translation']).toBeUndefined() @@ -84,18 +112,22 @@ describe('get-product-groups helper functions', () => { test('handles different ordering between English and localized groups', () => { // English groups in one order - const englishGroups = [ + const englishGroups: MockProductGroupData[] = [ { name: 'Get started', octicon: 'RocketIcon', children: [] }, { name: 'Security', octicon: 'ShieldLockIcon', children: [] }, ] // Localized groups in different order (but mapped by octicon) - const localizedByOcticon = { + const localizedByOcticon: { [key: string]: string } = { ShieldLockIcon: 'Seguridad', // Security comes first in localized RocketIcon: 'Empezar', // Get started comes second } - const nameMap = mapEnglishToLocalizedNames(englishGroups, localizedByOcticon) + // Using any to cast mock data structure to match library's expected ProductGroupData type + const nameMap: { [key: string]: string } = mapEnglishToLocalizedNames( + englishGroups as any, + localizedByOcticon, + ) // Should correctly map regardless of order expect(nameMap['Get started']).toBe('Empezar') @@ -105,17 +137,20 @@ describe('get-product-groups helper functions', () => { describe('getLocalizedGroupNames integration', () => { test('returns empty object for English language', async () => { - const result = await getLocalizedGroupNames('en') + const result: { [key: string]: string } = await getLocalizedGroupNames('en') expect(result).toEqual({}) }) test('returns empty object when no 
translation root available', () => { // Test the fallback when translation root is not found const lang = 'unknown-lang' - const languages = { en: { dir: '/en' }, es: { dir: '/es' } } + const languages: { [key: string]: { dir: string } } = { + en: { dir: '/en' }, + es: { dir: '/es' }, + } - const translationRoot = languages[lang]?.dir - const result = translationRoot + const translationRoot: string | undefined = languages[lang]?.dir + const result: { [key: string]: string } = translationRoot ? { /* would proceed */ } @@ -126,7 +161,7 @@ describe('get-product-groups helper functions', () => { test('handles file read errors gracefully', () => { // Test the try/catch behavior when file read fails - let result + let result: { [key: string]: string } try { // Simulate file read error throw new Error('File not found') @@ -141,28 +176,35 @@ describe('get-product-groups helper functions', () => { describe('full translation pipeline', () => { test('complete flow from English groups to localized names', () => { // Simulate the complete flow - const englishChildGroups = [ + const englishChildGroups: MockProductGroupData[] = [ { name: 'Get started', octicon: 'RocketIcon', children: ['get-started'] }, { name: 'Security', octicon: 'ShieldLockIcon', children: ['code-security'] }, { name: 'GitHub Copilot', octicon: 'CopilotIcon', children: ['copilot'] }, ] // Simulate what would come from a Spanish localized file - const mockLocalizedChildGroups = [ + const mockLocalizedChildGroups: MockProductGroupData[] = [ { name: 'Empezar', octicon: 'RocketIcon', children: ['get-started'] }, { name: 'Seguridad', octicon: 'ShieldLockIcon', children: ['code-security'] }, { name: 'GitHub Copilot', octicon: 'CopilotIcon', children: ['copilot'] }, ] // Step 1: Create octicon -> localized name mapping - const localizedByOcticon = createOcticonToNameMap(mockLocalizedChildGroups) + // Using any to cast mock data structure to match library's expected ProductGroupData type + const localizedByOcticon: { [key: string]: string } = createOcticonToNameMap( + mockLocalizedChildGroups as any, + ) // Step 2: Map English names to localized names - const localizedNames = mapEnglishToLocalizedNames(englishChildGroups, localizedByOcticon) + // Using any to cast mock data structure to match library's expected ProductGroupData type + const localizedNames: { [key: string]: string } = mapEnglishToLocalizedNames( + englishChildGroups as any, + localizedByOcticon, + ) // Step 3: Use in final mapping - const finalResult = englishChildGroups.map((group) => { - const localizedName = localizedNames[group.name] || group.name + const finalResult = englishChildGroups.map((group: MockProductGroupData) => { + const localizedName: string = localizedNames[group.name] || group.name return { name: localizedName, octicon: group.octicon, diff --git a/src/secret-scanning/middleware/secret-scanning.ts b/src/secret-scanning/middleware/secret-scanning.ts index 8feb9cabbd3e..b69a67b71b1e 100644 --- a/src/secret-scanning/middleware/secret-scanning.ts +++ b/src/secret-scanning/middleware/secret-scanning.ts @@ -31,7 +31,7 @@ export default async function secretScanning( const { currentVersion } = req.context req.context.secretScanningData = secretScanningData.filter((entry) => - getApplicableVersions(entry.versions).includes(currentVersion), + currentVersion ? 
getApplicableVersions(entry.versions).includes(currentVersion) : false, ) // Some entries might use Liquid syntax, so we need diff --git a/src/tests/helpers/schemas/versions-schema.js b/src/tests/helpers/schemas/versions-schema.ts similarity index 92% rename from src/tests/helpers/schemas/versions-schema.js rename to src/tests/helpers/schemas/versions-schema.ts index c2c18c6a471c..44b84bf50ce6 100644 --- a/src/tests/helpers/schemas/versions-schema.js +++ b/src/tests/helpers/schemas/versions-schema.ts @@ -5,7 +5,20 @@ const releasePattern = '[a-z0-9-.]+' const delimiter = '@' const versionPattern = `${planPattern}${delimiter}${releasePattern}` -export default { +interface VersionSchema { + type: string + additionalProperties: boolean + required: string[] + properties: { + [key: string]: { + description: string + type: string + pattern?: string + } + } +} + +const schema: VersionSchema = { type: 'object', additionalProperties: false, required: [ @@ -106,3 +119,5 @@ export default { }, }, } + +export default schema diff --git a/src/versions/lib/enterprise-server-releases.js b/src/versions/lib/enterprise-server-releases.ts similarity index 81% rename from src/versions/lib/enterprise-server-releases.js rename to src/versions/lib/enterprise-server-releases.ts index f9b789df939e..40f257bd1abf 100644 --- a/src/versions/lib/enterprise-server-releases.js +++ b/src/versions/lib/enterprise-server-releases.ts @@ -3,7 +3,26 @@ import semver from 'semver' import versionSatisfiesRange from './version-satisfies-range' -const rawDates = JSON.parse(fs.readFileSync('src/ghes-releases/lib/enterprise-dates.json', 'utf8')) +interface VersionDateData { + releaseDate: string + releaseCandidateDate?: string + generalAvailabilityDate?: string + deprecationDate: string + [key: string]: any +} + +interface EnhancedVersionDateData extends VersionDateData { + displayCandidateDate?: string | null + displayReleaseDate?: string | null +} + +interface RawDatesData { + [version: string]: VersionDateData +} + +const rawDates: RawDatesData = JSON.parse( + fs.readFileSync('src/ghes-releases/lib/enterprise-dates.json', 'utf8'), +) // ============================================================================ // STATICALLY DEFINED VALUES @@ -91,7 +110,7 @@ export const latestStable = releaseCandidate ? supported[1] : latest export const oldestSupported = supported[supported.length - 1] // Enhanced dates object with computed display values for templates -export const dates = Object.fromEntries( +export const dates: Record = Object.fromEntries( Object.entries(rawDates).map(([version, versionData]) => [ version, { @@ -100,11 +119,13 @@ export const dates = Object.fromEntries( displayReleaseDate: processDateForDisplay(versionData.generalAvailabilityDate), }, ]), -) +) as Record // Deprecation tracking export const nextDeprecationDate = dates[oldestSupported].deprecationDate -export const isOldestReleaseDeprecated = new Date() > new Date(nextDeprecationDate) +export const isOldestReleaseDeprecated = nextDeprecationDate + ? 
new Date() > new Date(nextDeprecationDate) + : false // Filtered version arrays for different use cases export const deprecatedOnNewSite = deprecated.filter((version) => @@ -133,7 +154,7 @@ export const deprecatedReleasesOnDeveloperSite = deprecated.filter((version) => * @param {string|null} date - ISO date string * @returns {string|null} - Date string if in the past, null if future or invalid */ -function processDateForDisplay(date) { +function processDateForDisplay(date: string | undefined): string | null { if (!date) return null const currentTimestamp = Math.floor(Date.now() / 1000) const dateTimestamp = Math.floor(new Date(date).getTime() / 1000) @@ -146,24 +167,24 @@ function processDateForDisplay(date) { * @param {string} v2 - Next version * @throws {Error} If version sequence is invalid */ -function isValidNext(v1, v2) { - const semverV1 = semver.coerce(v1).raw - const semverV2 = semver.coerce(v2).raw +function isValidNext(v1: string, v2: string): void { + const semverV1 = semver.coerce(v1)!.raw + const semverV2 = semver.coerce(v2)!.raw const isValid = semverV2 === semver.inc(semverV1, 'minor') || semverV2 === semver.inc(semverV1, 'major') if (!isValid) throw new Error(`The version "${v2}" is not one version ahead of "${v1}" as expected`) } -export const findReleaseNumberIndex = (releaseNum) => { +export const findReleaseNumberIndex = (releaseNum: string): number => { return all.findIndex((i) => i === releaseNum) } -export const getNextReleaseNumber = (releaseNum) => { +export const getNextReleaseNumber = (releaseNum: string): string => { return all[findReleaseNumberIndex(releaseNum) - 1] } -export const getPreviousReleaseNumber = (releaseNum) => { +export const getPreviousReleaseNumber = (releaseNum: string): string => { return all[findReleaseNumberIndex(releaseNum) + 1] } @@ -180,6 +201,7 @@ export default { nextNext, supported, deprecated, + deprecatedWithFunctionalRedirects, legacyAssetVersions, all, latest, @@ -193,11 +215,13 @@ export default { firstVersionDeprecatedOnNewSite, lastVersionWithoutArchivedRedirectsFile, lastReleaseWithLegacyFormat, + firstReleaseStoredInBlobStorage, deprecatedReleasesWithLegacyFormat, deprecatedReleasesWithNewFormat, deprecatedReleasesOnDeveloperSite, firstReleaseNote, firstRestoredAdminGuides, + findReleaseNumberIndex, getNextReleaseNumber, getPreviousReleaseNumber, } diff --git a/src/versions/lib/get-applicable-versions.js b/src/versions/lib/get-applicable-versions.ts similarity index 57% rename from src/versions/lib/get-applicable-versions.js rename to src/versions/lib/get-applicable-versions.ts index 381b32bb4598..097adabafda7 100644 --- a/src/versions/lib/get-applicable-versions.js +++ b/src/versions/lib/get-applicable-versions.ts @@ -3,21 +3,36 @@ import { allVersions } from './all-versions' import versionSatisfiesRange from './version-satisfies-range' import { next, nextNext } from './enterprise-server-releases' import { getDeepDataByLanguage } from '@/data-directory/lib/get-data' +import type { Version } from '@/types' -let featureData = null +interface VersionsObject { + [key: string]: string | string[] +} + +interface GetApplicableVersionsOptions { + doNotThrow?: boolean + includeNextVersion?: boolean +} + +// Using any for feature data as it's dynamically loaded from YAML files +let featureData: any = null const allVersionKeys = Object.keys(allVersions) // return an array of versions that an article's product versions encompasses -function getApplicableVersions(versionsObj, filepath, opts = {}) { +function getApplicableVersions( + 
versionsObj: VersionsObject | string | undefined, + filepath?: string, + opts: GetApplicableVersionsOptions = {}, +): string[] { if (typeof versionsObj === 'undefined') { - throw new Error(`No \`versions\` frontmatter found in ${filepath}`) + throw new Error(`No \`versions\` frontmatter found in ${filepath || 'undefined'}`) } // Catch an old frontmatter value that was used to indicate an article was available in all versions. if (versionsObj === '*') { throw new Error( - `${filepath} contains the invalid versions frontmatter: *. Please explicitly list out all the versions that apply to this article.`, + `${filepath || 'undefined'} contains the invalid versions frontmatter: *. Please explicitly list out all the versions that apply to this article.`, ) } @@ -35,34 +50,39 @@ function getApplicableVersions(versionsObj, filepath, opts = {}) { // fpt: '*' // ghes: '>=2.23' // where the feature is bringing the ghes versions into the mix. - const featureVersionsObj = reduce( - versionsObj, - (result, value, key) => { - if (key === 'feature') { - if (typeof value === 'string') { - Object.assign(result, { ...featureData[value]?.versions }) - } else if (Array.isArray(value)) { - value.forEach((str) => { - Object.assign(result, { ...featureData[str].versions }) - }) - } - delete result[key] - } - return result - }, - {}, - ) + const featureVersionsObj: VersionsObject = + typeof versionsObj === 'string' + ? {} + : reduce( + versionsObj, + (result: any, value, key) => { + if (key === 'feature') { + if (typeof value === 'string') { + Object.assign(result, { ...featureData[value]?.versions }) + } else if (Array.isArray(value)) { + value.forEach((str) => { + Object.assign(result, { ...featureData[str].versions }) + }) + } + delete result[key] + } + return result + }, + {}, + ) // Get available versions for feature and standard versions. const foundFeatureVersions = evaluateVersions(featureVersionsObj) - const foundStandardVersions = evaluateVersions(versionsObj) + const foundStandardVersions = typeof versionsObj === 'string' ? [] : evaluateVersions(versionsObj) // Combine them! - const applicableVersions = Array.from(new Set(foundStandardVersions.concat(foundFeatureVersions))) + const applicableVersions: string[] = Array.from( + new Set(foundStandardVersions.concat(foundFeatureVersions)), + ) if (!applicableVersions.length && !opts.doNotThrow) { throw new Error( - `${filepath} is not available in any currently supported version. Make sure the \`versions\` property includes at least one supported version.`, + `${filepath || 'undefined'} is not available in any currently supported version. Make sure the \`versions\` property includes at least one supported version.`, ) } @@ -74,35 +94,38 @@ function getApplicableVersions(versionsObj, filepath, opts = {}) { // Strip out not-yet-supported versions if the option to include them is not provided. 
if (!opts.includeNextVersion) { sortedVersions = sortedVersions.filter( - (v) => !(v.endsWith(`@${next}`) || v.endsWith(`@${nextNext}`)), + (v: string) => !(v.endsWith(`@${next}`) || v.endsWith(`@${nextNext}`)), ) } return sortedVersions } -function evaluateVersions(versionsObj) { +function evaluateVersions(versionsObj: VersionsObject): string[] { // get an array like: [ 'free-pro-team@latest', 'enterprise-server@2.21', 'enterprise-cloud@latest' ] - const versions = [] + const versions: string[] = [] // where versions obj is something like: // fpt: '*' // ghes: '>=2.19' // ghec: '*' // ^ where each key corresponds to a plan's short name (defined in lib/all-versions.js) - Object.entries(versionsObj).forEach(([plan, planValue]) => { + Object.entries(versionsObj).forEach(([plan, planValue]: [string, string | string[]]) => { + // Skip non-string plan values for semantic comparison + if (typeof planValue !== 'string') return + // For each available plan (e.g., `ghes`), get the matching versions from allVersions. // This will be an array of one or more version objects. - const matchingVersionObjs = Object.values(allVersions).filter( - (relevantVersionObj) => + const matchingVersionObjs: Version[] = Object.values(allVersions).filter( + (relevantVersionObj: Version) => relevantVersionObj.plan === plan || relevantVersionObj.shortName === plan, ) // For each matching version found above, compare it to the provided planValue. // E.g., compare `enterprise-server@2.19` to `ghes: >=2.19`. - matchingVersionObjs.forEach((relevantVersionObj) => { + matchingVersionObjs.forEach((relevantVersionObj: Version) => { // If the version doesn't require any semantic comparison, we can assume it applies. - if (!(relevantVersionObj.hasNumberedReleases || relevantVersionObj.internalLatestRelease)) { + if (!relevantVersionObj.hasNumberedReleases) { versions.push(relevantVersionObj.version) return } @@ -117,11 +140,9 @@ function evaluateVersions(versionsObj) { } // Determine which release to use for semantic comparison. - const releaseToCompare = relevantVersionObj.hasNumberedReleases - ? 
relevantVersionObj.currentRelease - : relevantVersionObj.internalLatestRelease + const releaseToCompare: string = relevantVersionObj.currentRelease - if (versionSatisfiesRange(releaseToCompare, planValue)) { + if (releaseToCompare && versionSatisfiesRange(releaseToCompare, planValue)) { versions.push(relevantVersionObj.version) } }) diff --git a/src/versions/scripts/use-short-versions.js b/src/versions/scripts/use-short-versions.ts similarity index 88% rename from src/versions/scripts/use-short-versions.js rename to src/versions/scripts/use-short-versions.ts index 967f9d883d2b..e4597a2dbd42 100755 --- a/src/versions/scripts/use-short-versions.js +++ b/src/versions/scripts/use-short-versions.ts @@ -10,7 +10,7 @@ import { deprecated, oldestSupported } from '@/versions/lib/enterprise-server-re const allVersionKeys = Object.values(allVersions) const dryRun = ['-d', '--dry-run'].includes(process.argv[2]) -const walkFiles = (pathToWalk, ext) => { +const walkFiles = (pathToWalk: string, ext: string): string[] => { return walk(path.posix.join(process.cwd(), pathToWalk), { includeBasePath: true, directories: false, @@ -20,7 +20,24 @@ const walkFiles = (pathToWalk, ext) => { const markdownFiles = walkFiles('content', '.md').concat(walkFiles('data', '.md')) const yamlFiles = walkFiles('data', '.yml') -const operatorsMap = { +interface ReplacementsMap { + [key: string]: string +} + +interface VersionData { + versions?: Record | string + [key: string]: any +} + +interface OperatorsMap { + [key: string]: string + '==': string + ver_gt: string + ver_lt: string + '!=': string +} + +const operatorsMap: OperatorsMap = { // old: new '==': '=', ver_gt: '>', @@ -50,9 +67,10 @@ async function main() { const newContent = makeLiquidReplacements(contentReplacements, content) // B. UPDATE FRONTMATTER VERSIONS PROPERTY - const { data } = frontmatter(newContent) + const { data } = frontmatter(newContent) as { data: VersionData } if (data.versions && typeof data.versions !== 'string') { - Object.entries(data.versions).forEach(([plan, value]) => { + const versions = data.versions as Record + Object.entries(versions).forEach(([plan, value]) => { // Update legacy versioning while we're here const valueToUse = value .replace('2.23', '3.0') @@ -68,15 +86,16 @@ async function main() { console.error(`can't find supported version for ${plan}`) process.exit(1) } - delete data.versions[plan] - data.versions[versionObj.shortName] = valueToUse + delete versions[plan] + versions[versionObj.shortName] = valueToUse }) } if (dryRun) { console.log(contentReplacements) } else { - fs.writeFileSync(file, frontmatter.stringify(newContent, data, { lineWidth: 10000 })) + // Using any for frontmatter.stringify options as gray-matter types don't include lineWidth + fs.writeFileSync(file, frontmatter.stringify(newContent, data, { lineWidth: 10000 } as any)) } } @@ -109,14 +128,15 @@ main().then( ) // Convenience function to help with readability by removing this large but unneded property. 
-function removeInputProps(arrayOfObjects) { - return arrayOfObjects.map((obj) => { +// Using any for token objects as liquidjs doesn't provide TypeScript types +function removeInputProps(arrayOfObjects: any[]): any[] { + return arrayOfObjects.map((obj: any) => { delete obj.input || delete obj.token.input return obj }) } -function makeLiquidReplacements(replacementsObj, text) { +function makeLiquidReplacements(replacementsObj: ReplacementsMap, text: string): string { let newText = text Object.entries(replacementsObj).forEach(([oldCond, newCond]) => { const oldCondRegex = new RegExp(`({%-?)\\s*?${escapeRegExp(oldCond)}\\s*?(-?%})`, 'g') @@ -139,8 +159,8 @@ function makeLiquidReplacements(replacementsObj, text) { // if currentVersion ver_gt "myVersion@myRelease -> ifversion myVersionShort > myRelease // if currentVersion ver_lt "myVersion@myRelease -> ifversion myVersionShort < myRelease // if enterpriseServerVersions contains currentVersion -> ifversion ghes -function getLiquidReplacements(content, file) { - const replacements = {} +function getLiquidReplacements(content: string, file: string): ReplacementsMap { + const replacements: ReplacementsMap = {} const tokenizer = new Tokenizer(content) const tokens = removeInputProps(tokenizer.readTopLevelTokens()) @@ -157,7 +177,7 @@ function getLiquidReplacements(content, file) { token .replace(/(if|elsif) /, '') .split(/ (or|and) /) - .forEach((op) => { + .forEach((op: any) => { if (op === 'or' || op === 'and') { newToken.push(op) return @@ -193,7 +213,7 @@ function getLiquidReplacements(content, file) { // Handle numbered releases! if (versionObj.hasNumberedReleases) { - const newOperator = operatorsMap[operator] + const newOperator: string | undefined = operatorsMap[operator] if (!newOperator) { console.error( `Couldn't find an operator that corresponds to ${operator} in "${token} in "${file}`, From 893ea88421ae15d443446e7e5d3e009e1db165ef Mon Sep 17 00:00:00 2001 From: Kevin Heis Date: Thu, 9 Oct 2025 09:43:15 -0700 Subject: [PATCH 2/4] Migrate 6 files from JavaScript to TypeScript (#57884) --- ....js => code-annotation-comment-spacing.ts} | 36 ++--- ....js => frontmatter-landing-recommended.ts} | 29 ++-- ...ble.js => third-party-actions-reusable.ts} | 19 ++- .../{code-header.js => code-header.ts} | 53 +++++--- .../tests/{rendering.js => rendering.ts} | 72 ++++++++-- src/graphql/lib/index.js | 98 -------------- src/graphql/lib/index.ts | 126 ++++++++++++++++++ src/graphql/pages/breaking-changes.tsx | 2 +- src/graphql/pages/changelog.tsx | 2 +- src/graphql/pages/reference.tsx | 2 +- src/graphql/pages/schema-previews.tsx | 2 +- 11 files changed, 279 insertions(+), 162 deletions(-) rename src/content-linter/lib/linting-rules/{code-annotation-comment-spacing.js => code-annotation-comment-spacing.ts} (70%) rename src/content-linter/lib/linting-rules/{frontmatter-landing-recommended.js => frontmatter-landing-recommended.ts} (69%) rename src/content-linter/lib/linting-rules/{third-party-actions-reusable.js => third-party-actions-reusable.ts} (85%) rename src/content-render/unified/{code-header.js => code-header.ts} (58%) rename src/github-apps/tests/{rendering.js => rendering.ts} (66%) delete mode 100644 src/graphql/lib/index.js create mode 100644 src/graphql/lib/index.ts diff --git a/src/content-linter/lib/linting-rules/code-annotation-comment-spacing.js b/src/content-linter/lib/linting-rules/code-annotation-comment-spacing.ts similarity index 70% rename from src/content-linter/lib/linting-rules/code-annotation-comment-spacing.js rename to 
src/content-linter/lib/linting-rules/code-annotation-comment-spacing.ts index 759e5a5ec319..1a6c3f66b0e3 100644 --- a/src/content-linter/lib/linting-rules/code-annotation-comment-spacing.js +++ b/src/content-linter/lib/linting-rules/code-annotation-comment-spacing.ts @@ -1,32 +1,38 @@ +// @ts-ignore - markdownlint-rule-helpers doesn't provide TypeScript declarations import { addError, filterTokens } from 'markdownlint-rule-helpers' +import type { RuleParams, RuleErrorCallback, MarkdownToken } from '@/content-linter/types' + export const codeAnnotationCommentSpacing = { names: ['GHD045', 'code-annotation-comment-spacing'], description: 'Code comments in annotation blocks must have exactly one space after the comment character(s)', tags: ['code', 'comments', 'annotate', 'spacing'], parser: 'markdownit', - function: (params, onError) => { - filterTokens(params, 'fence', (token) => { - if (!token.info.includes('annotate')) return + function: (params: RuleParams, onError: RuleErrorCallback) => { + filterTokens(params, 'fence', (token: MarkdownToken) => { + if (!token.info?.includes('annotate')) return + + const content = token.content + if (!content) return - const lines = token.content.split('\n') + const lines = content.split('\n') - lines.forEach((line, index) => { + lines.forEach((line: string, index: number) => { const trimmedLine = line.trim() if (!trimmedLine) return // Define a map of comment patterns - const commentPatterns = { + const commentPatterns: Record = { '//': /^(\/\/)(.*)/, // JavaScript/TypeScript/Java/C# style comments '#': /^(#)(.*)/, // Python/Ruby/Shell/YAML style comments '--': /^(--)(.*)/, // SQL/Lua style comments } // Check for different comment patterns - let commentMatch = null - let commentChar = null - let restOfLine = null + let commentMatch: RegExpMatchArray | null = null + let commentChar: string | null = null + let restOfLine: string | null = null // Iterate over the map to find a matching comment style for (const [char, pattern] of Object.entries(commentPatterns)) { @@ -38,7 +44,7 @@ export const codeAnnotationCommentSpacing = { } } - if (commentMatch && restOfLine !== null) { + if (commentMatch && restOfLine !== null && commentChar !== null) { // Skip shebang lines (#!/...) 
if (trimmedLine.startsWith('#!')) { return @@ -49,8 +55,8 @@ export const codeAnnotationCommentSpacing = { // If it starts with a space, make sure it's exactly one space if (restOfLine.startsWith(' ') && restOfLine.length > 1 && restOfLine[1] === ' ') { // Multiple spaces - this is an error - const lineNumber = token.lineNumber + index + 1 - const fixedLine = line.replace( + const lineNumber: number = token.lineNumber + index + 1 + const fixedLine: string = line.replace( new RegExp(`^(\\s*${commentChar.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')})\\s+`), `$1 `, ) @@ -73,9 +79,9 @@ export const codeAnnotationCommentSpacing = { return } else { // No space after comment character - this is an error - const lineNumber = token.lineNumber + index + 1 - const leadingWhitespace = line.match(/^\s*/)[0] - const fixedLine = leadingWhitespace + commentChar + ' ' + restOfLine + const lineNumber: number = token.lineNumber + index + 1 + const leadingWhitespace: string = line.match(/^\s*/)![0] + const fixedLine: string = leadingWhitespace + commentChar + ' ' + restOfLine addError( onError, diff --git a/src/content-linter/lib/linting-rules/frontmatter-landing-recommended.js b/src/content-linter/lib/linting-rules/frontmatter-landing-recommended.ts similarity index 69% rename from src/content-linter/lib/linting-rules/frontmatter-landing-recommended.js rename to src/content-linter/lib/linting-rules/frontmatter-landing-recommended.ts index 06bdbb4774cf..d1dd31c11859 100644 --- a/src/content-linter/lib/linting-rules/frontmatter-landing-recommended.js +++ b/src/content-linter/lib/linting-rules/frontmatter-landing-recommended.ts @@ -1,24 +1,26 @@ import fs from 'fs' import path from 'path' +// @ts-ignore - markdownlint-rule-helpers doesn't provide TypeScript declarations import { addError } from 'markdownlint-rule-helpers' import { getFrontmatter } from '../helpers/utils' +import type { RuleParams, RuleErrorCallback } from '@/content-linter/types' -function isValidArticlePath(articlePath, currentFilePath) { +function isValidArticlePath(articlePath: string, currentFilePath: string): boolean { const ROOT = process.env.ROOT || '.' // Strategy 1: Always try as an absolute path from content root first const contentDir = path.join(ROOT, 'content') const normalizedPath = articlePath.startsWith('/') ? 
articlePath.substring(1) : articlePath - const absolutePath = path.join(contentDir, `${normalizedPath}.md`) + const absolutePath: string = path.join(contentDir, `${normalizedPath}.md`) if (fs.existsSync(absolutePath) && fs.statSync(absolutePath).isFile()) { return true } // Strategy 2: Fall back to relative path from current file's directory - const currentDir = path.dirname(currentFilePath) - const relativePath = path.join(currentDir, `${normalizedPath}.md`) + const currentDir: string = path.dirname(currentFilePath) + const relativePath: string = path.join(currentDir, `${normalizedPath}.md`) try { return fs.existsSync(relativePath) && fs.statSync(relativePath).isFile() @@ -32,15 +34,18 @@ export const frontmatterLandingRecommended = { description: 'Only landing pages can have recommended articles, there should be no duplicate recommended articles, and all recommended articles must exist', tags: ['frontmatter', 'landing', 'recommended'], - function: (params, onError) => { - const fm = getFrontmatter(params.lines) + function: (params: RuleParams, onError: RuleErrorCallback) => { + // Using any for frontmatter as it's a dynamic YAML object with varying properties + const fm: any = getFrontmatter(params.lines) if (!fm || !fm.recommended) return - const recommendedLine = params.lines.find((line) => line.startsWith('recommended:')) + const recommendedLine: string | undefined = params.lines.find((line) => + line.startsWith('recommended:'), + ) if (!recommendedLine) return - const lineNumber = params.lines.indexOf(recommendedLine) + 1 + const lineNumber: number = params.lines.indexOf(recommendedLine) + 1 if (!fm.layout || !fm.layout.includes('landing')) { addError( @@ -55,11 +60,11 @@ export const frontmatterLandingRecommended = { // Check for duplicate recommended items and invalid paths if (Array.isArray(fm.recommended)) { - const seen = new Set() - const duplicates = [] - const invalidPaths = [] + const seen = new Set() + const duplicates: string[] = [] + const invalidPaths: string[] = [] - fm.recommended.forEach((item) => { + fm.recommended.forEach((item: string) => { if (seen.has(item)) { duplicates.push(item) } else { diff --git a/src/content-linter/lib/linting-rules/third-party-actions-reusable.js b/src/content-linter/lib/linting-rules/third-party-actions-reusable.ts similarity index 85% rename from src/content-linter/lib/linting-rules/third-party-actions-reusable.js rename to src/content-linter/lib/linting-rules/third-party-actions-reusable.ts index 2c0000dc6879..6b95c52fa9ab 100644 --- a/src/content-linter/lib/linting-rules/third-party-actions-reusable.js +++ b/src/content-linter/lib/linting-rules/third-party-actions-reusable.ts @@ -1,16 +1,21 @@ +// @ts-ignore - markdownlint-rule-helpers doesn't provide TypeScript declarations import { addError, filterTokens } from 'markdownlint-rule-helpers' +import type { RuleParams, RuleErrorCallback, MarkdownToken } from '@/content-linter/types' + export const thirdPartyActionsReusable = { names: ['GHD054', 'third-party-actions-reusable'], description: 'Code examples with third-party actions must include disclaimer reusable', tags: ['actions', 'reusable', 'third-party'], - function: (params, onError) => { + function: (params: RuleParams, onError: RuleErrorCallback) => { // Find all code fence blocks - filterTokens(params, 'fence', (token) => { + filterTokens(params, 'fence', (token: MarkdownToken) => { // Only check YAML code blocks (GitHub Actions workflows) if (token.info !== 'yaml' && token.info !== 'yaml copy') return const codeContent = 
token.content + if (!codeContent) return + const lineNumber = token.lineNumber // Find third-party actions in the code block @@ -41,8 +46,8 @@ export const thirdPartyActionsReusable = { * Third-party actions are identified by the pattern: owner/action@version * where owner is not 'actions' or 'github' */ -function findThirdPartyActions(yamlContent) { - const thirdPartyActions = [] +function findThirdPartyActions(yamlContent: string): string[] { + const thirdPartyActions: string[] = [] // Pattern to match 'uses: owner/action@version' where owner is not actions or github const actionPattern = /uses:\s+([^{\s]+\/[^@\s]+@[^\s]+)/g @@ -70,7 +75,11 @@ function findThirdPartyActions(yamlContent) { * Check if the disclaimer reusable is present before the given line number or inside the code block * Looks backward from the code block and also inside the code block content */ -function checkForDisclaimer(lines, codeBlockLineNumber, codeContent) { +function checkForDisclaimer( + lines: string[], + codeBlockLineNumber: number, + codeContent: string, +): boolean { const disclaimerPattern = /{% data reusables\.actions\.actions-not-certified-by-github-comment %}/ // First, check inside the code block content diff --git a/src/content-render/unified/code-header.js b/src/content-render/unified/code-header.ts similarity index 58% rename from src/content-render/unified/code-header.js rename to src/content-render/unified/code-header.ts index 3d29e5a4590e..1e4c7d28b808 100644 --- a/src/content-render/unified/code-header.js +++ b/src/content-render/unified/code-header.ts @@ -7,15 +7,25 @@ import yaml from 'js-yaml' import fs from 'fs' import { visit } from 'unist-util-visit' import { h } from 'hastscript' +// @ts-ignore - no types available for @primer/octicons import octicons from '@primer/octicons' import { parse } from 'parse5' import { fromParse5 } from 'hast-util-from-parse5' import murmur from 'imurmurhash' import { getPrompt } from './copilot-prompt' +import type { Element } from 'hast' -const languages = yaml.load(fs.readFileSync('./data/code-languages.yml', 'utf8')) +interface LanguageConfig { + name: string + [key: string]: any +} + +type Languages = Record + +const languages = yaml.load(fs.readFileSync('./data/code-languages.yml', 'utf8')) as Languages -const matcher = (node) => +// Using any due to conflicting unist/hast type definitions between dependencies +const matcher = (node: any): boolean => node.type === 'element' && node.tagName === 'pre' && // For now, limit to ones with the copy or prompt meta, @@ -25,28 +35,39 @@ const matcher = (node) => !getPreMeta(node).annotate export default function codeHeader() { - return (tree) => { - visit(tree, matcher, (node, index, parent) => { - parent.children[index] = wrapCodeExample(node, tree) + // Using any due to conflicting unist/hast type definitions between dependencies + return (tree: any) => { + // Using any due to conflicting unist/hast type definitions between dependencies + visit(tree, matcher, (node: any, index: number | undefined, parent: any) => { + if (index !== undefined && parent) { + parent.children[index] = wrapCodeExample(node, tree) + } }) } } -function wrapCodeExample(node, tree) { - const lang = node.children[0].properties.className?.[0].replace('language-', '') - const code = node.children[0].children[0].value +// Using any due to conflicting unist/hast type definitions between dependencies +function wrapCodeExample(node: any, tree: any): Element { + const lang: string = 
node.children[0].properties.className?.[0].replace('language-', '') + const code: string = node.children[0].children[0].value const subnav = null // getSubnav() lives in annotate.js, not needed for normal code blocks const prompt = getPrompt(node, tree, code) // returns null if there's no prompt - const hasCopy = Boolean(getPreMeta(node).copy) // defaults to true + const hasCopy: boolean = Boolean(getPreMeta(node).copy) // defaults to true const headerHast = header(lang, code, subnav, prompt, hasCopy) return h('div', { className: 'code-example' }, [headerHast, node]) } -export function header(lang, code, subnav = null, prompt = null, hasCopy = true) { - const codeId = murmur('js-btn-copy').hash(code).result() +export function header( + lang: string, + code: string, + subnav: Element | null = null, + prompt: Element | null = null, + hasCopy: boolean = true, +): Element { + const codeId: string = murmur('js-btn-copy').hash(code).result().toString() return h( 'header', @@ -83,14 +104,16 @@ export function header(lang, code, subnav = null, prompt = null, hasCopy = true) ) } -function btnIcon() { - const btnIconHtml = octicons.copy.toSVG() +function btnIcon(): Element { + const btnIconHtml: string = octicons.copy.toSVG() const btnIconAst = parse(String(btnIconHtml), { sourceCodeLocationInfo: true }) + // @ts-ignore - fromParse5 file option typing issue const btnIcon = fromParse5(btnIconAst, { file: btnIconHtml }) - return btnIcon + return btnIcon as Element } -export function getPreMeta(node) { +// Using any due to conflicting unist/hast type definitions between dependencies +export function getPreMeta(node: any): Record { // Here's why this monstrosity works: // https://github.com/syntax-tree/mdast-util-to-hast/blob/c87cd606731c88a27dbce4bfeaab913a9589bf83/lib/handlers/code.js#L40-L42 return node.children[0]?.data?.meta || {} diff --git a/src/github-apps/tests/rendering.js b/src/github-apps/tests/rendering.ts similarity index 66% rename from src/github-apps/tests/rendering.js rename to src/github-apps/tests/rendering.ts index 0db35c2c4d2e..2a352c666dea 100644 --- a/src/github-apps/tests/rendering.js +++ b/src/github-apps/tests/rendering.ts @@ -6,16 +6,54 @@ import { allVersions } from '@/versions/lib/all-versions' import { get, getDOM } from '@/tests/helpers/e2etest' import { categoriesWithoutSubcategories } from '@/rest/lib/index' import { getAppsData } from '@/github-apps/lib/index' +import type { Version } from '@/types' -const configContent = JSON.parse(await readFile('src/github-apps/lib/config.json', 'utf8')) -const pageTypes = Object.keys(configContent.pages) -const permissionPages = pageTypes.filter((pageType) => pageType.includes('permissions')) -const enabledPages = pageTypes.filter((pageType) => !pageType.includes('permissions')) -const defaultVersion = Object.values(allVersions) +interface PageConfig { + frontmatterDefaults: { + versions: string | Record + autogenerated: string + } + targetFilename: string +} + +interface AppsConfig { + targetDirectory: string + pages: Record + 'api-versions': Record + sha: string +} + +interface PermissionItem { + category: string + subcategory: string + slug: string +} + +interface PermissionData { + permissions: PermissionItem[] +} + +interface EnabledItem { + subcategory: string + slug: string +} + +const configContent: AppsConfig = JSON.parse( + await readFile('src/github-apps/lib/config.json', 'utf8'), +) +const pageTypes: string[] = Object.keys(configContent.pages) +const permissionPages: string[] = pageTypes.filter((pageType) => 
pageType.includes('permissions')) +const enabledPages: string[] = pageTypes.filter((pageType) => !pageType.includes('permissions')) +const defaultVersion: Version | undefined = Object.values(allVersions) .filter((version) => version.nonEnterpriseDefault) .shift() -const version = defaultVersion.version -const apiVersion = defaultVersion.latestApiVersion + +if (!defaultVersion) { + throw new Error('No default version found') +} + +const version: string = defaultVersion.version +const apiVersion: string = defaultVersion.latestApiVersion describe('REST references docs', () => { vi.setConfig({ testTimeout: 3 * 60 * 1000 }) @@ -24,15 +62,19 @@ describe('REST references docs', () => { // For every version of /rest/overview/endpoints-available-for-github-apps test('loads enabled list pages', async () => { for (const page of enabledPages) { - const schemaSlugs = [] + const schemaSlugs: string[] = [] - const enabledForApps = await getAppsData(page, version, apiVersion) + const enabledForApps: Record = await getAppsData( + page, + version, + apiVersion, + ) // using the static file, generate the expected slug for each operation for (const [key, value] of Object.entries(enabledForApps)) { schemaSlugs.push( ...value.map( - (item) => + (item: EnabledItem) => `/en/rest/${key}${ categoriesWithoutSubcategories.includes(key) ? '' : '/' + item.subcategory }#${item.slug}`, @@ -54,15 +96,19 @@ describe('REST references docs', () => { test('loads permission list pages', async () => { // permissions pages for (const page of permissionPages) { - const schemaSlugs = [] + const schemaSlugs: string[] = [] - const permissionsData = await getAppsData(page, version, apiVersion) + const permissionsData: Record = await getAppsData( + page, + version, + apiVersion, + ) // using the static file, generate the expected slug for each operation for (const value of Object.values(permissionsData)) { schemaSlugs.push( ...value.permissions.map( - (item) => + (item: PermissionItem) => `/en/rest/${item.category}${ categoriesWithoutSubcategories.includes(item.category) ? 
'' : '/' + item.subcategory }#${item.slug}`, diff --git a/src/graphql/lib/index.js b/src/graphql/lib/index.js deleted file mode 100644 index b13f17bd6536..000000000000 --- a/src/graphql/lib/index.js +++ /dev/null @@ -1,98 +0,0 @@ -import { - readCompressedJsonFileFallbackLazily, - readCompressedJsonFileFallback, -} from '@/frame/lib/read-json-file' -import { getAutomatedPageMiniTocItems } from '@/frame/lib/get-mini-toc-items' -import languages from '@/languages/lib/languages' -import { allVersions } from '@/versions/lib/all-versions' - -export const GRAPHQL_DATA_DIR = 'src/graphql/data' -/* ADD LANGUAGE KEY */ -const previews = new Map() -const upcomingChanges = new Map() -const changelog = new Map() -const graphqlSchema = new Map() -const miniTocs = new Map() - -Object.keys(languages).forEach((language) => { - miniTocs.set(language, new Map()) -}) - -export function getGraphqlSchema(version, type) { - const graphqlVersion = getGraphqlVersion(version) - if (!graphqlSchema.has(graphqlVersion)) { - graphqlSchema.set( - graphqlVersion, - readCompressedJsonFileFallback(`${GRAPHQL_DATA_DIR}/${graphqlVersion}/schema.json`), - ) - } - return graphqlSchema.get(graphqlVersion)[type] -} - -export function getGraphqlChangelog(version) { - const graphqlVersion = getGraphqlVersion(version) - if (!changelog.has(graphqlVersion)) { - changelog.set( - graphqlVersion, - readCompressedJsonFileFallbackLazily( - `${GRAPHQL_DATA_DIR}/${graphqlVersion}/changelog.json`, - )(), - ) - } - - return changelog.get(graphqlVersion) -} - -export function getGraphqlBreakingChanges(version) { - const graphqlVersion = getGraphqlVersion(version) - if (!upcomingChanges.has(graphqlVersion)) { - const data = readCompressedJsonFileFallbackLazily( - `${GRAPHQL_DATA_DIR}/${graphqlVersion}/upcoming-changes.json`, - )() - upcomingChanges.set(graphqlVersion, data) - } - return upcomingChanges.get(graphqlVersion) -} - -export function getPreviews(version) { - const graphqlVersion = getGraphqlVersion(version) - if (!previews.has(graphqlVersion)) { - const data = readCompressedJsonFileFallbackLazily( - `${GRAPHQL_DATA_DIR}/${graphqlVersion}/previews.json`, - )() - previews.set(graphqlVersion, data) - } - return previews.get(graphqlVersion) -} - -export async function getMiniToc(context, type, items, depth = 2, markdownHeading = '') { - const { currentLanguage, currentVersion } = context - const graphqlVersion = getGraphqlVersion(currentVersion) - if (!miniTocs.get(currentLanguage).has(graphqlVersion)) { - miniTocs.get(currentLanguage).set(graphqlVersion, new Map()) - } - if (!miniTocs.get(currentLanguage).get(graphqlVersion).has(type)) { - const graphqlMiniTocItems = await getAutomatedPageMiniTocItems( - items, - context, - depth, - markdownHeading, - ) - miniTocs.get(currentLanguage).get(graphqlVersion).set(type, graphqlMiniTocItems) - } - return miniTocs.get(currentLanguage).get(graphqlVersion).get(type) -} - -export async function getChangelogMiniTocs(items, context, depth = 2, markdownHeading = '') { - if (!changelog.has('toc')) { - changelog.set('toc', await getAutomatedPageMiniTocItems(items, context, depth, markdownHeading)) - } - return changelog.get('toc') -} - -function getGraphqlVersion(version) { - if (!(version in allVersions)) { - throw new Error(`Unrecognized version '${version}'. 
Not found in ${Object.keys(allVersions)}`) - } - return allVersions[version].openApiVersionName -} diff --git a/src/graphql/lib/index.ts b/src/graphql/lib/index.ts new file mode 100644 index 000000000000..d7c39a43d189 --- /dev/null +++ b/src/graphql/lib/index.ts @@ -0,0 +1,126 @@ +import { + readCompressedJsonFileFallbackLazily, + readCompressedJsonFileFallback, +} from '@/frame/lib/read-json-file' +import { getAutomatedPageMiniTocItems } from '@/frame/lib/get-mini-toc-items' +import languages from '@/languages/lib/languages' +import { allVersions } from '@/versions/lib/all-versions' +interface GraphqlContext { + currentLanguage: string + currentVersion: string + [key: string]: any +} + +export const GRAPHQL_DATA_DIR = 'src/graphql/data' +/* ADD LANGUAGE KEY */ +const previews = new Map() +const upcomingChanges = new Map() +const changelog = new Map() +const graphqlSchema = new Map() +const miniTocs = new Map>>() + +Object.keys(languages).forEach((language) => { + miniTocs.set(language, new Map()) +}) + +// Using any for return type as the GraphQL schema structure is complex and dynamically loaded from JSON +export function getGraphqlSchema(version: string, type: string): any { + const graphqlVersion: string = getGraphqlVersion(version) + if (!graphqlSchema.has(graphqlVersion)) { + graphqlSchema.set( + graphqlVersion, + readCompressedJsonFileFallback(`${GRAPHQL_DATA_DIR}/${graphqlVersion}/schema.json`), + ) + } + return graphqlSchema.get(graphqlVersion)[type] +} + +// Using any for return type as the changelog structure is dynamically loaded from JSON +export function getGraphqlChangelog(version: string): any { + const graphqlVersion: string = getGraphqlVersion(version) + if (!changelog.has(graphqlVersion)) { + changelog.set( + graphqlVersion, + readCompressedJsonFileFallbackLazily( + `${GRAPHQL_DATA_DIR}/${graphqlVersion}/changelog.json`, + )(), + ) + } + + return changelog.get(graphqlVersion) +} + +// Using any for return type as the breaking changes structure is dynamically loaded from JSON +export function getGraphqlBreakingChanges(version: string): any { + const graphqlVersion: string = getGraphqlVersion(version) + if (!upcomingChanges.has(graphqlVersion)) { + // Using any as the JSON structure is not typed + const data: any = readCompressedJsonFileFallbackLazily( + `${GRAPHQL_DATA_DIR}/${graphqlVersion}/upcoming-changes.json`, + )() + upcomingChanges.set(graphqlVersion, data) + } + return upcomingChanges.get(graphqlVersion) +} + +// Using any for return type as the previews structure is dynamically loaded from JSON +export function getPreviews(version: string): any { + const graphqlVersion: string = getGraphqlVersion(version) + if (!previews.has(graphqlVersion)) { + // Using any as the JSON structure is not typed + const data: any = readCompressedJsonFileFallbackLazily( + `${GRAPHQL_DATA_DIR}/${graphqlVersion}/previews.json`, + )() + previews.set(graphqlVersion, data) + } + return previews.get(graphqlVersion) +} + +export async function getMiniToc( + context: GraphqlContext, + type: string, + items: string[], + depth: number = 2, + markdownHeading: string = '', +): Promise { + const { currentLanguage, currentVersion } = context + const graphqlVersion: string = getGraphqlVersion(currentVersion) + const languageMap = miniTocs.get(currentLanguage) + if (!languageMap) { + throw new Error(`Language ${currentLanguage} not found in miniTocs`) + } + if (!languageMap.has(graphqlVersion)) { + languageMap.set(graphqlVersion, new Map()) + } + const versionMap = languageMap.get(graphqlVersion)! 
+  if (!versionMap.has(type)) {
+    // Using any[] as the mini TOC item structure is not yet typed in the codebase
+    const graphqlMiniTocItems: any[] = await getAutomatedPageMiniTocItems(
+      items,
+      context,
+      depth,
+      markdownHeading,
+    )
+    versionMap.set(type, graphqlMiniTocItems)
+  }
+  return versionMap.get(type)!
+}
+
+export async function getChangelogMiniTocs(
+  items: string[],
+  context: GraphqlContext,
+  depth: number = 2,
+  markdownHeading: string = '',
+): Promise<any[]> {
+  if (!changelog.has('toc')) {
+    changelog.set('toc', await getAutomatedPageMiniTocItems(items, context, depth, markdownHeading))
+  }
+  return changelog.get('toc')
+}
+
+function getGraphqlVersion(version: string): string {
+  if (!(version in allVersions)) {
+    throw new Error(`Unrecognized version '${version}'. Not found in ${Object.keys(allVersions)}`)
+  }
+  return allVersions[version].openApiVersionName
+}
diff --git a/src/graphql/pages/breaking-changes.tsx b/src/graphql/pages/breaking-changes.tsx
index 0fbaf096330f..c638c493de0f 100644
--- a/src/graphql/pages/breaking-changes.tsx
+++ b/src/graphql/pages/breaking-changes.tsx
@@ -37,7 +37,7 @@ export default function GraphqlBreakingChanges({
 }
 
 export const getServerSideProps: GetServerSideProps = async (context) => {
-  const { getGraphqlBreakingChanges } = await import('@/graphql/lib/index.js')
+  const { getGraphqlBreakingChanges } = await import('@/graphql/lib/index')
   const { getAutomatedPageMiniTocItems } = await import('@/frame/lib/get-mini-toc-items.js')
 
   const req = context.req as any
diff --git a/src/graphql/pages/changelog.tsx b/src/graphql/pages/changelog.tsx
index 1bfb0d50e5c1..9142a753ae57 100644
--- a/src/graphql/pages/changelog.tsx
+++ b/src/graphql/pages/changelog.tsx
@@ -28,7 +28,7 @@ export default function GraphqlChangelog({ mainContext, schema, automatedPageCon
 }
 
 export const getServerSideProps: GetServerSideProps = async (context) => {
-  const { getGraphqlChangelog } = await import('@/graphql/lib/index.js')
+  const { getGraphqlChangelog } = await import('@/graphql/lib/index')
   const { getAutomatedPageMiniTocItems } = await import('@/frame/lib/get-mini-toc-items.js')
 
   const req = context.req as any
diff --git a/src/graphql/pages/reference.tsx b/src/graphql/pages/reference.tsx
index d666ca4795f3..872c444da5db 100644
--- a/src/graphql/pages/reference.tsx
+++ b/src/graphql/pages/reference.tsx
@@ -44,7 +44,7 @@ export default function GraphqlReferencePage({
 }
 
 export const getServerSideProps: GetServerSideProps = async (context) => {
-  const { getGraphqlSchema, getMiniToc } = await import('@/graphql/lib/index.js')
+  const { getGraphqlSchema, getMiniToc } = await import('@/graphql/lib/index')
 
   const req = context.req as any
   const res = context.res as any
diff --git a/src/graphql/pages/schema-previews.tsx b/src/graphql/pages/schema-previews.tsx
index 7952096c63d9..97848971c893 100644
--- a/src/graphql/pages/schema-previews.tsx
+++ b/src/graphql/pages/schema-previews.tsx
@@ -33,7 +33,7 @@ export default function GraphqlPreviews({ mainContext, schema, automatedPageCont
 }
 
 export const getServerSideProps: GetServerSideProps = async (context) => {
-  const { getPreviews } = await import('@/graphql/lib/index.js')
+  const { getPreviews } = await import('@/graphql/lib/index')
   const { getAutomatedPageMiniTocItems } = await import('@/frame/lib/get-mini-toc-items.js')
 
   const req = context.req as any
From 943b2e1cbba973f45a5bce146fa2b090aa3145e9 Mon Sep 17 00:00:00 2001
From: Sunbrye Ly <56200261+sunbrye@users.noreply.github.com>
Date: Thu, 9 Oct 2025 10:30:11 -0700
Subject:
[PATCH 3/4] [2025-10-07] GPT-5 mini is the default for individual Copilot plans (#57856) --- .../concepts/billing/copilot-requests.md | 8 ++-- .../chat-with-copilot/chat-in-github.md | 8 ---- .../how-tos/chat-with-copilot/chat-in-ide.md | 12 ------ .../reference/ai-models/model-comparison.md | 40 +++++++++---------- 4 files changed, 24 insertions(+), 44 deletions(-) diff --git a/content/copilot/concepts/billing/copilot-requests.md b/content/copilot/concepts/billing/copilot-requests.md index fa99d9f6b5e4..53752023e3b6 100644 --- a/content/copilot/concepts/billing/copilot-requests.md +++ b/content/copilot/concepts/billing/copilot-requests.md @@ -52,7 +52,7 @@ The following {% data variables.product.prodname_copilot_short %} features can u If you use **{% data variables.copilot.copilot_free_short %}**, your plan comes with up to 2,000 code completion requests and up to 50 premium requests per month. All chat interactions count as premium requests. -If you're on a **paid plan**, you get unlimited code completions and unlimited chat interactions using the included models ({% data variables.copilot.copilot_gpt_41 %} and {% data variables.copilot.copilot_gpt_4o %}). Rate limiting is in place to accommodate for high demand. See [AUTOTITLE](/copilot/concepts/rate-limits). +If you're on a **paid plan**, you get unlimited code completions and unlimited chat interactions using the included models ({% data variables.copilot.copilot_gpt_5_mini %}, {% data variables.copilot.copilot_gpt_41 %} and {% data variables.copilot.copilot_gpt_4o %}). Rate limiting is in place to accommodate for high demand. See [AUTOTITLE](/copilot/concepts/rate-limits). Paid plans also receive a monthly allowance of premium requests, which can be used for advanced chat interactions, code completions using premium models, and other premium features. For an overview of the amount of premium requests included in each plan, see [AUTOTITLE](/copilot/about-github-copilot/subscription-plans-for-github-copilot#comparing-copilot-plans). @@ -91,7 +91,7 @@ The available models vary depending on your {% data variables.product.prodname_c Each model has a premium request multiplier, based on its complexity and resource usage. If you are on a paid {% data variables.product.prodname_copilot_short %} plan, your premium request allowance is deducted according to this multiplier. -{% data variables.copilot.copilot_gpt_41 %} and {% data variables.copilot.copilot_gpt_4o %} are the included models, and do not consume any premium requests if you are on a **paid plan**. +{% data variables.copilot.copilot_gpt_5_mini %}, {% data variables.copilot.copilot_gpt_41 %} and {% data variables.copilot.copilot_gpt_4o %} are the included models, and do not consume any premium requests if you are on a **paid plan**. If you use **{% data variables.copilot.copilot_free_short %}**, you have access to a limited number of models, and each model will consume one premium request when used. For example, if you make a request using the {% data variables.copilot.copilot_gemini_flash %} model, your interaction will consume **one premium request**, not 0.25 premium requests. @@ -102,5 +102,5 @@ If you use **{% data variables.copilot.copilot_free_short %}**, you have access Premium request usage is based on the model’s multiplier and the feature you’re using. For example: * **Using {% data variables.copilot.copilot_claude_opus %} in {% data variables.copilot.copilot_chat_short %}**: With a 10× multiplier, one interaction counts as 10 premium requests. 
-* **Using {% data variables.copilot.copilot_gpt_41 %} on {% data variables.copilot.copilot_free_short %}**: Each interaction counts as 1 premium request. -* **Using {% data variables.copilot.copilot_gpt_41 %} on a paid plan**: No premium requests are consumed. +* **Using {% data variables.copilot.copilot_gpt_5_mini %} on {% data variables.copilot.copilot_free_short %}**: Each interaction counts as 1 premium request. +* **Using {% data variables.copilot.copilot_gpt_5_mini %} on a paid plan**: No premium requests are consumed. diff --git a/content/copilot/how-tos/chat-with-copilot/chat-in-github.md b/content/copilot/how-tos/chat-with-copilot/chat-in-github.md index ab77168074ad..458b8d602f1b 100644 --- a/content/copilot/how-tos/chat-with-copilot/chat-in-github.md +++ b/content/copilot/how-tos/chat-with-copilot/chat-in-github.md @@ -145,14 +145,6 @@ You can attach an image to {% data variables.product.prodname_copilot_short %} a 1. Go to the immersive view of {% data variables.copilot.copilot_chat_short %} ([https://github.com/copilot](https://github.com/copilot)). 1. If you see the AI model picker at the top of the page, select one of the models that supports adding images to prompts: - * {% data variables.copilot.copilot_gpt_41 %} (the default that's used if you don't see a model picker) - * {% data variables.copilot.copilot_gpt_5_mini %} - * {% data variables.copilot.copilot_gpt_5 %} - * {% data variables.copilot.copilot_claude_sonnet_35 %} - * {% data variables.copilot.copilot_claude_sonnet_37 %} - * {% data variables.copilot.copilot_gemini_flash %} - * {% data variables.copilot.copilot_gemini_25_pro %} - ![Screenshot of the model picker with the list of models expanded.](/assets/images/help/copilot/model-picker-copilot-immersive.png) 1. Do one of the following: diff --git a/content/copilot/how-tos/chat-with-copilot/chat-in-ide.md b/content/copilot/how-tos/chat-with-copilot/chat-in-ide.md index 98230f5242d0..bfcaa646d675 100644 --- a/content/copilot/how-tos/chat-with-copilot/chat-in-ide.md +++ b/content/copilot/how-tos/chat-with-copilot/chat-in-ide.md @@ -166,12 +166,6 @@ For more information, see [{% data variables.copilot.copilot_edits_short %}](htt 1. If you see the AI model picker at the bottom right of the chat view, select one of the models that supports adding images to prompts: - * {% data variables.copilot.copilot_gpt_41 %} (the default that's used if you don't see a model picker) - * {% data variables.copilot.copilot_claude_sonnet_35 %} - * {% data variables.copilot.copilot_claude_sonnet_37 %} - * {% data variables.copilot.copilot_gemini_flash %} - * {% data variables.copilot.copilot_gemini_25_pro %} - ![Screenshot of {% data variables.copilot.copilot_chat_short %} with the model picker highlighted with a dark orange outline.](/assets/images/help/copilot/vsc-chat-model-picker.png) 1. Do one of the following: @@ -313,12 +307,6 @@ When you use {% data variables.copilot.copilot_agent_short %} mode, each prompt 1. If you see the AI model picker at the bottom right of the chat view, select one of the models that supports adding images to prompts: - * {% data variables.copilot.copilot_gpt_41 %} (the default that's used if you don't see a model picker) - * {% data variables.copilot.copilot_claude_sonnet_35 %} - * {% data variables.copilot.copilot_claude_sonnet_37 %} - * {% data variables.copilot.copilot_gemini_flash %} - * {% data variables.copilot.copilot_gemini_25_pro %} - 1. Do one of the following: * Copy an image and paste it into the chat view. 
diff --git a/content/copilot/reference/ai-models/model-comparison.md b/content/copilot/reference/ai-models/model-comparison.md index b259ba3a58fc..3aead50a30fb 100644 --- a/content/copilot/reference/ai-models/model-comparison.md +++ b/content/copilot/reference/ai-models/model-comparison.md @@ -27,24 +27,24 @@ contentType: reference Use this table to find a suitable model quickly, see more detail in the sections below. -| Model | Task area | Excels at (primary use case) | Additional capabilities | Further reading | -|--------------------------------------------------------|----------------------------------------------|-------------------------------------------------------------------------|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| {% data variables.copilot.copilot_gpt_41 %} | General-purpose coding and writing | Fast, accurate code completions and explanations | Agent mode, vision | [{% data variables.copilot.copilot_gpt_41 %} model card](https://openai.com/index/gpt-4-1/) | -| {% data variables.copilot.copilot_gpt_5_codex %} | General-purpose coding and writing | Fast, accurate code completions and explanations | Agent mode | [{% data variables.copilot.copilot_gpt_5_codex %} model card](https://cdn.openai.com/pdf/97cc5669-7a25-4e63-b15f-5fd5bdc4d149/gpt-5-codex-system-card.pdf) | -| {% data variables.copilot.copilot_gpt_5_mini %} | Deep reasoning and debugging | Well-defined tasks and precise prompts | Reasoning, vision | Not available | -| {% data variables.copilot.copilot_gpt_5 %} | Deep reasoning and debugging | Multi-step problem solving and architecture-level code analysis | Reasoning | [{% data variables.copilot.copilot_gpt_5 %} model card](https://cdn.openai.com/pdf/8124a3ce-ab78-4f06-96eb-49ea29ffb52f/gpt5-system-card-aug7.pdf) | -| {% data variables.copilot.copilot_o3 %} | Deep reasoning and debugging | Multi-step problem solving and architecture-level code analysis | Reasoning | [{% data variables.copilot.copilot_o3 %} model card](https://openai.com/index/o3-o4-mini-system-card/) | -| {% data variables.copilot.copilot_o4_mini %} | Fast help with simple or repetitive tasks | Fast, reliable answers to lightweight coding questions | Lower latency | [{% data variables.copilot.copilot_o4_mini %} model card](https://openai.com/index/o3-o4-mini-system-card/) | -| {% data variables.copilot.copilot_claude_sonnet_45 %} | General-purpose coding and agent tasks | Complex problem-solving challenges, sophisticated reasoning | Agent mode | Not available | -| {% data variables.copilot.copilot_claude_opus_41 %} | Deep reasoning and debugging | Complex problem-solving challenges, sophisticated reasoning | Reasoning, vision | [{% data variables.copilot.copilot_claude_opus_41 %} model card](https://assets.anthropic.com/m/4c024b86c698d3d4/original/Claude-4-1-System-Card.pdf) | -| {% data variables.copilot.copilot_claude_opus %} | Deep reasoning and debugging | Complex problem-solving challenges, sophisticated reasoning | Reasoning, vision | [{% data variables.copilot.copilot_claude_opus %} model card](https://www-cdn.anthropic.com/6be99a52cb68eb70eb9572b4cafad13df32ed995.pdf) | -| {% data variables.copilot.copilot_claude_sonnet_35 %} | Fast help with simple or repetitive tasks | Quick responses for code, syntax, and documentation | Agent mode, vision | [{% data variables.copilot.copilot_claude_sonnet_35 %} model 
card](https://www-cdn.anthropic.com/fed9cc193a14b84131812372d8d5857f8f304c52/Model_Card_Claude_3_Addendum.pdf) | -| {% data variables.copilot.copilot_claude_sonnet_37 %} | Deep reasoning and debugging | Structured reasoning across large, complex codebases | Agent mode, vision | [{% data variables.copilot.copilot_claude_sonnet_37 %} model card](https://assets.anthropic.com/m/785e231869ea8b3b/original/claude-3-7-sonnet-system-card.pdf) | -| {% data variables.copilot.copilot_claude_sonnet_40 %} | Deep reasoning and debugging | Performance and practicality, perfectly balanced for coding workflows | Agent mode, vision | [{% data variables.copilot.copilot_claude_sonnet_40 %} model card](https://www-cdn.anthropic.com/6be99a52cb68eb70eb9572b4cafad13df32ed995.pdf) | -| {% data variables.copilot.copilot_gemini_25_pro %} | Deep reasoning and debugging | Complex code generation, debugging, and research workflows | Reasoning, vision | [{% data variables.copilot.copilot_gemini_25_pro %} model card](https://storage.googleapis.com/model-cards/documents/gemini-2.5-pro.pdf) | -| {% data variables.copilot.copilot_gemini_flash %} | Working with visuals (diagrams, screenshots) | Real-time responses and visual reasoning for UI and diagram-based tasks | Vision | [{% data variables.copilot.copilot_gemini_flash %} model card](https://storage.googleapis.com/model-cards/documents/gemini-2-flash.pdf) | -| {% data variables.copilot.copilot_grok_code %} | General-purpose coding and writing | Fast, accurate code completions and explanations | Agent mode | [{% data variables.copilot.copilot_grok_code %} model card](https://data.x.ai/2025-08-20-grok-4-model-card.pdf) | -| {% data variables.copilot.copilot_qwen_25 %} | General-purpose coding and writing | Code generation, reasoning, and code repair / debugging | Reasoning | [{% data variables.copilot.copilot_qwen_25 %} model card](https://arxiv.org/pdf/2409.12186) +| Model | Task area | Excels at (primary use case) | Additional capabilities | Further reading | +|--------------------------------------------------------|----------------------------------------------|-------------------------------------------------------------------------|-------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| {% data variables.copilot.copilot_gpt_41 %} | General-purpose coding and writing | Fast, accurate code completions and explanations | Agent mode, vision | [{% data variables.copilot.copilot_gpt_41 %} model card](https://openai.com/index/gpt-4-1/) | +| {% data variables.copilot.copilot_gpt_5_codex %} | General-purpose coding and writing | Fast, accurate code completions and explanations | Agent mode | [{% data variables.copilot.copilot_gpt_5_codex %} model card](https://cdn.openai.com/pdf/97cc5669-7a25-4e63-b15f-5fd5bdc4d149/gpt-5-codex-system-card.pdf) | +| {% data variables.copilot.copilot_gpt_5_mini %} | General-purpose coding and writing | Fast, accurate code completions and explanations | Agent mode, reasoning, vision | [{% data variables.copilot.copilot_gpt_5_mini %} model card](https://cdn.openai.com/gpt-5-system-card.pdf) | +| {% data variables.copilot.copilot_gpt_5 %} | Deep reasoning and debugging | Multi-step problem solving and architecture-level code analysis | Reasoning | [{% data variables.copilot.copilot_gpt_5 %} model card](https://cdn.openai.com/gpt-5-system-card.pdf) | +| {% data variables.copilot.copilot_o3 %} | 
Deep reasoning and debugging | Multi-step problem solving and architecture-level code analysis | Reasoning | [{% data variables.copilot.copilot_o3 %} model card](https://openai.com/index/o3-o4-mini-system-card/) | +| {% data variables.copilot.copilot_o4_mini %} | Fast help with simple or repetitive tasks | Fast, reliable answers to lightweight coding questions | Lower latency | [{% data variables.copilot.copilot_o4_mini %} model card](https://openai.com/index/o3-o4-mini-system-card/) | +| {% data variables.copilot.copilot_claude_sonnet_45 %} | General-purpose coding and agent tasks | Complex problem-solving challenges, sophisticated reasoning | Agent mode | [{% data variables.copilot.copilot_claude_sonnet_45 %} model card](https://assets.anthropic.com/m/12f214efcc2f457a/original/Claude-Sonnet-4-5-System-Card.pdf) | +| {% data variables.copilot.copilot_claude_opus_41 %} | Deep reasoning and debugging | Complex problem-solving challenges, sophisticated reasoning | Reasoning, vision | [{% data variables.copilot.copilot_claude_opus_41 %} model card](https://assets.anthropic.com/m/4c024b86c698d3d4/original/Claude-4-1-System-Card.pdf) | +| {% data variables.copilot.copilot_claude_opus %} | Deep reasoning and debugging | Complex problem-solving challenges, sophisticated reasoning | Reasoning, vision | [{% data variables.copilot.copilot_claude_opus %} model card](https://www-cdn.anthropic.com/6be99a52cb68eb70eb9572b4cafad13df32ed995.pdf) | +| {% data variables.copilot.copilot_claude_sonnet_35 %} | Fast help with simple or repetitive tasks | Quick responses for code, syntax, and documentation | Agent mode, vision | [{% data variables.copilot.copilot_claude_sonnet_35 %} model card](https://www-cdn.anthropic.com/fed9cc193a14b84131812372d8d5857f8f304c52/Model_Card_Claude_3_Addendum.pdf) | +| {% data variables.copilot.copilot_claude_sonnet_37 %} | Deep reasoning and debugging | Structured reasoning across large, complex codebases | Agent mode, vision | [{% data variables.copilot.copilot_claude_sonnet_37 %} model card](https://assets.anthropic.com/m/785e231869ea8b3b/original/claude-3-7-sonnet-system-card.pdf) | +| {% data variables.copilot.copilot_claude_sonnet_40 %} | Deep reasoning and debugging | Performance and practicality, perfectly balanced for coding workflows | Agent mode, vision | [{% data variables.copilot.copilot_claude_sonnet_40 %} model card](https://www-cdn.anthropic.com/6be99a52cb68eb70eb9572b4cafad13df32ed995.pdf) | +| {% data variables.copilot.copilot_gemini_25_pro %} | Deep reasoning and debugging | Complex code generation, debugging, and research workflows | Reasoning, vision | [{% data variables.copilot.copilot_gemini_25_pro %} model card](https://storage.googleapis.com/model-cards/documents/gemini-2.5-pro.pdf) | +| {% data variables.copilot.copilot_gemini_flash %} | Working with visuals (diagrams, screenshots) | Real-time responses and visual reasoning for UI and diagram-based tasks | Vision | [{% data variables.copilot.copilot_gemini_flash %} model card](https://storage.googleapis.com/model-cards/documents/gemini-2-flash.pdf) | +| {% data variables.copilot.copilot_grok_code %} | General-purpose coding and writing | Fast, accurate code completions and explanations | Agent mode | [{% data variables.copilot.copilot_grok_code %} model card](https://data.x.ai/2025-08-20-grok-4-model-card.pdf) | +| {% data variables.copilot.copilot_qwen_25 %} | General-purpose coding and writing | Code generation, reasoning, and code repair / debugging | Reasoning | [{% data 
variables.copilot.copilot_qwen_25 %} model card](https://arxiv.org/pdf/2409.12186) ## Task: General-purpose coding and writing @@ -52,8 +52,8 @@ Use these models for common development tasks that require a balance of quality, | Model | Why it's a good fit | |-------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------| -| {% data variables.copilot.copilot_gpt_41 %} | Reliable default for most coding and writing tasks. Fast, accurate, and works well across languages and frameworks. | | {% data variables.copilot.copilot_gpt_5_codex %} | Delivers higher-quality code on complex engineering tasks like features, tests, debugging, refactors, and reviews without lengthy instructions. | +| {% data variables.copilot.copilot_gpt_5_mini %} | Reliable default for most coding and writing tasks. Fast, accurate, and works well across languages and frameworks. | | {% data variables.copilot.copilot_claude_sonnet_37 %} | Produces clear, structured output. Follows formatting instructions and maintains consistent style. | | {% data variables.copilot.copilot_gemini_flash %} | Fast and cost-effective. Well suited for quick questions, short code snippets, and lightweight writing tasks. | | {% data variables.copilot.copilot_o4_mini %} | Optimized for speed and cost efficiency. Ideal for real-time suggestions with low usage overhead. | @@ -136,7 +136,7 @@ Use these models when you want to ask questions about screenshots, diagrams, UI | Model | Why it's a good fit | |-------|---------------------| -| {% data variables.copilot.copilot_gpt_41 %} | Reliable default for most coding and writing tasks. Fast, accurate, and supports multimodal input for visual reasoning tasks. Works well across languages and frameworks. | +| {% data variables.copilot.copilot_gpt_5_mini %} | Reliable default for most coding and writing tasks. Fast, accurate, and supports multimodal input for visual reasoning tasks. Works well across languages and frameworks. | | {% data variables.copilot.copilot_claude_opus %} | Anthropic’s most powerful model. Strong at strategy, debugging, and multi-layered logic. | | {% data variables.copilot.copilot_claude_sonnet_40 %} | Improves on 3.7 with more reliable completions and smarter reasoning under pressure. | | {% data variables.copilot.copilot_gemini_flash %} | Fast, multimodal model optimized for real-time interaction. Useful for feedback on diagrams, visual prototypes, and UI layouts. | From 51c65e3b862bc71ae244baf6425fcecaae4e833c Mon Sep 17 00:00:00 2001 From: Sarah Schneider Date: Thu, 9 Oct 2025 13:54:59 -0400 Subject: [PATCH 4/4] Support data-driven tables (#57806) Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- data/tables/README.md | 99 +++++++++++++++++++ src/data-directory/middleware/data-tables.ts | 26 +++++ .../data/tables/supported-languages.yml | 13 +++ src/frame/middleware/index.ts | 2 + 4 files changed, 140 insertions(+) create mode 100644 data/tables/README.md create mode 100644 src/data-directory/middleware/data-tables.ts create mode 100644 src/fixtures/fixtures/data/tables/supported-languages.yml diff --git a/data/tables/README.md b/data/tables/README.md new file mode 100644 index 000000000000..1595c428b479 --- /dev/null +++ b/data/tables/README.md @@ -0,0 +1,99 @@ +# Data-driven tables + +## Overview + +GitHub Docs uses YAML files to manage some complex reference tables instead of hard-to-maintain Markdown tables. 
This approach provides: + +- **Maintainable format**: Stakeholders can easily update readable YAML files +- **Single source of truth**: Centralized data prevents inconsistencies +- **Accurate information**: Reduces errors and outdated content +- **Self-service process**: Minimal engineering support needed + +> **Important**: The `.yml` files in this directory are maintained **manually**. Tables that need automatic updates from external sources require engineering work. + +## Table of contents + +- [When to use this approach](#when-to-use-this-approach) +- [How it works](#how-it-works) +- [Step-by-step guide](#step-by-step-guide) +- [Testing and validation](#testing-and-validation) +- [Next steps](#next-steps) + +## When to use this approach + +Use data-driven tables when you have: +- Complex reference tables with multiple columns +- Data that needs regular updates by different stakeholders +- Structured information that benefits from validation + +## How it works + +Every data-driven table needs **three files** that work together: + +| File type | Location | Purpose | +|-----------|----------|---------| +| **Data file** | `data/tables/` | Stores the table content in YAML format | +| **Content file** | `content/` | Displays the table using Liquid templating | +| **Schema file** | `src/data-directory/lib/data-schemas/` | Validates the YAML structure | + +**Estimated time**: 30-60 minutes for a new table + +## Step-by-step guide + +### Step 1: Create the data file + +Create a new `.yml` file in `data/tables/` with a descriptive name. + +**Copilot prompt template:** +``` +Create a YAML structure that will allow me to generate a table that looks like: +[describe your table headers, rows, and columns OR attach an example] + +See src/secret-scanning/data/public-docs.yml for an example. +``` + +### Step 2: Create the content display + +In your content file, add Liquid code to render the table. Access your data at `{% data tables.TABLE_NAME %}` (where `TABLE_NAME` is your filename without `.yml`). + +**Copilot prompt template:** +``` +Create a Markdown table that is dynamically rendered using Liquid code. +Pull data from data/tables/TABLE_NAME.yml. +The table should look like: [describe your desired output OR attach an example] + +See content/code-security/secret-scanning/introduction/supported-secret-scanning-patterns.md for an example. +Liquid docs: https://shopify.github.io/liquid +``` + +**💡 Tip**: Iterate between Steps 1 and 2 until the table renders correctly. + +### Step 3: Create the schema file + +Create a `.ts` file in `src/data-directory/lib/data-schemas/` with the same name as your YAML file. + +**Copilot prompt template:** +``` +Create a TypeScript schema following prior art under data-schemas that enforces +the structure of the data/TABLE_NAME.yml file. + +See src/data-directory/lib/data-schemas/learning-tracks.ts for an example. +``` + +## Testing and validation + +After creating all three files: + +1. **Build the site**: Run `npm run build` +2. **Test schemas**: Run `npm test -- src/data-directory/tests` +3. **Fix any errors**: If you get failures in `src/data-directory/tests/data-schemas.js`: + - Copy the error message + - In VS Code Copilot Chat, type: "When I ran the schema test, I got this error:" and paste the error + - Update your schema file based on Copilot's suggestions +4. **Repeat testing** until all tests pass + +## Next steps + +Once your table is working and tests pass, create a pull request for review. 
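
For orientation only (this sketch is not part of the change set), a Step 3 schema for a table shaped like the `supported-languages.yml` fixture in this PR might look roughly like the following, assuming the data-schemas modules export plain JSON Schema objects. Check `src/data-directory/lib/data-schemas/learning-tracks.ts` for the repository's actual convention before copying anything:

```typescript
// Illustrative sketch only — follow the existing data-schemas files for the real convention.
// Shapes mirror the supported-languages.yml fixture: a `features` map and a `supported` map.
const schema = {
  type: 'object',
  additionalProperties: false,
  required: ['features', 'supported'],
  properties: {
    features: {
      type: 'object',
      // One entry per table column, keyed by a short feature id
      additionalProperties: {
        type: 'object',
        required: ['name', 'fptAndGhec', 'ghes'],
        properties: {
          name: { type: 'string' },
          fptAndGhec: { type: 'boolean' },
          ghes: { type: 'boolean' },
        },
      },
    },
    supported: {
      type: 'object',
      // One entry per table row (for example, a language), mapping feature ids to a status string
      additionalProperties: {
        type: 'object',
        additionalProperties: { type: 'string' },
      },
    },
  },
}

export default schema
```

The schema tests described above then validate each YAML file in `data/tables/` against its matching schema, so a mismatch shows up as a test failure rather than a broken table.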
+
+The `docs-engineering` team must review and approve your implementation.
\ No newline at end of file
diff --git a/src/data-directory/middleware/data-tables.ts b/src/data-directory/middleware/data-tables.ts
new file mode 100644
index 000000000000..a29086e9f08c
--- /dev/null
+++ b/src/data-directory/middleware/data-tables.ts
@@ -0,0 +1,26 @@
+import type { NextFunction, Response } from 'express'
+import { ExtendedRequest } from '@/types'
+import { getDeepDataByLanguage } from '@/data-directory/lib/get-data'
+
+let tablesCache: Record<string, any> | null = null
+
+// Lazily load and cache the tables data on first use
+const getTables = () => {
+  if (!tablesCache) {
+    // Keep product-name-heavy reference tables in English only for now
+    tablesCache = getDeepDataByLanguage('tables', 'en')
+  }
+  return tablesCache
+}
+
+/**
+ * Middleware that loads data-driven table content into the request context.
+ * Tables are sourced from YAML files in the data/tables/ directory.
+ */
+export default async function dataTables(req: ExtendedRequest, res: Response, next: NextFunction) {
+  if (!req.context) throw new Error('request not contextualized')
+
+  req.context.tables = getTables()
+
+  return next()
+}
diff --git a/src/fixtures/fixtures/data/tables/supported-languages.yml b/src/fixtures/fixtures/data/tables/supported-languages.yml
new file mode 100644
index 000000000000..60a1b56d8e1c
--- /dev/null
+++ b/src/fixtures/fixtures/data/tables/supported-languages.yml
@@ -0,0 +1,13 @@
+features:
+  foo:
+    name: 'Foo Feature'
+    fptAndGhec: true
+    ghes: false
+  bar:
+    name: 'Bar Feature'
+    fptAndGhec: true
+    ghes: true
+supported:
+  BeepBoop:
+    foo: 'supported'
+    bar: 'not-supported'
diff --git a/src/frame/middleware/index.ts b/src/frame/middleware/index.ts
index b52591590ea2..bf4fc11b46dd 100644
--- a/src/frame/middleware/index.ts
+++ b/src/frame/middleware/index.ts
@@ -35,6 +35,7 @@ import robots from './robots'
 import earlyAccessLinks from '@/early-access/middleware/early-access-links'
 import categoriesForSupport from './categories-for-support'
 import triggerError from '@/observability/middleware/trigger-error'
+import dataTables from '@/data-directory/middleware/data-tables'
 import secretScanning from '@/secret-scanning/middleware/secret-scanning'
 import ghesReleaseNotes from '@/release-notes/middleware/ghes-release-notes'
 import whatsNewChangelog from './context/whats-new-changelog'
@@ -256,6 +257,7 @@ export default function (app: Express) {
   app.head('/*path', fastHead)
 
   // *** Preparation for render-page: contextualizers ***
+  app.use(asyncMiddleware(dataTables))
   app.use(asyncMiddleware(secretScanning))
   app.use(asyncMiddleware(ghesReleaseNotes))
   app.use(asyncMiddleware(whatsNewChangelog))
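
As a quick sanity check on the new contextualizer, a test along these lines could confirm that `dataTables` populates `req.context.tables` before the render pipeline runs. This is an illustrative sketch rather than part of the patch; the mocked loader return value and the exact test location are hypothetical:

```typescript
import { describe, expect, test, vi } from 'vitest'

import dataTables from '@/data-directory/middleware/data-tables'

// Hypothetical mock so the sketch does not read YAML from disk; the real loader is
// getDeepDataByLanguage from '@/data-directory/lib/get-data'.
vi.mock('@/data-directory/lib/get-data', () => ({
  getDeepDataByLanguage: () => ({
    'supported-languages': { features: {}, supported: {} },
  }),
}))

describe('dataTables middleware (illustrative)', () => {
  test('attaches tables to req.context and calls next', async () => {
    const req: any = { context: {} }
    const next = vi.fn()

    await dataTables(req, {} as any, next)

    // Each file in data/tables/ should surface under its filename (without .yml)
    expect(req.context.tables['supported-languages']).toBeDefined()
    expect(next).toHaveBeenCalledTimes(1)
  })
})
```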