diff --git a/.dumi/hooks/types.ts b/.dumi/hooks/types.ts new file mode 100644 index 0000000..22228de --- /dev/null +++ b/.dumi/hooks/types.ts @@ -0,0 +1,239 @@ +import type { ExampleBlockAsset } from 'dumi-assets-types'; +import type { ComponentType, ReactNode } from 'react'; + +export interface IPreviewerProps { + /** + * title of current demo + */ + title?: string; + /** + * description of current demo + */ + description?: string; + /** + * filename of current demo + */ + filename?: string; + /** + * use iframe to render demo + */ + iframe?: boolean | number; + /** + * debug mark (will only render in dev by default) + */ + debug?: boolean; + /** + * display the source code or not by default + */ + defaultShowCode?: boolean; + /** + * url for render current demo in a single page + */ + demoUrl: string; + /** + * disable demo content padding + */ + compact?: boolean; + /** + * add transform property for handle absolute/fixed position element + */ + transform?: boolean; + /** + * background color for demo content + */ + background?: string; + /** + * asset metadata of current demo + */ + asset: ExampleBlockAsset; + /** + * react node of current demo + */ + children: ReactNode; + [key: string]: any; +} + +export interface IRouteMeta { + // route frontmatter + frontmatter: { + // seo related + title: string; + description?: string; + keywords?: string[]; + // render related + nav?: + | string + | { + title?: string; + order?: number; + second?: Omit; + }; + group?: { title?: string; order?: number; index?: boolean } | string; + subGroup?: { title?: string }; + order?: number; + hero?: { + title?: string; + description?: string; + background?: string; + actions?: { text: string; link: string }[]; + [key: string]: any; + }; + features?: { + emoji?: string; + title?: string; + link?: string; + description?: string; + [key: string]: any; + }[]; + toc?: boolean | 'content' | 'menu'; + demo?: { + cols?: number; + tocDepth?: number; + }; + atomId?: string; + filename?: 
string; + lastUpdated?: number; + debug?: boolean; + /** + * Control the display of the sidebar menu. + * @default true + */ + sidebar?: boolean; + [key: string]: any; + }; + // route toc + toc: { + id: string; + depth: number; + title: string; + /** + * private field, do not use it in your code + */ + _debug_demo?: boolean; + }[]; + // route texts + texts: { + type?: 'content'; + value: string; + /** + * paragraph index + */ + paraId: number; + /** + * title index in toc + */ + tocIndex?: number; + }[]; + // tabs + tabs?: { + key: string; + title?: string; + titleIntlId?: string; + components: { + default: ComponentType; + Extra: ComponentType; + Action: ComponentType; + }; + meta: { + frontmatter: Omit< + IRouteMeta['frontmatter'], + 'description' | 'keywords' | 'nav' | 'group' | 'hero' | 'features' + >; + toc: IRouteMeta['toc']; + texts: IRouteMeta['texts']; + [key: string]: any; + }; + }[]; + /** + * private field, do not use it in your code + */ + _atom_route?: boolean; +} + +type IBasicLocale = { id: string; name: string }; +export type ILocale = + | (IBasicLocale & { base: string }) + | (IBasicLocale & { suffix: string }); +export type ILocalesConfig = ILocale[]; + +export interface INavItem { + title: string; + link?: string; + order?: number; + activePath?: string; + [key: string]: any; +} +export interface ISidebarItem { + title: string; + link: string; + order?: number; + frontmatter?: IRouteMeta['frontmatter']; + [key: string]: any; +} +export interface ISidebarGroup { + title?: string; + children: ISidebarItem[]; + [key: string]: any; +} +export type SocialTypes = + | 'github' + | 'weibo' + | 'twitter' + | 'gitlab' + | 'facebook' + | 'zhihu' + | 'yuque' + | 'linkedin'; + +export type INavItems = (INavItem & { children?: INavItem[] })[]; +export type INav = INavItems | Record; +type IUserNavItem = Pick; +export type IUserNavMode = 'override' | 'append' | 'prepend'; +export type IUserNavItems = (IUserNavItem & { children?: IUserNavItem[] })[]; +export 
type IUserNavValue = IUserNavItems | Record; +export type NavWithMode = { + /** + * 扩展导航的模式 + * @description + * - 'override': 用 value 中配置的导航直接覆盖约定路由的导航 + * - 'append': 将 value 中配置的导航追加到约定路由导航后面 + * - 'prepend': 将 value 中配置的导航添加到约定路由导航前面 + */ + mode: IUserNavMode; + value: T; +}; + +export interface IThemeConfig { + name?: string; + logo?: string | false; + nav?: IUserNavValue | NavWithMode; + sidebar?: Record; + footer?: string | false; + showLineNum?: boolean; + prefersColor: { + default: 'light' | 'dark' | 'auto'; + switch: boolean; + }; + nprogress?: boolean; + socialLinks?: { + /** + * 形如:github: "https://github.com/umijs/dumi" + */ + [key in SocialTypes]?: string; + }; + editLink?: boolean | string; + lastUpdated?: boolean; + [key: string]: any; +} + +export type IRoutesById = Record< + string, + { + path?: string; + parentId?: string; + meta?: IRouteMeta; + id: string; + redirect?: string; + [key: string]: any; + } +>; diff --git a/.dumi/hooks/useLocale.ts b/.dumi/hooks/useLocale.ts new file mode 100644 index 0000000..218db04 --- /dev/null +++ b/.dumi/hooks/useLocale.ts @@ -0,0 +1,12 @@ +import { useIntl, useSiteData } from 'dumi'; +import { useState } from 'react'; +import type { ILocale } from './types'; + +export const useLocale = (): ILocale => { + const intl = useIntl(); + const { locales } = useSiteData(); + const [locale] = useState( + () => locales.find(({ id }) => id === intl.locale)!, + ); + return locale; +}; diff --git a/.dumi/hooks/utils.ts b/.dumi/hooks/utils.ts new file mode 100644 index 0000000..76dec4d --- /dev/null +++ b/.dumi/hooks/utils.ts @@ -0,0 +1,148 @@ +import { PluginManager, useAppData, useIntl, useSiteData } from 'dumi'; +import { useCallback, useEffect, useLayoutEffect, useState } from 'react'; +import type { + ILocale, + INav, + INavItem, + IRouteMeta, + IRoutesById, + IUserNavValue, +} from './types'; +import { useLocale } from './useLocale'; + +/** + * private instance, do not use it in your code + */ +export let 
pluginManager: PluginManager; + +export const setPluginManager = (pm: PluginManager) => { + pluginManager = pm; +}; + +export const useLocaleDocRoutes = () => { + const intl = useIntl(); + const { routes } = useAppData(); + const { locales } = useSiteData(); + const [localeDocRoutes] = useState(() => { + const reversedLocales = locales.slice().reverse(); + + return Object.values(routes).reduce((ret, route) => { + const matched = reversedLocales.find((locale) => + 'suffix' in locale + ? // suffix mode + route.path!.endsWith(locale.suffix) + : // base mode + route.path!.startsWith(locale.base.slice(1)), + )!; + + if (route.parentId === 'DocLayout' && matched.id === intl.locale) { + ret[route.id] = route; + } + + return ret; + }, {}); + }); + + return localeDocRoutes; +}; + +/** + * 在 react 18 中需要新的 render 方式,这个函数用来处理不同的 jsx 模式。 + * @param version react version + * @returns code string + */ +export const genReactRenderCode = (version: string): string => { + const annotation = `/** + * This is an auto-generated demo by dumi + * if you think it is not working as expected, + * please report the issue at + * https://github.com/umijs/dumi/issues + */`; + + if (version.startsWith('18.') || version === 'latest') { + return `${annotation} + +import React from 'react'; +import { createRoot } from "react-dom/client"; +import App from "./App"; + +const rootElement = document.getElementById("root"); +const root = createRoot(rootElement); + +root.render();`; + } + return `${annotation} + +import React from 'react'; +import ReactDOM from 'react-dom'; +import App from './App'; + +ReactDOM.render( + , + document.getElementById('root'), +);`; +}; + +export const useIsomorphicLayoutEffect = + typeof window !== 'undefined' ? 
useLayoutEffect : useEffect; + +/** + * common comparer for sidebar/nav items + */ +export const useRouteDataComparer = < + T extends { order?: number; link?: string; path?: string; title?: string }, +>() => { + const locale = useLocale(); + + return useCallback((a: T, b: T) => { + return ( + // smaller before larger for all + ('order' in a && 'order' in b ? a.order! - b.order! : 0) || + // shallower before deeper for sidebar item + ('link' in a && 'link' in b + ? a.link!.split('/').length - b.link!.split('/').length + : 0) || + // shallower before deeper for sidebar leaf + ('path' in a && 'path' in b + ? a.path!.split('/').length - b.path!.split('/').length + : 0) || + // fallback to compare title (put non-title item at the end) + (a.title ? a.title.localeCompare(b.title || '', locale.id) : -1) + ); + }, []); +}; + +/** + * common util for pick meta to sort sidebar/nav items + */ +export const pickRouteSortMeta = ( + original: Partial>, + field: 'nav' | 'nav.second' | 'group', + fm: IRouteMeta['frontmatter'], +) => { + const sub: IRouteMeta['frontmatter']['group'] = + field === 'nav.second' + ? typeof fm.nav === 'object' + ? fm.nav.second + : {} + : fm[field]; + + switch (typeof sub) { + case 'object': + original.title = sub.title || original.title; + original.order = sub.order ?? original.order; + break; + + case 'string': + original.title = sub || original.title; + break; + + default: + } + + return original; +}; + +export function getLocaleNav(nav: IUserNavValue | INav, locale: ILocale) { + return Array.isArray(nav) ? 
nav : nav[locale.id]; +} diff --git a/.dumi/theme/constants/index.ts b/.dumi/theme/constants/index.ts new file mode 100644 index 0000000..8277afc --- /dev/null +++ b/.dumi/theme/constants/index.ts @@ -0,0 +1,31 @@ +/** 一级导航入口 */ +export const NavbarEnums = { + 'CodeFuse-Query': '/zh-CN/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery', + 'MFTCoder': '/zh-CN/docs/developer-docs/MFTCoder/main/MFTCoder', + 'CodeFuse-MFT-VLM': '/zh-CN/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm', + 'Test-Agent': '/zh-CN/docs/developer-docs/Test-Agent/main/TestAgent', + 'CodeFuse-ModelCache': '/zh-CN/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache', + 'CodeFuse-ChatBot': '/zh-CN/docs/developer-docs/CodeFuse-ChatBot/master/quickstart', + 'CodeFuse-DevOps-Eval': '/zh-CN/docs/developer-docs/CodeFuse-DevOps-Eval/master/data', + 'CodeFuse-DevOps-Model': '/zh-CN/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel', + 'CodeFuse-evalution': '/zh-CN/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution', + 'MuAgent': '/zh-CN/docs/api-docs/MuAgent/overview/multi-agent', + '整体介绍': '/zh-CN/docs/about/overview', + '关于':'/zh-CN/aboutDocs/aboutdocs' +} + +/** 一级导航 英文 */ +export const NavbarEnumsEn = { + 'CodeFuse-Query': '/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery', + 'MFTCoder': '/docs/developer-docs/MFTCoder/main/MFTCoder', + 'CodeFuse-MFT-VLM': '/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm', + 'Test-Agent': '/docs/developer-docs/Test-Agent/main/TestAgent', + 'CodeFuse-ModelCache': '/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache', + 'CodeFuse-ChatBot': '/docs/developer-docs/CodeFuse-ChatBot/master/quickstart', + 'CodeFuse-DevOps-Eval': '/docs/developer-docs/CodeFuse-DevOps-Eval/master/data', + 'CodeFuse-DevOps-Model': '/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel', + 'CodeFuse-evalution': '/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution', + 'MuAgent': 
'/docs/api-docs/MuAgent/overview/multi-agent', + 'Overview': '/docs/about/overview', + 'AboutDocs':'/aboutDocs/aboutdocs' +} diff --git a/.dumi/theme/layouts/DocLayout/index.less b/.dumi/theme/layouts/DocLayout/index.less new file mode 100644 index 0000000..ea5bdb4 --- /dev/null +++ b/.dumi/theme/layouts/DocLayout/index.less @@ -0,0 +1,214 @@ +@import (reference) '../../styles/variables.less'; + +// @{dark-selector} { +// color-scheme: dark; +// } + +body { + margin: 0; + padding: 0; + background-color: @c-site-bg; + // @{dark-selector} & { + // background-color: @c-site-bg-dark; + // } +} + +.@{prefix}-doc-layout { + font-family: sans-serif; + background-color: @c-site-bg; + + // @{dark-selector} & { + // background-color: @c-site-bg-dark; + // } + + @media @mobile { + &::before { + content: ''; + position: fixed; + z-index: 11; // 1 more than header + top: 0; + right: 0; + left: 0; + bottom: 0; + background-color: #ffffff; + transition: all 0.1s; + } + + &:not([data-mobile-sidebar-active])::before { + opacity: 0; + visibility: hidden; + } + } + + &-mobile-bar { + @height: 36px; + position: sticky; + z-index: 9; // 1 less than header + top: @s-header-height-m; + left: 0; + right: 0; + display: none; + align-items: center; + padding: 0 24px; + height: @height; + border-top: 1px solid @c-border-light; + background-color: fadeout(@c-site-bg, 10%); + backdrop-filter: blur(6px); + + @{dark-selector} & { + border-top-color: @c-border-less-dark; + background-color: fadein(@c-site-bg-dark, 10%); + } + + @media @mobile { + display: flex; + } + + .@{prefix}-sidebar-btn { + padding: 0; + color: @c-text-secondary; + border: 0; + background-color: transparent; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + + >svg { + width: 16px; + margin-right: 6px; + fill: @c-text-secondary; + vertical-align: middle; + + @{dark-selector} & { + fill: @c-text-secondary-dark; + } + } + } + } + + >main { + display: flex; + align-items: flex-start; + padding: 0 24px; + 
box-sizing: border-box; + + >section { + flex: 1; + max-width: 100%; + } + + >.@{prefix}-doc-layout-toc-wrapper { + position: sticky; + top: @s-header-height; + max-width: @s-sidebar-width; + margin-inline-start: 24px; + max-height: 80vh; + overflow: auto; + overscroll-behavior: contain; + -webkit-overflow-scrolling: touch; + + @media @mobile { + display: none; + } + + >h4 { + margin: 0 0 20px; + color: @c-text-note; + font-size: 15px; + line-height: 1; + + @{dark-selector} & { + color: @c-text-note-dark; + } + } + } + + .dumi-default-article .dumi-default-content:not([data-no-sidebar]) { + background-color: #070b13; + box-shadow: 0px 2px 4px 0px #000000ff; + } + + .markdown { + font-size: 16px; + color: #e4e9ec; + line-height: 30px; + letter-spacing: 0.61px; + font-weight: 300; + margin: 0 auto; + + a { + color: #9999FF; + } + } + + .dumi-default-content-tool { + color: #b5b5b5; + } + + .dumi-default-content-tool>dl dd>a { + color: #b5b5b5; + } + + .dumi-default-toc>li>a.active { + color: #fff !important; + } + + .dumi-default-toc>li>a { + color: #b5b5b5; + } + + .dumi-default-source-code { + background: #2c2d2e; + border-radius: 4px; + } + + .dumi-default-source-code>pre.prism-code { + color: #e4e9ec; + background: #2c2d2e; + } + + .dumi-default-source-code-copy { + background: #2c2d2e; + } + + .dumi-default-article .dumi-default-content:not([data-no-sidebar]) { + padding: 10px 18px 48px 28px; + } + + .ant-select-outlined:not(.ant-select-customize-input) .ant-select-selector { + background: #070b13; + } + + .ant-menu-light .ant-menu-item { + color: #b5b5b5; + } + + .ant-menu-light:not(.ant-menu-horizontal) .ant-menu-submenu-title:active { + background-color: #181d29; + border-radius: 5px; + + } + + .ant-menu-light:not(.ant-menu-horizontal) .ant-menu-item:not(.ant-menu-item-selected):hover { + background-color: #181d29; + color: #fff; + } + + .ant-select-single .ant-select-selector { + color: #fff; + } + + .ant-select .ant-select-arrow { + color: #fff; + } + + 
.ant-menu-light .ant-menu-item-selected { + background-color: #181d29; + } + } + .markdown:not(:lang(zh)):not(:lang(ja)):not(:lang(kr)), .markdown:not(:lang(zh)) { + letter-spacing: 0.61px; + } + +} diff --git a/.dumi/theme/layouts/DocLayout/index.tsx b/.dumi/theme/layouts/DocLayout/index.tsx new file mode 100644 index 0000000..30d5ecc --- /dev/null +++ b/.dumi/theme/layouts/DocLayout/index.tsx @@ -0,0 +1,114 @@ +import { ReactComponent as IconSidebar } from '@ant-design/icons-svg/inline-svg/outlined/align-left.svg'; +import animateScrollTo from 'animated-scroll-to'; +import { + Helmet, + useIntl, + useLocation, + useOutlet, + useRouteMeta, + useSidebarData, + useSiteData, +} from 'dumi'; +import Content from 'dumi/theme/slots/Content'; +import ContentFooter from 'dumi/theme/slots/ContentFooter'; +import Features from 'dumi/theme/slots/Features'; +import Footer from 'dumi/theme/slots/Footer'; +import Header from 'dumi/theme/slots/Header'; +import Hero from 'dumi/theme/slots/Hero'; +import Sidebar from 'dumi/theme/slots/Sidebar'; +import Toc from 'dumi/theme/slots/Toc'; +import React, { useEffect, useState, type FC } from 'react'; +import './index.less'; +import AboutDocs from 'dumi/theme/slots/AboutDocs'; +import Foot from 'dumi/theme/slots/Foot'; +import Contribution from 'dumi/theme/slots/Contribution'; + +const DocLayout: FC = () => { + const intl = useIntl(); + const outlet = useOutlet(); + const sidebar = useSidebarData(); + const { hash, pathname } = useLocation(); + const { loading, hostname } = useSiteData(); + const [activateSidebar, updateActivateSidebar] = useState(false); + const { frontmatter: fm } = useRouteMeta(); + const about = pathname.split("/").pop(); + const doc = pathname.includes("/docs"); + const showSidebar = fm.sidebar !== false && sidebar?.length > 0; + const Publication = pathname.split("/").pop(); + + // handle hash change or visit page hash after async chunk loaded + useEffect(() => { + const id = hash.replace('#', ''); + if (id) { + 
setTimeout(() => { + const elm = document.getElementById(decodeURIComponent(id)); + if (elm) { + // animated-scroll-to instead of native scroll + animateScrollTo(elm.offsetTop - 80, { + maxDuration: 300, + }); + } + }, 1); + } + }, [loading, hash]); + + return ( +
updateActivateSidebar(false)} + > + + + {fm.title && {fm.title}} + {fm.title && } + {fm.description && } + {fm.description && ( + + )} + {fm.keywords && ( + + )} + {fm.keywords && + fm.keywords.map((keyword) => ( + + ))} + {hostname && } + +
+ + { + about === 'aboutdocs' && + } + { + Publication === 'contribution' && + } + { + doc &&
+ {/* 文档页两侧展示 */} + {showSidebar && doc && } + + { +
{outlet}
+ } + { + + } + { +
+ } + + {fm.toc === 'content' && ( +
+ {/*

大纲

*/} + +
+ )} +
+ } + +
+ ); +}; + +export default DocLayout; diff --git a/.dumi/theme/locales/en-US.json b/.dumi/theme/locales/en-US.json new file mode 100644 index 0000000..6c0f656 --- /dev/null +++ b/.dumi/theme/locales/en-US.json @@ -0,0 +1,46 @@ +{ + "header.search.placeholder": "Search", + "header.color.mode.light": "Light Mode", + "header.color.mode.dark": "Dark Mode", + "header.color.mode.auto": "Follow System", + "header.social.github": "GitHub", + "header.social.weibo": "Weibo", + "header.social.twitter": "Twitter", + "header.social.gitlab": "GitLab", + "header.social.facebook": "Facebook", + "header.social.zhihu": "Zhihu", + "header.social.yuque": "Yuque", + "header.social.linkedin": "Linkedin", + "previewer.actions.code.expand": "Show Code", + "previewer.actions.code.shrink": "Hide Code", + "previewer.actions.sketch": "Copy to Sketch app", + "previewer.actions.sketch.group": "Copy as Sketch Group", + "previewer.actions.sketch.symbol": "Copy as Sketch Symbol", + "previewer.actions.sketch.divider": "------------------------", + "previewer.actions.sketch.guide": "How to paste to Sketch?", + "previewer.actions.codesandbox": "Open in CodeSandbox", + "previewer.actions.codepen": "Open in CodePen (Not implemented)", + "previewer.actions.stackblitz": "Open in StackBlitz", + "previewer.actions.separate": "Open in separate page", + "404.title": "PAGE NOT FOUND", + "404.back": "Back to homepage", + "api.component.name": "Name", + "api.component.description": "Description", + "api.component.type": "Type", + "api.component.default": "Default", + "api.component.required": "(required)", + "api.component.unavailable": "apiParser must be enabled to use auto-generated API", + "api.component.loading": "Properties definition is resolving, wait a moment...", + "api.component.not.found": "Properties definition not found for {id} component", + "content.tabs.default": "Doc", + "content.footer.last.updated": "Last updated: ", + "content.footer.actions.edit": "Improve this documentation", + 
"content.footer.actions.previous": "PREV", + "content.footer.actions.next": "NEXT", + "search.not.found": "No content was found", + "layout.sidebar.btn": "Sidebar", + "content.menus.docs.developer": "Developer-Docs", + "content.menus.docs.api": "API-Docs", + "content.menus.docs.product": "Product-Docs", + "content.menus.docs.about": "About CodeFuse" +} diff --git a/.dumi/theme/locales/zh-CN.json b/.dumi/theme/locales/zh-CN.json new file mode 100644 index 0000000..7825b08 --- /dev/null +++ b/.dumi/theme/locales/zh-CN.json @@ -0,0 +1,45 @@ +{ + "header.search.placeholder": "输入关键字搜索...", + "header.color.mode.light": "亮色模式", + "header.color.mode.dark": "暗色模式", + "header.color.mode.auto": "跟随系统", + "header.social.github": "GitHub", + "header.social.weibo": "微博", + "header.social.twitter": "Twitter", + "header.social.gitlab": "GitLab", + "header.social.facebook": "Facebook", + "header.social.zhihu": "知乎", + "header.social.yuque": "语雀", + "header.social.linkedin": "Linkedin", + "previewer.actions.code.expand": "展开代码", + "previewer.actions.code.shrink": "收起代码", + "previewer.actions.codesandbox": "在 CodeSandbox 中打开", + "previewer.actions.sketch": "拷贝到 Sketch", + "previewer.actions.sketch.group": "拷贝为 Sketch Group", + "previewer.actions.sketch.symbol": "拷贝为 Sketch Symbol", + "previewer.actions.sketch.divider": "----------------------", + "previewer.actions.sketch.guide": "如何粘贴到 SKetch?", + "previewer.actions.stackblitz": "在 StackBlitz 中打开", + "previewer.actions.separate": "在独立页面中打开", + "404.title": "页面未找到", + "404.back": "返回首页", + "api.component.name": "属性名", + "api.component.description": "描述", + "api.component.type": "类型", + "api.component.default": "默认值", + "api.component.required": "(必选)", + "api.component.unavailable": "必须启用 apiParser 才能使用自动 API 特性", + "api.component.loading": "属性定义正在解析中,稍等片刻...", + "api.component.not.found": "未找到 {id} 组件的属性定义", + "content.tabs.default": "文档", + "content.footer.last.updated": "最后更新时间:", + "content.footer.actions.edit": "帮助改进此文档", + 
"content.footer.actions.previous": "上一篇", + "content.footer.actions.next": "下一篇", + "search.not.found": "未找到相关内容", + "layout.sidebar.btn": "侧边菜单", + "content.menus.docs.developer": "开发者文档", + "content.menus.docs.api": "API文档", + "content.menus.docs.product": "产品文档", + "content.menus.docs.about": "关于 CodeFuse" +} diff --git a/.dumi/theme/slots/AboutDocs/index.less b/.dumi/theme/slots/AboutDocs/index.less new file mode 100644 index 0000000..133c5f9 --- /dev/null +++ b/.dumi/theme/slots/AboutDocs/index.less @@ -0,0 +1,120 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-about { + margin: -@s-header-height auto 0 auto; + // margin: 0 auto ; + height: 100%; + display: flex; + flex-direction: column; + justify-content: center; + + @media @mobile { + margin-top: -@s-header-height-m - 20; + padding-top: 160px; + height: 660px; + } + + +* { + position: relative; + } + + .banner { + position: relative; + padding-top: 30px; + height: 380px; + width: 100%; + box-sizing: border-box; + // background: linear-gradient(to top, #30cfd0 0%, #330867 100%); + background-image: url('https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*wCCKR7plqDMAAAAAAAAAAAAADlHYAQ/original'); + background-repeat: no-repeat; + background-size: cover; + background-position: center; + display: flex; + align-items: center; + justify-content: center; + flex-direction: column; + + .bannerContent { + padding: 0px 24px; + margin-top: 30px; + width: 900px; + color: #f8f9fa; + font-size: 2.7rem; + display: flex; + justify-content: center; + + img { + width: 35%; + } + } + } + + p { + font-weight: 500; + text-align: center; + line-height: 72px; + margin-top: 0px; + font-size: 36px; + color: #ffffff; + letter-spacing: 1.21px; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + + @media @mobile { + font-size: 16px; + } + } + + &-actions { + margin-top: 48px; + display: flex; + justify-content: center; + + >a { + display: inline-block; + height: 52px; + font-size: 18px; + 
line-height: 52px; + text-decoration: none; + min-width: 168px; + + border-radius: 16px; + box-sizing: border-box; + transition: opacity 0.2s; + + @media @mobile { + font-size: 16px; + height: 42px; + line-height: 40px; + min-width: 128px; + } + + &:hover { + opacity: 0.8; + } + + &:not(:first-child) { + margin-inline-start: 48px; + color: @c-primary; + border: 2px solid @c-primary; + + @{dark-selector} & { + color: @c-primary-dark; + border-color: @c-primary-dark; + } + } + + &:first-child { + color: #fff; + background-color: @c-primary; + + @{dark-selector} & { + background-color: @c-primary-dark; + } + } + } + } +} + diff --git a/.dumi/theme/slots/AboutDocs/index.tsx b/.dumi/theme/slots/AboutDocs/index.tsx new file mode 100644 index 0000000..74c876f --- /dev/null +++ b/.dumi/theme/slots/AboutDocs/index.tsx @@ -0,0 +1,21 @@ +import { useRouteMeta } from 'dumi'; +import React, { type FC } from 'react'; +import './index.less'; +import ContentText from '../ContentText'; + +const AboutDocs: FC = () => { + const { frontmatter } = useRouteMeta(); + return ( +
+
+
+ +
+
+ +
+ ); +}; +export default AboutDocs; diff --git a/.dumi/theme/slots/AutomatedTesting/index.less b/.dumi/theme/slots/AutomatedTesting/index.less new file mode 100644 index 0000000..adf5ba7 --- /dev/null +++ b/.dumi/theme/slots/AutomatedTesting/index.less @@ -0,0 +1,60 @@ +.automatedTesting { + margin-top: 180px; + display: flex; + justify-content: center; + position: relative; + background-image: url('https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*C8E9RZGf5eEAAAAAAAAAAAAADlHYAQ/original'); + background-repeat: no-repeat; + background-size: cover; + background-position: center; + + + .automatedTesting-center { + display: flex; + justify-content: space-between; + flex-direction: row; + width: 1200px; + margin: 41px 0 25px 0; + + img { + height: 628px; + height: 507px; + border-radius: 25px; + } + + .automatedTestingContent { + margin-left: 57px; + padding-top: 41px; + + .generationTitle { + margin: 0 auto; + text-align: left; + font-size: 40px; + color: #ffffff; + letter-spacing: 1.35px; + font-weight: 600; + background-image: linear-gradient(90deg, #d8d8d8 0%, #545eff 100%); + display: inline-block; + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + + .line { + margin-top: 19px; + background-image: linear-gradient(90deg, #e5b2ca 0%, #7546f3 100%); + height: 4px; + width: 323px; + } + } + + .desc { + width: 542px; + margin-top: 48px; + font-size: 18px; + color: #ffffff; + letter-spacing: 0.61px; + opacity: 0.8; + } + } + } +} diff --git a/.dumi/theme/slots/AutomatedTesting/index.tsx b/.dumi/theme/slots/AutomatedTesting/index.tsx new file mode 100644 index 0000000..f92c2a4 --- /dev/null +++ b/.dumi/theme/slots/AutomatedTesting/index.tsx @@ -0,0 +1,24 @@ +import { useRouteMeta } from 'dumi'; +import './index.less'; +import React, { type FC } from 'react'; + +const AutomatedTesting: FC = () => { + const { frontmatter } = useRouteMeta(); + if (!('AutomatedTesting' in frontmatter)) return null; + return
+
+ +
+
+ {frontmatter.AutomatedTesting.title} +
+
+
+ {frontmatter.AutomatedTesting.description} +
+
+
+
+}; + +export default AutomatedTesting; diff --git a/.dumi/theme/slots/Banner/index.less b/.dumi/theme/slots/Banner/index.less new file mode 100644 index 0000000..9be9fc1 --- /dev/null +++ b/.dumi/theme/slots/Banner/index.less @@ -0,0 +1,30 @@ +.banner { + position: relative; + padding-top: 30px; + height: 380px; + width: 100%; + box-sizing: border-box; + // background: linear-gradient(to top, #30cfd0 0%, #330867 100%); + background-repeat: no-repeat; + background-size: cover; + background-position: center; + display: flex; + align-items: center; + justify-content: center; + flex-direction: column; + + .bannerContent { + padding: 0px 24px; + margin-top: 30px; + width: 900px; + color: #f8f9fa; + font-size: 2.7rem; + display: flex; + justify-content: center; + + img { + width: 30%; + } + + } +} diff --git a/.dumi/theme/slots/Banner/index.tsx b/.dumi/theme/slots/Banner/index.tsx new file mode 100644 index 0000000..9cf0f93 --- /dev/null +++ b/.dumi/theme/slots/Banner/index.tsx @@ -0,0 +1,16 @@ +import React, { type FC } from 'react'; +import './index.less'; +const Banner: FC <{ bannerBg?: string; bannerTitle?: string; }> = (props) => { + return ( +
+
+
+ +
+
+
+ ); +}; +export default Banner; diff --git a/.dumi/theme/slots/CodeAnalysis/index.less b/.dumi/theme/slots/CodeAnalysis/index.less new file mode 100644 index 0000000..28cf9a2 --- /dev/null +++ b/.dumi/theme/slots/CodeAnalysis/index.less @@ -0,0 +1,53 @@ +.codeAnalysis { + margin-top: 71px; + display: flex; + justify-content: center; + + .codeAnalysis-center { + display: flex; + justify-content: space-between; + flex-direction: row; + width: 1200px; + + img { + width: 614px; + height: 383px; + border-radius: 25px; + } + + .codeAnalysisContent { + margin-left: 57px; + + .codeAnalysisTitle { + margin: 0 auto; + text-align: left; + min-width: 343px; + font-size: 40px; + color: #ffffff; + letter-spacing: 1.35px; + font-weight: 600; + background-image: linear-gradient(90deg, #d8d8d8 0%, #545eff 100%); + display: inline-block; + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + + .line { + margin-top: 19px; + background-image: linear-gradient(90deg, #e5b2ca 0%, #7546f3 100%); + height: 4px; + width: 323px; + } + } + + .desc { + width: 542px; + margin-top: 48px; + font-size: 18px; + color: #ffffff; + letter-spacing: 0.61px; + opacity: 0.8; + } + } + } +} diff --git a/.dumi/theme/slots/CodeAnalysis/index.tsx b/.dumi/theme/slots/CodeAnalysis/index.tsx new file mode 100644 index 0000000..0e06785 --- /dev/null +++ b/.dumi/theme/slots/CodeAnalysis/index.tsx @@ -0,0 +1,24 @@ +import { Link, useLocale, useSiteData, useRouteMeta} from 'dumi'; +import './index.less'; +import React, { type FC } from 'react'; + +const CodeAnalysis: FC = () => { + const { frontmatter } = useRouteMeta(); + if (!('CodeAnalysis' in frontmatter)) return null; + return
+
+ +
+
+ {frontmatter.CodeAnalysis.title} +
+
+
+ {frontmatter.CodeAnalysis.description} +
+
+
+
+}; + +export default CodeAnalysis; diff --git a/.dumi/theme/slots/CodeGeneration/index.less b/.dumi/theme/slots/CodeGeneration/index.less new file mode 100644 index 0000000..639e558 --- /dev/null +++ b/.dumi/theme/slots/CodeGeneration/index.less @@ -0,0 +1,169 @@ +.code-Generation { + // margin-top: 71px; + display: flex; + justify-content: center; + position: relative; + padding: 73px 0; + position: relative; + background-image: url('https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*BBG4RJuqfbMAAAAAAAAAAAAADlHYAQ/original'); + background-repeat: no-repeat; + background-size: cover; + background-position: center; + + + .code-Generation-center { + display: flex; + justify-content: center; + flex-direction: column; + min-width: 1200px; + + .generationTitle { + margin: 0 auto; + text-align: center; + min-width: 343px; + font-size: 40px; + color: #ffffff; + letter-spacing: 1.35px; + font-weight: 600; + background-image: linear-gradient(90deg, #d8d8d8 0%, #545eff 100%); + display: inline-block; + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + + .line { + margin: 0 auto; + margin-top: 19px; + background-image: linear-gradient(90deg, #e5b2ca 0%, #7546f3 100%); + height: 4px; + width: 323px; + } + } + + + .generationContent { + margin-top: 190px; + height: 445px; + width: 1200px; + + .generationContentItem { + display: flex !important; + justify-content: space-between; + flex-direction: row; + width: 1200px; + + img { + width: 614px; + height: 445px; + border-radius: 25px; + } + + .generationText { + margin-left: 57px; + + .generationTextTitle { + margin: 0 auto; + text-align: left; + min-width: 343px; + font-size: 40px; + color: #ffffff; + letter-spacing: 1.35px; + font-weight: 600; + background-image: linear-gradient(90deg, #d8d8d8 0%, #545eff 100%); + display: inline-block; + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + } + + .desc { + font-family: 
PingFangSC-Regular; + width: 530px; + margin-top: 31px; + font-size: 18px; + color: #ffffff; + letter-spacing: 0.61px; + opacity: 0.8; + } + } + } + + // 按钮列 + .buttomDots { + display: flex; + justify-content: space-between; + width: 576px; + height: 40px; + text-align: center; + + } + + .generationButtomzh-CN { + width: 132px; + display: flex; + height: 40px; + border-radius: 25px; + border: 1px solid #979797; + font-size: 18px; + color: #ffffff; + letter-spacing: 2.7px; + font-weight: 600; + align-items: center; + justify-content: center; + + } + + .generationButtomen-US { + display: flex; + width: 145px; + height: 40px; + border-radius: 25px; + border: 1px solid #979797; + font-size: 18px; + color: #ffffff; + font-weight: 600; + align-items: center; + justify-content: center; + + } + } + } + + .slick-prev { + width: 58px; + height: 58px; + top: 225px; + left: -100px; + } + + .slick-next { + width: 58px; + height: 58px; + top: 225px; + right: -100px; + } + + + .slick-list { + margin: 0 auto; + } + + .slick-active { + .generationButtomzh-CN { + border: 0 !important; + background-image: linear-gradient(90deg, #6777ff 0%, #7647f3 100%); + } + + .generationButtomen-US { + border: 0 !important; + background-image: linear-gradient(90deg, #6777ff 0%, #7647f3 100%); + } + } + + .slick-dots { + li { + width: 145px; + } + } + +} diff --git a/.dumi/theme/slots/CodeGeneration/index.tsx b/.dumi/theme/slots/CodeGeneration/index.tsx new file mode 100644 index 0000000..74230be --- /dev/null +++ b/.dumi/theme/slots/CodeGeneration/index.tsx @@ -0,0 +1,69 @@ +import { Link, useLocale, useSiteData, useRouteMeta } from 'dumi'; +import './index.less'; +import React, { type FC } from 'react'; +import "slick-carousel/slick/slick.css"; +import "slick-carousel/slick/slick-theme.css"; +import Slider from "react-slick"; + +const CodeGeneration: FC = () => { + const locale = useLocale(); + const { frontmatter } = useRouteMeta(); + const settings = { + dots: true, + infinite: true, + speed: 
500, + slidesToShow: 1, + slidesToScroll: 1, + nextArrow: , + prevArrow: , + appendDots: dots => ( +
{dots}
+ + ), + customPaging: i => ( +
+ + {frontmatter?.CodeGenerationTitle.buttomText}{i + 1} +
+ ) + }; + return
+
+
+ {frontmatter?.CodeGenerationTitle.title} +
+
+
+ + { + frontmatter?.CodeGeneration.map(item => { + return
+ +
+
+ {item.title} +
+
+ {item.description} +
+
+
+ }) + } +
+
+
+
+}; +export default CodeGeneration; diff --git a/.dumi/theme/slots/ColorSwitch/index.less b/.dumi/theme/slots/ColorSwitch/index.less new file mode 100644 index 0000000..0b9e11d --- /dev/null +++ b/.dumi/theme/slots/ColorSwitch/index.less @@ -0,0 +1,61 @@ +@import (reference) '../../styles/variables.less'; + +.@{prefix}-color-switch { + position: relative; + font-size: 0; + line-height: 0; + + @media screen and (max-width: 1430px) { + &::before { + left: auto; + right: auto; + inset-inline-end: -15px; + transform: none; + + [class*='-switch'] + &, + [class*='-select'] + & { + inset-inline-end: 0; + } + } + } + + [class*='-switch'] + &, + [class*='-select'] + & { + margin-inline-start: 35px; + margin-inline-end: -15px; + padding-inline: 15px; + // border-inline-start: 1px solid @c-border; + + @{dark-selector} & { + border-inline-start-color: @c-border-dark; + } + } + + svg { + width: 16px; + fill: @c-text-secondary; + + @{dark-selector} & { + fill: @c-text-secondary-dark; + } + } + + &:hover svg { + fill: @c-primary; + + @{dark-selector} & { + fill: @c-primary-dark; + } + } + + select { + position: absolute; + inset: 0 15%; + opacity: 0; + width: 100%; + min-width: 16px; + max-width: 70%; + height: 16px; + cursor: pointer; + } +} diff --git a/.dumi/theme/slots/ColorSwitch/index.tsx b/.dumi/theme/slots/ColorSwitch/index.tsx new file mode 100644 index 0000000..e7d2444 --- /dev/null +++ b/.dumi/theme/slots/ColorSwitch/index.tsx @@ -0,0 +1,68 @@ +import { useIntl, usePrefersColor, useSiteData } from 'dumi'; +import React, { type FC } from 'react'; +import './index.less'; + +const IconDark = ({ + onClick, +}: { + onClick: React.MouseEventHandler; +}) => ( + + + +); + +const IconLight = ({ + onClick, +}: { + onClick: React.MouseEventHandler; +}) => ( + + + +); + + +const IconAuto = () => ( + + + +); + +const ICON_MAPPING = { + light: IconLight, + dark: IconDark, + auto: IconAuto, +}; + +const ColorSwitch: FC = () => { + const { + themeConfig: { + prefersColor: { default: 
defaultColor }, + }, + } = useSiteData(); + const intl = useIntl(); + const [, prefersColor = defaultColor, setPrefersColor] = usePrefersColor(); + const Icon = ICON_MAPPING[prefersColor]; + + // 切换颜色模式的函数 + const switchColorMode = () => { + // 根据当前模式切换到另一模式 + const nextMode = prefersColor === 'light' ? 'dark' : 'light'; + setPrefersColor(nextMode); + }; + + return ( + + {Icon && } + + ); +}; + +export default ColorSwitch; diff --git a/.dumi/theme/slots/Content/heti.scss b/.dumi/theme/slots/Content/heti.scss new file mode 100644 index 0000000..b628311 --- /dev/null +++ b/.dumi/theme/slots/Content/heti.scss @@ -0,0 +1,4 @@ +// override .heti root-selector +$root-selector: '.markdown'; +$line-length: 100%; +@import 'heti/lib/heti.scss'; diff --git a/.dumi/theme/slots/Content/index.less b/.dumi/theme/slots/Content/index.less new file mode 100644 index 0000000..d150613 --- /dev/null +++ b/.dumi/theme/slots/Content/index.less @@ -0,0 +1,435 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.markdown { + color: @c-text; + + p[align="center"] { + display: flex; + text-align: center; + justify-content: center; + } + + + @{dark-selector} & { + color: @c-text-dark; + } + + // hyperlink + a { + color: @c-primary; + + @{dark-selector} & { + color: @c-primary-dark; + } + } + + img { + max-width: 100%; + + @{dark-selector} & { + opacity: 0.8; + } + } + + // inline code + *:not(pre) code { + padding: 2px 5px; + color: #d56161; + background: darken(@c-site-bg, 2%); + border-radius: 2px; + + @{dark-selector} & { + background: lighten(@c-site-bg-dark, 5%); + } + } + + // pre tag + pre { + font-size: 14px; + padding-left: 24px; + padding-right: 24px; + background-color: tint(@c-site-bg, 50%); + + @{dark-selector} & { + background-color: shade(@c-site-bg-dark, 50%); + } + } + + // table + table { + width: 100%; + table-layout: auto; + } + + th { + background-color: tint(@c-site-bg, 50%); + + @{dark-selector} & { + background-color: shade(@c-site-bg-dark, 50%); + } + } + 
+ th, + td { + padding-block-start: 10px; + padding-block-end: 10px; + padding-inline-start: 16px; + padding-inline-end: 16px; + border-color: @c-border-light; + + @{dark-selector} & { + border-color: @c-border-less-dark; + } + } + + // blockquote + blockquote { + font-style: italic; + margin-inline-start: 0; + margin-inline-end: 0; + background-color: tint(@c-site-bg, 50%); + border-left: 5px solid @c-border-light; + + @{dark-selector} & { + background-color: shade(@c-site-bg-dark, 50%); + border-left-color: @c-border-less-dark; + } + } + + // list + ul li { + line-height: 1.8; + } + + // anchor of headings + h1, + h2, + h3, + h4, + h5, + h6 { + >a[aria-hidden]:first-child { + float: left; + width: 20px; + padding-inline-end: 4px; + margin-inline-start: -24px; + color: @c-text; + // hide phantom blank node + font-size: 0; + text-align: right; + line-height: inherit; + + @{dark-selector} & { + color: @c-text-dark; + } + + [data-direction='rtl'] & { + float: right; + } + + &:hover { + border: 0; + } + + >.icon-link::before { + content: '#'; + color: @c-text-secondary; + font-size: 20px; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + } + } + + &:not(:hover)>a[aria-hidden]:first-child>.icon-link { + visibility: hidden; + } + } + + // horizontal line + hr { + background-color: @c-border-light; + + @{dark-selector} & { + background-color: @c-border-less-dark; + } + } +} + +.@{prefix}-article { + display: flex; + flex: 1; + flex-direction: column; + min-width: 0; + max-width: 100%; + box-sizing: border-box; + + .@{prefix}-content { + display: flex; + flex: 1; + flex-direction: column; + min-width: 0; + max-width: 100%; + box-sizing: border-box; + + &:not([data-no-sidebar]) { + padding: @s-content-padding @s-content-padding 0; + background-color: #fff; + border-radius: 10px; + // box-shadow: 0 0 24px 0 rgba(0, 0, 0, 5%); + + @{dark-selector} & { + background-color: lighten(@c-site-bg-dark, 3%); + } + + &[data-no-footer] { + padding-bottom: 20px; + } + + 
@media @mobile { + max-width: initial; + margin: 0 -24px; + padding: 24px 24px 0; + border-radius: 0; + box-shadow: none; + + &[data-no-footer] { + padding: 24px; + } + } + } + + article { + flex: 1; + } + + .@{prefix}-header+main>&, + .@{prefix}-doc-layout-mobile-bar+main>& { + min-height: calc(100vh - @s-header-height); + + @media @mobile { + min-height: calc(100vh - @s-header-height-m - 40px); + } + } + + &[data-no-sidebar][data-no-footer] { + margin-bottom: @s-content-padding; + + @media @mobile { + margin-bottom: 24px; + } + } + } + + .dumi-default-article { + display: flex; + justify-content: center; + } + + .foot { + display: flex; + // width: 1200px; + // margin: 0 auto; + padding: 48px 40px 0; + flex: 1; + height: 322px; + box-sizing: border-box; + // background: @c-site-bg; + flex-direction: column; + justify-content: center; + + @{dark-selector} & { + background-color: @c-site-bg-dark; + } + + .subtitle { + box-sizing: border-box; + padding-bottom: 16px; + font-family: PingFangSC, sans-serif; + font-weight: 500; + font-size: 14px; + color: #000; + line-height: 22px; + + @{dark-selector} & { + color: #6d7080; + } + } + + .link { + margin-bottom: 16px; + color: #6d7080; + line-height: 22px; + + @{dark-selector} & { + color: #6d7080; + } + + a { + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 14px; + color: #6d7080 !important; + line-height: 22px; + } + } + + .link-box { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 48px; + + .qrcode { + height: 108px; + width: 108px; + background-color: #fff; + border-radius: 12px; + margin-bottom: 8px; + } + + .qrcode-mr24 { + margin-right: 24px; + + >img { + padding: 6px; + } + } + + .qrcodetext { + width: 108px; + text-align: center; + height: 22px; + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 14px; + color: #6d7080; + line-height: 22px; + } + + >div:last-child { + margin: 0 auto; + } + } + + .copyright { + line-height: 60px; 
+ text-align: center; + border-top: 1px solid rgba(247, 239, 239, 0.392); + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 12px; + color: #6d7080; + max-width: 1200px; + width: 100%; + } + } + + .foot-mobile { + display: none; + } + + @media only screen and (max-width: 767px) { + .foot { + display: none; + } + + .foot-mobile { + display: flex; + flex-direction: column; + padding: 40px 36px 0px 0px; + width: 100vw; + box-sizing: border-box; + // background: #f6f8fa; + font-family: PingFangSC, sans-serif; + + .home-problem-collapse { + margin-top: 22px; + border: none; + background: transparent; + + :global .ant-collapse-item { + padding: 20px 0 0; + border: none; + + .ant-collapse-header { + padding: 0; + font-size: 14px; + color: #000; + font-weight: 500; + line-height: 22px; + } + + .ant-collapse-content { + background-color: transparent; + border-top: 0; + + .ant-collapse-content-box { + padding-left: 0; + color: hsla(0deg, 0%, 100%, 80%); + padding-top: 12px; + padding-bottom: 12px; + } + } + } + + .link { + margin-top: 10px; + + .linkA { + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 14px; + color: #6d7080; + line-height: 22px; + } + } + } + + .link-box { + margin-top: 50px; + display: flex; + align-items: center; + justify-content: center; + + .qrcode-mr86 { + margin-right: 86px; + } + + .link-qrcode { + margin-top: 14px; + display: flex; + align-items: center; + justify-content: center; + width: 120px; + height: 120px; + border-radius: 8px; + background: #fff; + + .qrcode { + width: 105px; + height: 105px; + } + } + + .qrcodetext { + color: #6d7080; + margin-top: 12px; + text-align: center; + } + } + + .copyright { + margin-top: 64px; + text-align: center; + border-top: 1px solid rgba(247, 239, 239, 0.392); + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 12px; + color: #6d7080; + } + } + } + +} diff --git a/.dumi/theme/slots/Content/index.tsx b/.dumi/theme/slots/Content/index.tsx new file 
mode 100644 index 0000000..e1f654b --- /dev/null +++ b/.dumi/theme/slots/Content/index.tsx @@ -0,0 +1,36 @@ +import { useRouteMeta, useSidebarData, useSiteData } from 'dumi'; +import React, { type FC, type ReactNode } from 'react'; +import './heti.scss'; +import './index.less'; + +const Content: FC<{ children: ReactNode }> = (props) => { + console.log('children==',props.children); + const sidebar = useSidebarData(); + const { themeConfig } = useSiteData(); + const { frontmatter } = useRouteMeta(); + return ( +
+
+ {props.children} +
+ {/*
+
+ Copyright © 支付宝(中国)网络技术有限公司 | + 备案号:沪ICP备15027489号 +
+
+
+
+ Copyright © 支付宝(中国)网络技术有限公司 | + 备案号:沪ICP备15027489号 +
+
*/} +
+ ); +}; + +export default Content; diff --git a/.dumi/theme/slots/ContentFooter/index.less b/.dumi/theme/slots/ContentFooter/index.less new file mode 100644 index 0000000..3272c32 --- /dev/null +++ b/.dumi/theme/slots/ContentFooter/index.less @@ -0,0 +1,172 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-content-footer { + margin-top: 48px; + color: @c-text-note; + font-size: 14px; + line-height: 1; + + @{dark-selector} & { + color: @c-text-note-dark; + } + + svg { + fill: @c-text-note; + width: 14px; + vertical-align: -0.13em; + transition: fill 0.2s; + + @{dark-selector} & { + fill: @c-text-note-dark; + } + } + + > dl { + display: flex; + justify-content: space-between; + margin: 0; + padding-bottom: 12px; + + &:empty { + display: none; + } + + dd { + margin: 0; + + svg { + margin-inline-end: 4px; + } + + > a { + color: @c-primary; + + @{dark-selector} & { + color: @c-primary-dark; + } + + &:not(:hover) { + text-decoration: none; + } + + > svg { + fill: @c-primary; + + @{dark-selector} & { + fill: @c-primary-dark; + } + } + } + } + } + + > nav { + padding: 24px 0; + border-block-start: 1px solid @c-border-light; + overflow: hidden; + + &:empty { + display: none; + } + + @{dark-selector} & { + border-block-start-color: @c-border-less-dark; + } + + > a { + max-width: 180px; + min-width: 120px; + color: @c-primary; + font-size: 16px; + line-height: 22px; + text-decoration: none; + border-radius: 2px; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + + @media @tablet { + min-width: initial; + max-width: 80px; + } + + @{dark-selector} & { + color: @c-primary-dark; + } + + &[data-prev] { + float: left; + padding-inline-end: 24px; + + svg { + margin-inline-end: 4px; + } + + [data-direction='rtl'] & { + float: right; + + svg { + transform: rotate(180deg); + } + } + } + + &[data-next] { + float: right; + text-align: end; + padding-inline-start: 24px; + + svg { + margin-inline-start: 4px; + transform: rotate(180deg); + } + 
+ [data-direction='rtl'] & { + float: left; + + svg { + transform: rotate(0); + } + } + } + + small { + display: block; + margin-bottom: 14px; + color: @c-text-note; + font-size: 14px; + transition: color 0.2s; + + @{dark-selector} & { + color: @c-text-note-dark; + } + } + + &:hover { + small { + color: @c-text-secondary; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + } + + svg { + fill: @c-text-secondary; + + @{dark-selector} & { + fill: @c-text-secondary-dark; + } + } + } + } + } + + [data-no-sidebar] > & { + display: none; + } + + :not([data-no-sidebar]) > & + .@{prefix}-footer { + margin-top: 0; + } +} diff --git a/.dumi/theme/slots/ContentFooter/index.tsx b/.dumi/theme/slots/ContentFooter/index.tsx new file mode 100644 index 0000000..ee35adb --- /dev/null +++ b/.dumi/theme/slots/ContentFooter/index.tsx @@ -0,0 +1,108 @@ +import { ReactComponent as IconLeft } from '@ant-design/icons-svg/inline-svg/outlined/arrow-left.svg'; +import { ReactComponent as IconClock } from '@ant-design/icons-svg/inline-svg/outlined/clock-circle.svg'; +import { ReactComponent as IconEdit } from '@ant-design/icons-svg/inline-svg/outlined/edit.svg'; +import { + FormattedMessage, + Link, + useIntl, + useLocation, + useRouteMeta, + useSidebarData, + useSiteData, +} from 'dumi'; +import React, { useLayoutEffect, useState, type FC } from 'react'; +import './index.less'; + +const ContentFooter: FC = () => { + const { pathname } = useLocation(); + const sidebar = useSidebarData(); + const { themeConfig } = useSiteData(); + const { frontmatter } = useRouteMeta(); + const intl = useIntl(); + const [prev, setPrev] = useState< + typeof sidebar[0]['children'][0] | undefined + >(undefined); + const [next, setNext] = useState(undefined); + const [isoLastUpdated, setIsoLastUpdated] = useState(''); + const [lastUpdated, setLastUpdated] = useState(''); + const showEditLink = themeConfig.editLink && frontmatter.filename; + const showLastUpdated = themeConfig.lastUpdated && 
frontmatter.lastUpdated; + + // calculate the previous and next page + useLayoutEffect(() => { + if (sidebar) { + const items = sidebar.reduce( + (ret, group) => ret.concat(group.children), + [], + ); + const current = items.findIndex((item) => item.link === pathname); + + setPrev(items[current - 1]); + setNext(items[current + 1]); + } + }, [pathname, sidebar]); + + // to avoid timestamp mismatched between server and client + useLayoutEffect(() => { + if (showLastUpdated) { + setIsoLastUpdated(new Date(frontmatter.lastUpdated!).toISOString()); + setLastUpdated( + new Intl.DateTimeFormat(undefined, { + dateStyle: 'short', + timeStyle: 'short', + }).format(frontmatter.lastUpdated), + ); + } + }, [showLastUpdated]); + + return ( +
+ {/*
+ {showLastUpdated && ( +
+ + + +
+ )} + {showEditLink && ( +
+ + + + +
+ )} +
*/} + +
+ ); +}; + +export default ContentFooter; diff --git a/.dumi/theme/slots/ContentText/index.less b/.dumi/theme/slots/ContentText/index.less new file mode 100644 index 0000000..509b710 --- /dev/null +++ b/.dumi/theme/slots/ContentText/index.less @@ -0,0 +1,27 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +@font-face { + font-family: AlibabaPuHuiTi_2_85_Bold; +} + +.dumi-content-text { + width: 900px; + margin: 0 auto; + color: #e4e9ec; + padding: 24px; + min-height: 700px; + + .title { + margin: 40px 0 32px; + text-align: left; + color: #e4e9ec; + font-weight: 400; + font-size: 32px; + } + .desc { + margin: 0 0 20px; + line-height: 30px; + font-size: 16px; + letter-spacing: 0.61px; + } +} diff --git a/.dumi/theme/slots/ContentText/index.tsx b/.dumi/theme/slots/ContentText/index.tsx new file mode 100644 index 0000000..7803c1c --- /dev/null +++ b/.dumi/theme/slots/ContentText/index.tsx @@ -0,0 +1,11 @@ +import React, { type FC, type string } from 'react'; +import './index.less'; + +const ContentText: FC<{ title: string , desc:any }> = (props) => ( +
+
{props.title}
+
{props.desc}
+
+); + +export default ContentText; diff --git a/.dumi/theme/slots/Contribution/index.less b/.dumi/theme/slots/Contribution/index.less new file mode 100644 index 0000000..ae446c4 --- /dev/null +++ b/.dumi/theme/slots/Contribution/index.less @@ -0,0 +1,78 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-Contribution { + margin: -@s-header-height auto 0 auto; + height: 100%; + display: flex; + flex-direction: column; + justify-content: center; + + @media @mobile { + margin-top: -@s-header-height-m - 20; + padding-top: 160px; + height: 660px; + } + + +* { + position: relative; + } + + .banner { + position: relative; + padding-top: 30px; + height: 380px; + width: 100%; + box-sizing: border-box; + // background: linear-gradient(-225deg, #7085B6 0%, #87A7D9 50%, #DEF3F8 100%); + background-image: url('https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*EamVR52XxpUAAAAAAAAAAAAADlHYAQ/original'); + background-repeat: no-repeat; + background-size: cover; + background-position: center; + display: flex; + align-items: center; + justify-content: center; + flex-direction: column; + + .bannerContent { + width: 1200px; + padding: 0px 24px; + margin-top: 30px; + width: 900px; + color: #f8f9fa; + font-size: 2.7rem; + display: flex; + justify-content: center; + + + img { + width: 35%; + } + } + } + + .content { + width: 1000px; + margin: 0 auto; + color: #e4e9ec; + line-height: 30px; + padding: 24px; + + .contentTitle { + margin: 40px 0 32px; + text-align: left; + font-weight: 400; + font-size: 32px; + } + + .contentText { + font-size: 16px; + letter-spacing: 0.61px; + padding-left: 20px; + line-height: 30px; + + li { + margin-top: 5px; + } + } + } +} diff --git a/.dumi/theme/slots/Contribution/index.tsx b/.dumi/theme/slots/Contribution/index.tsx new file mode 100644 index 0000000..44fc0bb --- /dev/null +++ b/.dumi/theme/slots/Contribution/index.tsx @@ -0,0 +1,30 @@ +import { useRouteMeta, useOutlet, } from 'dumi'; +import React, { type FC } from 'react'; 
+import './index.less'; +import ContentText from '../ContentText'; + +const Contribution: FC = () => { + const { frontmatter } = useRouteMeta(); + console.log('frontmatter,,,', frontmatter); + + return ( +
+
+
+ +
+
+
+
{frontmatter.contentTitle}
+
    + { + frontmatter.list.map((item: string) => { + return
  • {item}
  • + }) + } +
+
+
+ ); +}; +export default Contribution; diff --git a/.dumi/theme/slots/DevOps/index.less b/.dumi/theme/slots/DevOps/index.less new file mode 100644 index 0000000..ce95858 --- /dev/null +++ b/.dumi/theme/slots/DevOps/index.less @@ -0,0 +1,102 @@ +.devOps { + margin-top: 71px; + display: flex; + justify-content: center; + + .devOps-center { + display: flex; + justify-content: center; + flex-direction: column; + width: 1200px; + + .devOpsTitle { + margin: 0 auto; + text-align: center; + min-width: 343px; + font-size: 40px; + color: #ffffff; + letter-spacing: 1.35px; + font-weight: 600; + background-image: linear-gradient(90deg, #d8d8d8 0%, #545eff 100%); + display: inline-block; + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + + .line { + margin: 0 auto; + margin-top: 19px; + background-image: linear-gradient(90deg, #e5b2ca 0%, #7546f3 100%); + height: 4px; + width: 323px; + } + } + + .DevOpsContent { + width: 100%; + + .DevOpsUl { + display: flex; + justify-content: space-between; + flex-direction: row; + flex: 1; + margin-top: 83px; + padding: 0; + + + .DevOpsLi { + position: relative; + width: 379px; + height: 383px; + border-radius: 33px; + cursor: pointer; + + img { + position: absolute; + width: 379px; + height: 383px; + border-radius: 33px; + } + + .title { + position: absolute; + font-size: 36px; + color: #ffffff; + letter-spacing: 1.21px; + font-weight: 600; + top: 38px; + left: 43px; + } + + .desc { + display: none; + position: absolute; + width: 323px; + font-size: 18px; + color: #ffffff; + letter-spacing: 0.61px; + top: 38px; + left: 31px; + } + + &:hover { + background-image: linear-gradient(138deg, #5aefff -10%, #502fe6 42%, #92764d 99%); + border-radius: 33px; + + img { + display: none; + } + + .title { + display: none; + } + + .desc { + display: block; + } + } + } + } + } + } +} diff --git a/.dumi/theme/slots/DevOps/index.tsx b/.dumi/theme/slots/DevOps/index.tsx new file mode 100644 index 
0000000..97cd407 --- /dev/null +++ b/.dumi/theme/slots/DevOps/index.tsx @@ -0,0 +1,30 @@ +import { Link, useLocale, useSiteData, useRouteMeta } from 'dumi'; +import './index.less'; +import React, { type FC } from 'react'; + +const DevOps: FC = () => { + const { frontmatter } = useRouteMeta(); + if (!('DevOps' in frontmatter)) return null; + return
+
+
+ {frontmatter.DevOpsTitle.title} +
+
+
+
    + {frontmatter.DevOps.map((item: any) => { + return
  • + +
    {item.cardTitle}
    +
    {item.description}
    +
  • + }) + } +
+
+
+
+}; + +export default DevOps; diff --git a/.dumi/theme/slots/Foot/index.less b/.dumi/theme/slots/Foot/index.less new file mode 100644 index 0000000..aec155c --- /dev/null +++ b/.dumi/theme/slots/Foot/index.less @@ -0,0 +1,200 @@ +.foot { + display: flex; + width: 1200px; + margin: 0 auto; + padding: 48px 40px 0; + flex: 1; + height: 200px; + box-sizing: border-box; + flex-direction: column; + justify-content: center; + + + .subtitle { + box-sizing: border-box; + padding-bottom: 16px; + font-family: PingFangSC, sans-serif; + font-weight: 500; + font-size: 14px; + color: #000; + line-height: 22px; + + @{dark-selector} & { + color: #6d7080; + } + } + + .link { + margin-bottom: 16px; + color: #6d7080; + line-height: 22px; + + @{dark-selector} & { + color: #6d7080; + } + + a { + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 14px; + color: #6d7080 !important; + line-height: 22px; + } + } + + .link-box { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 48px; + + .qrcode { + height: 108px; + width: 108px; + background-color: #fff; + border-radius: 12px; + margin-bottom: 8px; + } + + .qrcode-mr24 { + margin-right: 24px; + + >img { + padding: 6px; + } + } + + .qrcodetext { + width: 108px; + text-align: center; + height: 22px; + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 14px; + color: #6d7080; + line-height: 22px; + } + + >div:last-child { + margin: 0 auto; + } + } + + .copyright { + line-height: 60px; + text-align: center; + border-top: 1px solid rgba(247, 239, 239, 0.392); + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 12px; + color: #6d7080; + max-width: 1200px; + width: 100%; + } +} + +.foot-mobile { + display: none; +} + +@media only screen and (max-width: 767px) { + .foot { + display: none; + } + + .foot-mobile { + display: flex; + flex-direction: column; + padding: 40px 36px 0px 0px; + width: 100vw; + box-sizing: border-box; + // background: #f6f8fa; + 
font-family: PingFangSC, sans-serif; + + .home-problem-collapse { + margin-top: 22px; + border: none; + background: transparent; + + :global .ant-collapse-item { + padding: 20px 0 0; + border: none; + + .ant-collapse-header { + padding: 0; + font-size: 14px; + color: #000; + font-weight: 500; + line-height: 22px; + } + + .ant-collapse-content { + background-color: transparent; + border-top: 0; + + .ant-collapse-content-box { + padding-left: 0; + color: hsla(0deg, 0%, 100%, 80%); + padding-top: 12px; + padding-bottom: 12px; + } + } + } + + .link { + margin-top: 10px; + + .linkA { + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 14px; + color: #6d7080; + line-height: 22px; + } + } + } + + .link-box { + margin-top: 50px; + display: flex; + align-items: center; + justify-content: center; + + .qrcode-mr86 { + margin-right: 86px; + } + + .link-qrcode { + margin-top: 14px; + display: flex; + align-items: center; + justify-content: center; + width: 120px; + height: 120px; + border-radius: 8px; + background: #fff; + + .qrcode { + width: 105px; + height: 105px; + } + } + + .qrcodetext { + color: #6d7080; + margin-top: 12px; + text-align: center; + } + } + + .copyright { + margin-top: 64px; + text-align: center; + border-top: 1px solid rgba(247, 239, 239, 0.392); + font-family: PingFangSC, sans-serif; + font-weight: 400; + font-size: 12px; + color: #6d7080; + } + } +} diff --git a/.dumi/theme/slots/Foot/index.tsx b/.dumi/theme/slots/Foot/index.tsx new file mode 100644 index 0000000..f7039d6 --- /dev/null +++ b/.dumi/theme/slots/Foot/index.tsx @@ -0,0 +1,25 @@ +import { useRouteMeta, useSidebarData, useSiteData } from 'dumi'; +import React, { type FC, type ReactNode } from 'react'; +import { Row, Col, Collapse } from "antd"; +import classNames from "classnames"; +import './index.less'; + +const Foot: FC = () => { + return ( +
+
+
+ Copyright © 支付宝(中国)网络技术有限公司 | + 备案号:沪ICP备15027489号 +
+
+
+
+ Copyright © 支付宝(中国)网络技术有限公司 | + 备案号:沪ICP备15027489号 +
+
+
+ ); +} +export default Foot; diff --git a/.dumi/theme/slots/Footer/index.less b/.dumi/theme/slots/Footer/index.less new file mode 100644 index 0000000..7681895 --- /dev/null +++ b/.dumi/theme/slots/Footer/index.less @@ -0,0 +1,33 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-footer { + margin-top: @s-content-padding; + border-top: 1px solid @c-border-light; + color: @c-text-note; + font-size: 15px; + line-height: 26px; + text-align: center; + padding: @s-content-padding * 0.6 0; + + @{dark-selector} & { + border-top-color: @c-border-less-dark; + color: @c-text-note-dark; + } + + @media @mobile { + padding: @s-content-padding * 0.3 0; + font-size: 13px; + } + + a { + color: @c-primary; + + @{dark-selector} & { + color: @c-primary-dark; + } + + &:not(:hover) { + text-decoration: none; + } + } +} diff --git a/.dumi/theme/slots/Footer/index.tsx b/.dumi/theme/slots/Footer/index.tsx new file mode 100644 index 0000000..0561465 --- /dev/null +++ b/.dumi/theme/slots/Footer/index.tsx @@ -0,0 +1,18 @@ +import { useSiteData } from 'dumi'; +import React, { type FC } from 'react'; +import './index.less'; + +const Footer: FC = () => { + const { themeConfig } = useSiteData(); + + if (!themeConfig.footer) return null; + + return ( +
+ ); +}; + +export default Footer; diff --git a/.dumi/theme/slots/Header/index.less b/.dumi/theme/slots/Header/index.less new file mode 100644 index 0000000..c07b448 --- /dev/null +++ b/.dumi/theme/slots/Header/index.less @@ -0,0 +1,171 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-header { + top: 0; + position: sticky; + -webkit-backdrop-filter:blur(6px); + backdrop-filter: blur(6px); + z-index: 10; + + &:not([data-static]) { + top: 0; + position: sticky; + // background-color: fadeout(@c-site-bg, 10%); + backdrop-filter: blur(6px); + -webkit-backdrop-filter:blur(6px); + + // @{dark-selector} & { + // background-color: fadeout(@c-site-bg-dark, 10%); + // } + // to avoid backdrop filter conflict with navbar overlay + &[data-mobile-active] { + background-color: @c-site-bg; + backdrop-filter: none; + + // @{dark-selector} & { + // background-color: @c-site-bg-dark; + // } + } + } + + &-content { + display: flex; + align-items: center; + margin: 0 auto; + padding: 0 24px; + // width: 100%; + // width: @s-content-width; + // width: 1300px; + // height: @s-header-height; + height: 70px; + box-sizing: border-box; + justify-content: space-between; + + @media @mobile { + height: @s-header-height-m; + } + } + + &-left { + min-width: @s-sidebar-width - 8px; + display: flex; + justify-content: space-between; + // padding-left: 8px; + } + + &-right { + display: flex; + justify-content: space-between; + min-width: 450px; + + &-aside { + display: flex; + align-items: center; + + @media @mobile { + margin: 8px 16px; + padding-top: 24px; + justify-content: center; + border-top: 1px solid @c-border-light; + + @{dark-selector} & { + border-top-color: @c-border-less-dark; + } + } + } + + @media @mobile { + position: fixed; + top: @s-header-height-m; + left: 0; + right: 0; + height: calc(100vh - @s-header-height-m); + display: block; + background-color: fadeout(@c-site-bg, 40%); + border-top: 1px solid @c-border-light; + backdrop-filter: blur(30px); + 
-webkit-backdrop-filter:blur(30px); + box-sizing: border-box; + transition: all 0.2s; + + @{dark-selector} & { + background-color: fadeout(@c-site-bg-dark, 40%); + border-top: 1px solid @c-border-less-dark; + } + + .@{prefix}-header:not([data-mobile-active]) & { + opacity: 0; + visibility: hidden; + padding-top: 20px; + } + } + } + + &-menu-btn { + position: absolute; + top: 50%; + inset-inline-end: 24px; + padding: 0; + border: 0; + background: transparent; + transform: translateY(-50%); + display: none; + + @media @mobile { + display: block; + } + + >svg { + width: 20px; + fill: @c-text-secondary; + + // @{dark-selector} & { + // fill: @c-text-secondary-dark; + // } + } + } + + .dumi-default-search-bar { + margin-inline-start: 28px; + margin-inline-end: 0; + width: 140px; + height: 40px; + } + + .dumi-default-search-bar-input { + width: 140px; + height: 40px; + } + + .dumi-default-search-shortcut { + display: none; + } + + .dumi-default-icon>svg { + width: 25px; + height: 25px; + fill: #fff + } + + .dumi-default-icon:hover svg { + fill: #1677ff; + } + + .dumi-default-lang-switch { + font-size: 16px; + color: #fff + } + + .dumi-default-lang-switch:hover { + color: #1677ff; + } + + .dumi-default-navbar>li:not(:last-child) { + margin-inline-end: 40px; + } + + .dumi-default-icon { + border-inline-start: transparent + } + +} diff --git a/.dumi/theme/slots/Header/index.tsx b/.dumi/theme/slots/Header/index.tsx new file mode 100644 index 0000000..6519c3e --- /dev/null +++ b/.dumi/theme/slots/Header/index.tsx @@ -0,0 +1,81 @@ +import type { SocialTypes } from '@/client/theme-api/types'; +import { ReactComponent as IconClose } from '@ant-design/icons-svg/inline-svg/outlined/close.svg'; +import { ReactComponent as IconMenu } from '@ant-design/icons-svg/inline-svg/outlined/menu.svg'; +import { useRouteMeta, useSiteData } from 'dumi'; +import ColorSwitch from '../ColorSwitch'; +import HeaderExtra from 'dumi/theme/slots/HeaderExtra'; +import LangSwitch from 
'dumi/theme/slots/LangSwitch'; +import Logo from 'dumi/theme/slots/Logo'; +import Navbar from 'dumi/theme/slots/Navbar'; +import RtlSwitch from 'dumi/theme/slots/RtlSwitch'; +import SearchBar from 'dumi/theme/slots/SearchBar'; +import SocialIcon from 'dumi/theme/slots/SocialIcon'; +import React, { useMemo, useState, type FC } from 'react'; +import './index.less'; +const Header: FC = () => { + const { frontmatter } = useRouteMeta(); + const [showMenu, setShowMenu] = useState(false); + const { themeConfig } = useSiteData(); + console.log('themeConfig==', themeConfig); + console.log('frontmatter==', frontmatter); + const hero = frontmatter.hero; + const socialIcons = useMemo( + () => + themeConfig.socialLinks + ? Object.keys(themeConfig.socialLinks) + .slice(0, 5) + .map((key) => ({ + icon: key as SocialTypes, + link: themeConfig.socialLinks[key as SocialTypes], + })) + : [], + [themeConfig.socialLinks], + ); + // console.log("themeConfig.prefersColor.switch===",themeConfig); + + return ( +
setShowMenu(false)} + > +
+
+ +
+
+ {/* 文档信息下拉弹框 */} + + {/* 导航➕国际化 */} +
+ + + {/* 亮度显示 */} + {/* { + !hero && themeConfig.prefersColor.switch && + } */} + {socialIcons.map((item) => ( + + ))} + + {/* */} +
+
+ {/* 移动端导航栏 */} + +
+
+ ); +}; + +export default Header; diff --git a/.dumi/theme/slots/Hero/index.less b/.dumi/theme/slots/Hero/index.less new file mode 100644 index 0000000..3c89a55 --- /dev/null +++ b/.dumi/theme/slots/Hero/index.less @@ -0,0 +1,122 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-hero { + margin: -@s-header-height - 20 auto 0 auto; + // margin: 0 auto ; + height: 100%; + display: flex; + flex-direction: column; + justify-content: center; + + + @media @mobile { + margin-top: -@s-header-height-m - 20; + padding-top: 160px; + height: 660px; + } + + +* { + position: relative; + } + + .banner { + position: relative; + height: 100vh; + text-align: center; + box-sizing: border-box; + background-image: url('https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*jv_PRqNyheQAAAAAAAAAAAAADlHYAQ/original'); + background-repeat: no-repeat; + background-size: cover; + background-position: center; + display: flex; + align-items: center; + justify-content: center; + flex-direction: column; + } + // &::before { + // content: ''; + // position: absolute; + // display: block; + // top: 0; + // left: 0; + // right: 0; + // bottom: 0; + // opacity: 0.8; + // pointer-events: none; + // background: no-repeat center/cover; + // background-image: url('https://mdn.alipayobjects.com/huamei_v98cj4/afts/img/A*PpkBT7PHhsYAAAAAAAAAAAAADo6VAQ/original'); + // // background-image: url('https://gw.alipayobjects.com/zos/bmw-prod/a6c3488a-994c-4dd3-8e92-2324d9a1ca48/l9dmd9wl_w2858_h1864.png'); + + // @{dark-selector} & { + // opacity: 1; + // } + // } + + p { + font-weight: 300; + text-align: center; + line-height: 72px; + margin-top: 0px; + font-size: 36px; + color: #ffffff; + letter-spacing: 1.21px; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + + @media @mobile { + font-size: 16px; + } + } + + &-actions { + margin-top: 48px; + display: flex; + justify-content: center; + + >a { + display: inline-block; + height: 52px; + font-size: 18px; + line-height: 52px; 
+ text-decoration: none; + min-width: 168px; + + border-radius: 16px; + box-sizing: border-box; + transition: opacity 0.2s; + + @media @mobile { + font-size: 16px; + height: 42px; + line-height: 40px; + min-width: 128px; + } + + &:hover { + opacity: 0.8; + } + + &:not(:first-child) { + margin-inline-start: 48px; + color: @c-primary; + border: 2px solid @c-primary; + + @{dark-selector} & { + color: @c-primary-dark; + border-color: @c-primary-dark; + } + } + + &:first-child { + color: #fff; + background-color: @c-primary; + + @{dark-selector} & { + background-color: @c-primary-dark; + } + } + } + } +} diff --git a/.dumi/theme/slots/Hero/index.tsx b/.dumi/theme/slots/Hero/index.tsx new file mode 100644 index 0000000..0c4be60 --- /dev/null +++ b/.dumi/theme/slots/Hero/index.tsx @@ -0,0 +1,51 @@ +import { Link, useRouteMeta } from 'dumi'; +import HeroTitle from 'dumi/theme/slots/HeroTitle'; +import CodeGeneration from '../CodeGeneration'; +import React, { type FC } from 'react'; +import './index.less'; +import DevOps from '../DevOps'; +import CodeAnalysis from '../CodeAnalysis'; +import IntelligentInference from '../IntelligentInference'; +import AutomatedTesting from '../AutomatedTesting'; +import PerformanceEvaluation from '../PerformanceEvaluation'; + +const Hero: FC = () => { + const { frontmatter } = useRouteMeta(); + if (!('hero' in frontmatter)) return null; + return ( +
+
+ {frontmatter.hero!.title && ( + {frontmatter.hero!.title} + )} + {frontmatter.hero!.description && ( +

+ )} +

+ + + + + + + {/* {Boolean(frontmatter.hero!.actions?.length) && ( +
+ {frontmatter.hero!.actions!.map(({ text, link }) => + /^(\w+:)\/\/|^(mailto|tel):/.test(link) ? ( + + {text} + + ) : ( + + {text} + + ), + )} +
+ )} */} +
+ ); +}; +export default Hero; diff --git a/.dumi/theme/slots/HeroTitle/index.less b/.dumi/theme/slots/HeroTitle/index.less new file mode 100644 index 0000000..974ed16 --- /dev/null +++ b/.dumi/theme/slots/HeroTitle/index.less @@ -0,0 +1,27 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +@font-face { + font-family: AlibabaPuHuiTi_2_85_Bold; +} + +.@{prefix}-hero-title { + margin: 0px; + display: inline-block; + font-family: Alibaba-PuHuiTi, 'Gill Sans', 'Gill Sans MT', Calibri, + 'Trebuchet MS', sans-serif; + color: lighten(desaturate(spin(@c-primary, -13), 10.5), 20); + line-height: 1; + + img { + margin: 0 auto; + width: 755px; + } + + @media @mobile { + font-size: 60px; + } + + @{dark-selector} & { + opacity: 0.7; + } +} diff --git a/.dumi/theme/slots/HeroTitle/index.tsx b/.dumi/theme/slots/HeroTitle/index.tsx new file mode 100644 index 0000000..0bf8fa8 --- /dev/null +++ b/.dumi/theme/slots/HeroTitle/index.tsx @@ -0,0 +1,10 @@ +import React, { type FC, type string } from 'react'; +import './index.less'; + +const HeroTitle: FC<{ children: string }> = (props) => ( +
+ +
+); + +export default HeroTitle; diff --git a/.dumi/theme/slots/IntelligentInference/index.less b/.dumi/theme/slots/IntelligentInference/index.less new file mode 100644 index 0000000..e67a821 --- /dev/null +++ b/.dumi/theme/slots/IntelligentInference/index.less @@ -0,0 +1,53 @@ +.codeAnalysis { + margin-top: 180px; + display: flex; + justify-content: center; + + .IntelligentInference-center { + display: flex; + justify-content: space-between; + flex-direction: row; + width: 1200px; + + img { + height: 628px; + height: 507px; + border-radius: 25px; + } + + .generationContent { + margin-right: 57px; + padding-top: 21px; + + .generationTitle { + margin: 0 auto; + text-align: left; + font-size: 40px; + color: #ffffff; + letter-spacing: 1.35px; + font-weight: 600; + background-image: linear-gradient(90deg, #d8d8d8 0%, #545eff 100%); + display: inline-block; + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + + .line { + margin-top: 19px; + background-image: linear-gradient(90deg, #e5b2ca 0%, #7546f3 100%); + height: 4px; + width: 323px; + } + } + + .desc { + width: 542px; + margin-top: 48px; + font-size: 18px; + color: #ffffff; + letter-spacing: 0.61px; + opacity: 0.8; + } + } + } +} diff --git a/.dumi/theme/slots/IntelligentInference/index.tsx b/.dumi/theme/slots/IntelligentInference/index.tsx new file mode 100644 index 0000000..bd221b8 --- /dev/null +++ b/.dumi/theme/slots/IntelligentInference/index.tsx @@ -0,0 +1,24 @@ +import { useLocale, useRouteMeta } from 'dumi'; +import './index.less'; +import React, { type FC } from 'react'; + +const IntelligentInference: FC = () => { + const { frontmatter } = useRouteMeta(); + if (!('IntelligentInference' in frontmatter)) return null; + return
+
+
+
+ {frontmatter.IntelligentInference.title} +
+
+
+ {frontmatter.IntelligentInference.description} +
+
+ +
+
+}; + +export default IntelligentInference; diff --git a/.dumi/theme/slots/LangSwitch/index.less b/.dumi/theme/slots/LangSwitch/index.less new file mode 100644 index 0000000..6fe3b61 --- /dev/null +++ b/.dumi/theme/slots/LangSwitch/index.less @@ -0,0 +1,56 @@ +@import (reference) '../../styles/variables.less'; + +.@{prefix}-lang-switch { + color: @c-text-secondary; + font-size: 14px; + line-height: 16px; + text-decoration: none; + transition: all 0.3s; + cursor: pointer; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + + &:hover { + color: @c-primary; + + @{dark-selector} & { + color: @c-primary-dark; + } + } +} + +.@{prefix}-lang-select { + display: inline-flex; + align-items: center; + + > select { + appearance: none; + padding: 6px 0; + padding-inline-start: 10px; + padding-inline-end: 18px; + color: @c-text-secondary; + text-align: right; + font-size: 14px; + line-height: 1; + border: 0; + background-color: transparent; + cursor: pointer; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + } + + > svg { + margin-inline-start: -16px; + width: 12px; + fill: darken(@c-border, 10%); + pointer-events: none; + + @{dark-selector} & { + fill: lighten(@c-border-dark, 10%); + } + } +} diff --git a/.dumi/theme/slots/LangSwitch/index.tsx b/.dumi/theme/slots/LangSwitch/index.tsx new file mode 100644 index 0000000..f9a2c6b --- /dev/null +++ b/.dumi/theme/slots/LangSwitch/index.tsx @@ -0,0 +1,99 @@ +import { ReactComponent as IconDown } from '@ant-design/icons-svg/inline-svg/outlined/down.svg'; +import { + history, + Link, + useIntl, + useLocale, + useLocation, + useSiteData, +} from 'dumi'; +import React, { useEffect, useState, type FC } from 'react'; +import './index.less'; + +type ILocaleItem = ReturnType['locales'][0]; + +function getTargetLocalePath({ + pathname, + current, + target, +}: { + pathname: string; + current: ILocaleItem; + target: ILocaleItem; +}) { + const clearPath = + 'base' in current + ? 
// handle '/en-US/a' => '/a' or '/en-US' => '' => '/' + pathname.replace(current.base.replace(/\/$/, ''), '') || '/' + : pathname.replace(new RegExp(`${current.suffix}$`), ''); + + return 'base' in target + ? `${ + // for `/` base, strip duplicated leading slash + target.base.replace(/\/$/, '') + }${clearPath}` + // for `/` clearPath, strip duplicated ending slash + .replace(/([^/])\/$/, '$1') + : `${clearPath}${target.suffix}`; +} + +const SingleSwitch: FC<{ locale: ILocaleItem; current: ILocaleItem }> = ({ + locale, + current, +}) => { + const { pathname } = useLocation(); + const [path, setPath] = useState(() => + getTargetLocalePath({ pathname, current, target: locale }), + ); + + useEffect(() => { + setPath(getTargetLocalePath({ pathname, current, target: locale })); + }, [pathname, current.id, locale.id]); + + return ( + + {locale.name} + + ); +}; + +const LangSwitch: FC = () => { + const { locales } = useSiteData(); + const { locale } = useIntl(); + const current = useLocale(); + + // do not render in single language + if (locales.length <= 1) return null; + + return locales.length > 2 ? ( +
+ + +
+ ) : ( + // single language switch + id !== locale)!} + current={current} + /> + ); +}; + +export default LangSwitch; diff --git a/.dumi/theme/slots/Logo/index.less b/.dumi/theme/slots/Logo/index.less new file mode 100644 index 0000000..d06a064 --- /dev/null +++ b/.dumi/theme/slots/Logo/index.less @@ -0,0 +1,33 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-logo { + display: inline-flex; + align-items: center; + color: @c-text; + font-size: 22px; + line-height: 1; + font-weight: bold; + text-decoration: none; + + @{dark-selector} & { + color: @c-text-dark; + } + + @media @mobile { + font-size: 18px; + + img { + height: 32px; + } + } + + img { + margin-inline-end: 10px; + width: 168px; + height: 30px; + + @media @mobile { + height: 32px; + } + } +} diff --git a/.dumi/theme/slots/Logo/index.tsx b/.dumi/theme/slots/Logo/index.tsx new file mode 100644 index 0000000..620c552 --- /dev/null +++ b/.dumi/theme/slots/Logo/index.tsx @@ -0,0 +1,28 @@ +import { Link, useLocale, useSiteData } from 'dumi'; +import { type FC } from 'react'; +import './index.less'; + +const Logo: FC = () => { + const { themeConfig } = useSiteData(); + const locale = useLocale(); + + return ( + + {themeConfig.logo !== false && ( + {themeConfig.name} + )} + {themeConfig.name} + + ); +}; + +export default Logo; diff --git a/.dumi/theme/slots/Navbar/index.less b/.dumi/theme/slots/Navbar/index.less new file mode 100644 index 0000000..343e939 --- /dev/null +++ b/.dumi/theme/slots/Navbar/index.less @@ -0,0 +1,289 @@ +@import (reference) '../../styles/variables.less'; +.@{prefix}-navbar { + list-style: none; + margin: 0; + padding: 0; + display: flex; + align-items: center; + justify-content: space-between; + width: 320px; + height: 50px; + + @media @mobile { + display: block; + padding-top: 24px; + } + + .line { + display: none; + background-image: linear-gradient(90deg, #5c6cf7 0%, #e5b2ca 100%); + border-radius: 2px; + height: 3px; + width: 50%; + margin: 0 auto; + position: 
absolute; + bottom: -10px; + left: 50%; + transform: translate(-50%); + } + + >li { + width: 96px; + color: #fff; + cursor: pointer; + font-size: 18px; + line-height: 1; + text-align: center; + position: relative; + font-weight: 500; + line-height: 22px; + + // @{dark-selector} & { + // color: @c-text-secondary-dark; + // } + + @media @mobile { + padding: 12px 0; + } + + >.@{prefix}-navbar-dropdown>li>a, + >a { + color: #fff; + // color: @c-text-secondary; + text-decoration: none; + transition: all 0.3s; + + + // @{dark-selector} & { + // color: @c-text-secondary-dark; + // } + + &:hover { + color: @c-primary; + background-image: linear-gradient(90deg, #5c6cf7 0%, #e5b2ca 100%); + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + + // @{dark-selector} & { + // color: @c-primary-dark; + // } + } + } + + a.active +.line { + display: block; + } + + span.active + button>svg { + fill:#e5b2ca; + } + + >a.active, + >span.active, + >.@{prefix}-navbar-dropdown>li>a.active { + color: @c-text; + background-image: linear-gradient(90deg, #5c6cf7 0%, #e5b2ca 100%); + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + font-weight: bold; + + // @{dark-selector} & { + // color: @c-text-dark; + // } + } + >a.active, + >span.active, + >.@{prefix}-navbar-dropdown>li>a.active>svg { + fill: #e5b2ca; + } + + >.@{prefix}-navbar-collapse-btn { + appearance: none; + margin-left: 6px; + margin-right: -24px; + width: 18px; + height: 18px; + padding: 0; + border: 0; + background: transparent; + vertical-align: middle; + + >svg { + fill: #fff; + width: 14px; + transition: transform 0.3s; + + // @{dark-selector} & { + // fill: @c-text-note-dark; + // } + } + + @media @mobile { + &[data-collapsed]>svg { + transform: rotate(180deg); + } + } + + @media @desktop { + margin-left: 4px; + margin-right: 0; + pointer-events: none; + + >svg { + width: 12px; + transition-delay: 0.1s; + } + } + } + + 
&:hover>.@{prefix}-navbar-collapse-btn>svg { + transform: rotate(180deg); + transition-delay: 0; + } + + >.@{prefix}-navbar-dropdown { + position: absolute; + top: 40px; + left: -215px; + min-width: calc(100% + 26px); + list-style: none; + padding: 4px; + background-color: #05030d; + // box-shadow: 0 4px 16px rgba(0, 0, 0, 10%); + transition: all 0.2s ease-in-out; + z-index: 1; + border-radius: 13px; + box-shadow: 1px 2px 4px 0px #7275c5ff; + + &[data-docs=true] { + display: grid; + grid-auto-rows: auto; + grid-auto-columns: minmax(200px, 1fr); + // grid-auto-columns: max-content; + grid-auto-flow: column dense; + gap: 0.1rem; + align-items: flex-start; + } + + // @{dark-selector} & { + // background-color: lighten(@c-site-bg-dark, 6%); + // } + + >li { + >ul { + // @{dark-selector} & { + + // background-color: lighten(@c-site-bg-dark, 6%); + // } + + @media @mobile { + border-radius: 8px; + background-color: transparent; + } + } + + >a { + display: block; + padding: 0 18px; + font-size: 14px; + line-height: 2.4; + text-align: left; + white-space: nowrap; + + @media @mobile { + display: inline; + } + + &:hover { + color: #fff; + // color: rgba(0, 0, 0, 0.88); + background-color: #05030d; + border-radius: 8px; + + // @{dark-selector} & { + // background-color: @c-primary; + // color: #fff; + // font-weight: 400; + // } + } + } + + >span { + display: block; + padding-block: 0.5rem 0.3rem; + margin-inline: 14px; + text-align: left; + font-size: 14px; + font-weight: 300; + } + + // &:first-child > a { + // padding-top: 8px; + // } + + // &:last-child > a { + // padding-bottom: 8px; + // } + } + + @media @mobile { + position: static; + background: transparent; + box-shadow: none; + min-width: 0; + + // @{dark-selector} & { + // background: transparent; + // } + + &:not([data-collapsed]) { + display: none; + } + } + } + + &:not(:hover)>.@{prefix}-navbar-dropdown { + visibility: hidden; + opacity: 0; + transform: translateY(-6px) scale(0.98); + transition-delay: 0.1s; + + 
@media @mobile { + visibility: visible; + opacity: 1; + transform: none; + } + } + + &:not(:last-child) { + margin-inline-end: 48px; + + @media @mobile { + margin-inline-end: 0; + } + } + } + + .ant-menu-light { + background: #05030d + } + + .ant-menu-light .ant-menu-item { + color: #fff; + } + + .ant-menu-light:not(.ant-menu-horizontal) .ant-menu-item:not(.ant-menu-item-selected):hover { + background-color: #181d29; + border-radius: 5px; + } + .ant-menu-light .ant-menu-item:not(.ant-menu-item-selected):not(.ant-menu-submenu-selected):hover { + color: #7180ff; + } + .ant-menu .ant-menu-title-content { + transition: none; + font-weight: 600; + } + +} diff --git a/.dumi/theme/slots/Navbar/index.tsx b/.dumi/theme/slots/Navbar/index.tsx new file mode 100644 index 0000000..af8c1a0 --- /dev/null +++ b/.dumi/theme/slots/Navbar/index.tsx @@ -0,0 +1,158 @@ +/* eslint-disable @typescript-eslint/no-use-before-define */ +import { ReactComponent as IconDown } from '@ant-design/icons-svg/inline-svg/outlined/down.svg'; +import { + FormattedMessage, + Link, + useLocale, + useLocation, + useNavData, +} from 'dumi'; +import NavbarExtra from '/.dumi/theme/slots/NavbarExtra'; +import React, { useState, type FC } from 'react'; +import './index.less'; +import { NavbarMenus, getItem } from './menu'; + +const NavbarItem: FC<{ data: ReturnType[0] }> = ({ + data, +}) => { + const { pathname } = useLocation(); + const isDevDocs = + data.activePath && ['/docs', '/en-US/docs', '/zh-CN/docs'].includes(data.activePath); + const [isCollapsed, setIsCollapsed] = useState(() => { + return data.children?.some((item) => { + const activePath = item.activePath || item.link; + return activePath && pathname.startsWith(activePath); + }); + }); + const CollapsedBtn = data.children && ( + + ); + const NestedNav = data.children && ( +
    + {isDevDocs ? ( + + ) : ( + + )} +
+ ); + // user custom nav has no activePath, so fallback to link + const activePath = data.activePath || data.link; + const extraProps = + activePath && pathname.startsWith(activePath) + ? { className: 'active' } + : {}; + + return data.link ? ( + <> + + {data.title} + + {CollapsedBtn} + {NestedNav} + + ) : ( + <> + { + e.stopPropagation(); + setIsCollapsed((v) => !v); + }} + {...extraProps} + > + {data.title} + + {CollapsedBtn} + {NestedNav} + + ); +}; + +const NavbarContent: FC<{ data: ReturnType }> = ({ + data, +}) => { + return ( + <> + {data.map((item) => ( +
  • + {item.link && /^(\w+:)\/\/|^(mailto|tel):/.test(item.link) ? ( + + {item.title} + + ) : ( + + )} +
    +
  • + ))} + + ); +}; + +const NavbarChildrenContent: FC = () => { + const locale = useLocale(); + const isEn = locale.id === 'en-US'; + return ( + <> +
  • + {/* 开发者文档 */} + + + + {NavbarMenus([ + getItem('CodeFuse-Query'), + getItem('MFTCoder'), + getItem('CodeFuse-MFT-VLM'), + getItem('Test-Agent'), + getItem('CodeFuse-ModelCache'), + getItem('CodeFuse-ChatBot'), + getItem('CodeFuse-DevOps-Eval'), + getItem('CodeFuse-DevOps-Model'), + getItem('CodeFuse-evalution'), + ])} +
  • +
  • + {/* API 文档 */} + + + + {NavbarMenus([ + getItem('MuAgent'), + ])} +
  • +
  • + {/* 关于 CodeFuse */} + + + + {NavbarMenus([getItem(isEn ? 'Overview' : '整体介绍')])} +
  • + + ); +}; + +const Navbar: FC = () => { + const nav = useNavData(); + return ( +
      + + +
    + ); +}; + +export default Navbar; diff --git a/.dumi/theme/slots/Navbar/menu.tsx b/.dumi/theme/slots/Navbar/menu.tsx new file mode 100644 index 0000000..3653287 --- /dev/null +++ b/.dumi/theme/slots/Navbar/menu.tsx @@ -0,0 +1,71 @@ +import type { MenuProps } from 'antd'; +import { ConfigProvider, Menu, theme } from 'antd'; +import { useLocale, useNavigate, usePrefersColor } from 'dumi'; +import React from 'react'; +import { NavbarEnums, NavbarEnumsEn } from '../../constants'; + +type MenuItem = Required['items'][number]; + +const linkUrl = (title: string, link: string) => { + const navigate = useNavigate(); + return ( + navigate(link)} title={title}> + {title} + + ); +}; + +export function getItem( + label: keyof typeof NavbarEnums | keyof typeof NavbarEnumsEn, + key?: any, + icon?: React.ReactNode, + children?: MenuItem[], + type?: 'group', +): MenuItem { + const locale = useLocale(); + const isEn = locale.id === 'en-US'; + const Enums: any = isEn ? NavbarEnumsEn : NavbarEnums; + const uniqueKey: keyof typeof NavbarEnums = key ?? label; + return { + key: key ?? 
label, + icon, + children, + label: linkUrl(label, Enums[uniqueKey]), + type, + } as MenuItem; +} + +export const NavbarMenus = (items: MenuProps['items']) => { + const [color] = usePrefersColor(); + return ( + + + + ); +}; diff --git a/.dumi/theme/slots/NavbarExtra/index.tsx b/.dumi/theme/slots/NavbarExtra/index.tsx new file mode 100644 index 0000000..60fafe0 --- /dev/null +++ b/.dumi/theme/slots/NavbarExtra/index.tsx @@ -0,0 +1,5 @@ +import React, { type FC } from 'react'; + +const NavbarExtra: FC = () => <>; + +export default NavbarExtra; diff --git a/.dumi/theme/slots/PerformanceEvaluation/index.less b/.dumi/theme/slots/PerformanceEvaluation/index.less new file mode 100644 index 0000000..41c751d --- /dev/null +++ b/.dumi/theme/slots/PerformanceEvaluation/index.less @@ -0,0 +1,64 @@ +.Performance { + margin-top: 100px; + display: flex; + justify-content: center; + + .Performance-center { + display: flex; + justify-content: center; + flex-direction: column; + width: 1200px; + + .PerformanceTitle { + margin: 0 auto; + text-align: center; + min-width: 343px; + font-size: 40px; + color: #ffffff; + letter-spacing: 1.35px; + font-weight: 600; + background-image: linear-gradient(90deg, #d8d8d8 0%, #545eff 100%); + display: inline-block; + background-clip: text; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + + .line { + margin: 0 auto; + margin-top: 19px; + background-image: linear-gradient(90deg, #e5b2ca 0%, #7546f3 100%); + height: 4px; + width: 323px; + } + } + + .PerformanceContent { + display: flex; + justify-content: center; + flex-direction: column; + margin-top: 80px; + width: 1200px; + border-radius: 13px; + background-image: url('https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*uh7FRKajlBIAAAAAAAAAAAAADlHYAQ/original'); + background-repeat: no-repeat; + background-size: cover; + background-position: center; + + img { + margin: 21px auto 41px auto; + border-radius: 5px; + width: 1050px; + height: 341px; + } + + .desc { + 
margin: 41px auto 0; + width: 1050px; + text-align: left; + font-size: 18px; + color: #ffffff; + letter-spacing: 0.61px; + } + } + } +} diff --git a/.dumi/theme/slots/PerformanceEvaluation/index.tsx b/.dumi/theme/slots/PerformanceEvaluation/index.tsx new file mode 100644 index 0000000..0cbbc4b --- /dev/null +++ b/.dumi/theme/slots/PerformanceEvaluation/index.tsx @@ -0,0 +1,26 @@ +import { Link, useLocale, useSiteData, useRouteMeta } from 'dumi'; +import './index.less'; +import React, { type FC } from 'react'; + +const PerformanceEvaluation: FC = () => { + const { frontmatter } = useRouteMeta(); + if (!('PerformanceEvaluation' in frontmatter)) return null; + return
    +
    +
    + {frontmatter.PerformanceEvaluation.title} +
    +
    +
    +
    + {frontmatter.PerformanceEvaluation.description} +
    + +
    +
    +
    +}; + +export default PerformanceEvaluation; diff --git a/.dumi/theme/slots/SearchBar/Mask.tsx b/.dumi/theme/slots/SearchBar/Mask.tsx new file mode 100644 index 0000000..4f7a8f5 --- /dev/null +++ b/.dumi/theme/slots/SearchBar/Mask.tsx @@ -0,0 +1,29 @@ +import React, { useEffect, type FC, type ReactNode } from 'react'; + +type MaskProps = { + visible: boolean; + children: ReactNode; + onMaskClick?: () => void; + onClose?: () => void; +}; + +export const Mask: FC = (props) => { + useEffect(() => { + if (props.visible) { + document.body.style.overflow = 'hidden'; + } else if (document.body.style.overflow) { + document.body.style.overflow = ''; + props.onClose?.(); + } + }, [props.visible]); + + return props.visible ? ( +
    +
    +
    {props.children}
    +
    + ) : null; +}; diff --git a/.dumi/theme/slots/SearchBar/index.less b/.dumi/theme/slots/SearchBar/index.less new file mode 100644 index 0000000..7808c55 --- /dev/null +++ b/.dumi/theme/slots/SearchBar/index.less @@ -0,0 +1,244 @@ +@import (reference) '../../styles/variables.less'; + +.@{prefix}-search-bar { + position: relative; + + @media @mobile { + // TODO: support search for mobile devices + display: none; + } + + &:not(:last-child) { + margin-inline-end: 28px; + } + + &-svg { + position: absolute; + top: 50%; + margin-top: 1px; + inset-inline-start: 16px; + width: 16px; + fill: @c-text-note; + transform: translateY(-50%); + + @{dark-selector} & { + fill: @c-text-note-dark; + } + } + + &-input { + width: 140px; + height: 40px; + padding: 0; + padding-inline-start: 40px; + padding-inline-end: 12px; + color: @c-border; + font-size: 14px; + border: 1px solid @c-border; + border-radius: 20px; + box-sizing: border-box; + outline: none; + transition: all 0.3s; + background-color: transparent; + + @{dark-selector} & { + color: @c-text-dark; + border-color: @c-border-dark; + } + + &:focus { + border-color: fade(@c-primary, 50%); + background-color: rgba(249, 247, 247, 0.45); + box-shadow: 0 0 0 3px fade(@c-primary, 10%); + + @{dark-selector} & { + border-color: fade(@c-primary-dark, 50%); + background-color: @c-site-bg-dark; + box-shadow: 0 0 0 3px fade(@c-primary-dark, 10%); + } + } + + &:focus, + &:not(:placeholder-shown) { + ~.@{prefix}-search-shortcut { + opacity: 0; + } + } + } + + .@{prefix}-search-shortcut { + position: absolute; + top: 50%; + inset-inline-end: 11px; + display: inline-block; + padding: 4px 8px; + color: @c-text-note; + font-size: 12px; + line-height: 1; + white-space: nowrap; + background-color: fade(#fff, 80%); + border-radius: 11px; + border: 1px solid @c-border; + transform: translateY(-50%); + transition: all 0.3s; + pointer-events: none; + + @{dark-selector} & { + background-color: fade(#000, 20%); + border-color: @c-border-dark; + } + 
+ @media @mobile { + display: none; + } + } + + .@{prefix}-search-popover { + position: absolute; + top: 100%; + inset-inline-end: 0; + display: flex; + flex-direction: column; + width: 540px; + max-height: 460px; + margin-top: 18px; + background-color: #fff; + border-radius: 8px; + box-shadow: 0 4px 30px rgba(0, 0, 0, 20%); + + @{dark-selector} & { + background-color: lighten(@c-site-bg-dark, 6%); + } + + &::before { + content: ''; + position: absolute; + bottom: 100%; + inset-inline-end: 100px; + display: inline-block; + width: 0; + height: 0; + border: 8px solid transparent; + border-bottom-color: #fff; + + @{dark-selector} & { + border-bottom-color: lighten(@c-site-bg-dark, 6%); + } + } + + >section { + flex: 1; + min-height: 60px; + overflow: auto; + overscroll-behavior: contain; + -webkit-overflow-scrolling: touch; + border-radius: inherit; + } + } + + .@{prefix}-search-modal { + position: fixed; + top: 0; + inset-inline-start: 0; + z-index: 1000; + width: 100vw; + height: 100vh; + display: flex; + justify-content: center; + + &-mask { + background-color: rgba(0, 0, 0, 45%); + width: 100%; + height: 100%; + } + + &-content { + position: absolute; + top: 60px; + background-color: #fff; + width: 500px; + padding: 12px; + box-sizing: border-box; + box-shadow: inset 1px 1px 0 0 hsla(0deg, 0%, 100%, 50%), + 0 3px 8px 0 #555a64; + border-radius: 8px; + max-height: calc(100% - 120px); + display: flex; + flex-direction: column; + + @{dark-selector} & { + background-color: lighten(@c-site-bg-dark, 6%); + } + } + + .@{prefix}-search-bar-input { + width: 100%; + border-radius: 4px; + } + + .@{prefix}-search-result { + min-height: 60px; + margin-top: 12px; + flex: auto; + overflow: auto; + + >dl>dd { + margin: 0 auto; + } + } + + &-commands { + justify-content: flex-start; + font-size: 12px; + color: @c-text-note; + list-style: none; + padding: 0; + margin: 0; + border-top: 1px solid @c-border-light; + padding-top: 12px; + display: flex; + align-items: center; + 
user-select: none; + + @{dark-selector} & { + color: @c-text-note-dark; + border-top-color: @c-border-less-dark; + } + + >li { + margin-inline-end: 10px; + } + + &-arrow { + .@{prefix}-search-modal-shortcut { + margin-inline-end: 4px; + } + } + + &-text { + margin-inline-start: 5px; + } + } + + &-shortcut { + display: inline-block; + padding: 4px 8px; + color: @c-text-note; + font-size: 12px; + line-height: 1; + white-space: nowrap; + background-color: @c-site-bg; + border-radius: 3px; + border: 1px solid @c-border; + border-bottom-width: 2px; + transition: all 0.3s; + pointer-events: none; + + @{dark-selector} & { + color: @c-text-note-dark; + background-color: @c-site-bg-dark; + border-color: @c-border-dark; + } + } + } +} diff --git a/.dumi/theme/slots/SearchBar/index.tsx b/.dumi/theme/slots/SearchBar/index.tsx new file mode 100644 index 0000000..9b70fa1 --- /dev/null +++ b/.dumi/theme/slots/SearchBar/index.tsx @@ -0,0 +1,175 @@ +import { ReactComponent as IconArrowDown } from '@ant-design/icons-svg/inline-svg/outlined/arrow-down.svg'; +import { ReactComponent as IconArrowUp } from '@ant-design/icons-svg/inline-svg/outlined/arrow-up.svg'; +import { ReactComponent as IconSearch } from '@ant-design/icons-svg/inline-svg/outlined/search.svg'; +import { useSiteSearch } from 'dumi'; +import SearchResult from 'dumi/theme/slots/SearchResult'; +import React, { useEffect, useRef, useState, type FC } from 'react'; +import { Input } from './Input'; +import { Mask } from './Mask'; +import './index.less'; +export { Input as SearchInput } from './Input'; +export { Mask as SearchMask } from './Mask'; + +const isAppleDevice = /(mac|iphone|ipod|ipad)/i.test( + typeof navigator !== 'undefined' ? 
navigator?.platform : '', +); + +/** Determine if the element that triggered the event is an input element */ +const isInput = (target: HTMLElement) => + ['TEXTAREA', 'INPUT'].includes(target.tagName) || + target.contentEditable === 'true'; + +const SearchBar: FC = () => { + const [focusing, setFocusing] = useState(false); + const inputRef = useRef(null); + const modalInputRef = useRef(null); + const [symbol, setSymbol] = useState('⌘'); + const { + keywords, + setKeywords, + result, + loading, + load: loadSearchData, + } = useSiteSearch(); + const [modalVisible, setModalVisible] = useState(false); + + useEffect(() => { + // why put useEffect? + // to avoid Text content mismatch between server & client in ssr + if (!isAppleDevice) { + setSymbol('Ctrl'); + } + + const handler = (ev: KeyboardEvent) => { + if ( + ((isAppleDevice ? ev.metaKey : ev.ctrlKey) && ev.key === 'k') || + (ev.key === '/' && !isInput(ev.target as HTMLElement)) + ) { + ev.preventDefault(); + + if (inputRef.current) { + const { top, bottom, left, right } = + inputRef.current.getBoundingClientRect(); + const isInViewport = + top >= 0 && + left >= 0 && + bottom <= window.innerHeight && + right <= window.innerWidth; + + if (isInViewport) { + inputRef.current.focus(); + } else { + setKeywords(''); + setModalVisible(true); + setTimeout(() => { + modalInputRef.current?.focus(); + }); + } + } + } + + if (ev.key === 'Escape') { + ev.preventDefault(); + setModalVisible(false); + } + }; + + document.addEventListener('keydown', handler); + + return () => document.removeEventListener('keydown', handler); + }, []); + + return ( +
    + + { + setFocusing(true); + loadSearchData(); + }} + onMouseEnter={() => { + loadSearchData(); + }} + onBlur={() => { + // wait for item click + setTimeout(() => { + setFocusing(false); + }, 1); + }} + onChange={(keywords) => setKeywords(keywords)} + ref={inputRef} + /> + {symbol} K + {keywords.trim() && focusing && !modalVisible && ( +
    +
    + +
    +
    + )} + + { + setModalVisible(false); + }} + onClose={() => setKeywords('')} + > +
    + + setFocusing(true)} + onBlur={() => { + // wait for item click + setTimeout(() => { + setFocusing(false); + }, 1); + }} + onChange={(keywords) => setKeywords(keywords)} + ref={modalInputRef} + /> +
    + + { + setModalVisible(false); + }} + /> + +
    +
      +
    • + + + + + + + + to navigate + +
    • +
    • + esc + + to close + +
    • +
    +
    +
    +
    + ); +}; + +export default SearchBar; diff --git a/.dumi/theme/slots/SearchBar/input.tsx b/.dumi/theme/slots/SearchBar/input.tsx new file mode 100644 index 0000000..a9a1bb0 --- /dev/null +++ b/.dumi/theme/slots/SearchBar/input.tsx @@ -0,0 +1,51 @@ +import { useIntl } from 'dumi'; +import React, { forwardRef, useImperativeHandle, useRef } from 'react'; + +type NativeInputProps = React.DetailedHTMLProps< + React.InputHTMLAttributes, + HTMLInputElement +>; + +type InputProps = { + onChange: (keywords: string) => void; +} & Pick; + +export const Input = forwardRef((props, ref) => { + const intl = useIntl(); + + const imeWaiting = useRef(false); + const nativeInputRef = useRef(null); + + useImperativeHandle(ref, () => nativeInputRef.current!); + + return ( + (imeWaiting.current = true)} + onCompositionEnd={(ev) => { + imeWaiting.current = false; + // special case: press Enter open IME panel will not trigger onChange + props.onChange(ev.currentTarget.value); + }} + onFocus={props.onFocus} + onBlur={props.onBlur} + onMouseEnter={props.onMouseEnter} + onKeyDown={(ev) => { + if (['ArrowDown', 'ArrowUp'].includes(ev.key)) ev.preventDefault(); + // esc to blur input + if (ev.key === 'Escape' && !imeWaiting.current) ev.currentTarget.blur(); + }} + onChange={(ev) => { + // wait for onCompositionEnd event be triggered + const value = ev.target.value; + setTimeout(() => { + if (!imeWaiting.current) { + props.onChange(value); + } + }, 1); + }} + placeholder={intl.formatMessage({ id: 'header.search.placeholder' })} + ref={nativeInputRef} + /> + ); +}); diff --git a/.dumi/theme/slots/Sidebar/SidebarMenu.tsx b/.dumi/theme/slots/Sidebar/SidebarMenu.tsx new file mode 100644 index 0000000..0ed197c --- /dev/null +++ b/.dumi/theme/slots/Sidebar/SidebarMenu.tsx @@ -0,0 +1,184 @@ +import type { GetProp, MenuProps } from 'antd'; +import { ConfigProvider, Menu, theme } from 'antd'; +import { NavLink, useLocation, usePrefersColor } from 'dumi'; +import React, { useEffect, useMemo, useRef, 
useState } from 'react'; +import styled from 'styled-components'; +import { ISidebarGroup } from '/.dumi/hooks/types'; + +type MenuItem = GetProp[number]; +type GroupItem = { + title: string; + link?: string; + children?: GroupItem[]; +}; + +function getItem( + label: React.ReactNode, + key?: React.Key | null, + icon?: React.ReactNode, + children?: MenuItem[], +): MenuItem { + return { + key, + icon, + children, + label, + } as MenuItem; +} + +const SideMenu = styled(Menu)` + border-inline-end: none !important; + + .ant-menu-title-content { + flex: 0.85 !important; + } + + .ant-menu-submenu-title { + padding-inline-end: 0 !important; + } + + .ant-menu-sub, + .ant-menu-light { + background-color: transparent !important; + } +`; + +const SidebarMenu = ({ menuData }: { menuData: ISidebarGroup[] }) => { + const { pathname } = useLocation(); + const [color] = usePrefersColor(); + const [stateOpenKeys, setStateOpenKeys] = useState([]); + + const menuItems = useRef([]); + const items: MenuItem[] = useMemo( + () => constructNewStructure(menuData), + [menuData], + ); + + function buildMenuItems(data: GroupItem[]): MenuItem[] { + return data.map((item) => { + const titleElement = item.link ? ( + (isActive ? { color: '#5c6cf7' } : {})} + > + {item.title} + + ) : ( + {item.title} + ); + const children = + item.children && item.children.length > 0 + ? buildMenuItems(item.children) + : undefined; + return getItem(titleElement, item.link ?? 
item.title, null, children); + }); + } + function constructNewStructure(rawData: ISidebarGroup[]) { + if (!rawData) return []; + const result: GroupItem[] = rawData + .map((category) => { + const indexGroup = category.children.find( + (child) => (child.frontmatter?.group as { index: boolean })?.index, + ); + const newItem: GroupItem = { + title: category.title!, + link: indexGroup?.link, + children: [], + }; + + const subGroupsMap: Record = {}; + category.children.forEach((g) => { + if ( + !g.frontmatter?.subGroup?.title && + !(g.frontmatter?.group as { index: boolean })?.index && + !g.frontmatter?.resource + ) { + const child: GroupItem = { + title: g.title, + link: g.link, + children: [], + }; + newItem.children?.push(child); + subGroupsMap[g.title] = child; + } + }); + category.children.forEach((subGroup) => { + const subTitle = subGroup.frontmatter?.subGroup?.title; + if (subTitle) { + subGroupsMap[subTitle]?.children?.push({ + title: subGroup.title, + link: subGroup.link, + }); + } + }); + return newItem; + }) + .filter((item): item is GroupItem => item !== null); + menuItems.current = result; + return buildMenuItems(result); + } + + function getOpenKeys( + data: GroupItem[], + pathname: string, + parentLinks: string[] = [], + ) { + for (const item of data) { + if (item.link === pathname) { + // 找到匹配项,返回累积的父链接数组 + return parentLinks; + } + // 如果当前节点有子节点,递归搜索子节点 + if (item.children && item.children.length) { + const result: string[] = getOpenKeys( + item.children, + pathname, + parentLinks.concat(item.link ?? 
item.title), + ); + // 如果在子节点中找到匹配项,返回结果 + if (result.length) return result; + } + } + // 如果没有找到匹配项,返回空数组 + return []; + } + + useEffect(() => { + setStateOpenKeys(getOpenKeys(menuItems.current, pathname)); + }, [pathname, menuItems.current]); + + return ( + + setStateOpenKeys(openKeys)} + selectedKeys={[pathname]} + style={{ height: '100%' }} + items={items} + /> + + ); +}; + +export default SidebarMenu; diff --git a/.dumi/theme/slots/Sidebar/index.less b/.dumi/theme/slots/Sidebar/index.less new file mode 100644 index 0000000..5e06b44 --- /dev/null +++ b/.dumi/theme/slots/Sidebar/index.less @@ -0,0 +1,79 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-sidebar { + position: sticky; + top: @s-header-height; + width: @s-sidebar-width; + max-height: calc(100vh - @s-header-height); + padding-top: 20px; + padding-bottom: 24px; + // padding-inline-start: 8px; + padding-inline-end: 32px; + box-sizing: border-box; + overflow: auto; + + .@{prefix}-sidebar-version { + display: flex; + width: 100%; + margin-top: 10px; + justify-content: space-between; + + margin-bottom: 16px; + padding-bottom: 16px; + border-bottom: 1px solid @c-border; + + @{dark-selector} & { + border-top-color: #d0d5d8; + } + } + + @{dark-selector} & { + border-top-color: @c-border-dark; + } + + @media @mobile { + position: fixed; + z-index: 20; + top: 0; + left: 0; + bottom: 0; + max-height: initial; + padding-inline-start: 32px; + background-color: @c-site-bg; + border-top: 1px solid @c-border-light; + box-shadow: 0 0 20px rgba(0, 0, 0, 10%); + transition: 0.2s all; + + @{dark-selector} & { + background-color: @c-site-bg-dark; + border-top-color: @c-border-less-dark; + } + + .@{prefix}-doc-layout:not([data-mobile-sidebar-active]) & { + opacity: 0; + visibility: hidden; + transform: translateX(-100%); + } + } +} + +.ant-select-dropdown { + background-color: #0d0d15; + border: 1px solid #3f414c; + border-radius: 10px; +} + +.ant-select-dropdown 
.ant-select-item-option-selected:not(.ant-select-item-option-disabled) { + background: #181d29; + border-radius: 5px; +} +.ant-select-dropdown .ant-select-item { + color: #b5b5b5; +} +.ant-select-single.ant-select-open .ant-select-selection-item { + color: #fff; +} +.ant-btn-default { + background: transparent; + color: #fff; +} diff --git a/.dumi/theme/slots/Sidebar/index.tsx b/.dumi/theme/slots/Sidebar/index.tsx new file mode 100644 index 0000000..fc48533 --- /dev/null +++ b/.dumi/theme/slots/Sidebar/index.tsx @@ -0,0 +1,204 @@ +import { GithubOutlined } from '@ant-design/icons'; +import { Button, ConfigProvider, Select, SelectProps, theme } from 'antd'; +import { + useLocale, + useLocation, + useNavigate, + usePrefersColor, + useRouteMeta, + useSidebarData, +} from 'dumi'; +import React, { useEffect, useRef, useState, type FC } from 'react'; +import { ISidebarGroup } from '../../../hooks/types'; +import { NavbarEnums, NavbarEnumsEn } from '../../constants'; +import SidebarMenu from './SidebarMenu'; +import './index.less'; + +/** lodash pickBy, 注:dumi源文件无法解析lodash,且无法解析utils文件夹 */ +function pickBy( + object: T, + predicate: (value: any, key: keyof T) => boolean, +): Partial { + const result: Partial = {}; + for (const key in object) { + if ( + Object.prototype.hasOwnProperty.call(object, key) && + predicate(object[key], key) + ) { + result[key] = object[key]; + } + } + return result; +} +/** 数组转换为 Options */ +function toOptions(arr: T[]) { + return arr.map((o) => ({ + value: o, + label: o, + })); +} + +const Sidebar: FC = () => { + const { pathname } = useLocation(); + const meta = useRouteMeta(); + const navigate = useNavigate(); + const sidebar = useSidebarData(); + const [color] = usePrefersColor(); + const locale = useLocale(); + const isEn = locale.id === 'en-US'; + /** 侧边栏数组,方便做筛选 */ + const [cloneSidebar, setCloneSidebar] = useState(); + /** 仓库名 */ + const [storeValue, setStoreValue] = useState(); + /** 版本号 */ + const [versionValue, setVersionValue] = 
useState(); + /** 版本号 Options */ + const [versionOptions, setVersionOptions] = useState( + [], + ); + const Enums: any = isEn ? NavbarEnumsEn : NavbarEnums; + /** 开发者文档,选项数组 */ + const devDocsObj: Record = pickBy(Enums, (value) => + value.includes('developer-docs'), + ); + /** 仓库枚举 */ + const StoreOptions = toOptions(Object.keys(devDocsObj)); + /** 上一次保存的版本号 */ + const versionPrevious = useRef(); + /** 仓库:[版本号] 映射关系 */ + const versionMap = + useRef>(); + + /** 判断是否是开发者文档 */ + const isDevDocs = pathname.includes('developer-docs'); + + /** 根据路由获取仓库名和版本号 */ + function getStoreAndVersion() { + /** 根据 url 获取当前仓库 */ + const storeMatch = pathname.match(/\/developer-docs\/(.*?)\//)![1]; + /** 根据 url 和 仓库 获取当前版本号 */ + const regex = new RegExp(`/${storeMatch}/([^/]+)`, 'i'); + const versionMatch = pathname.match(regex)?.[1]; + + return { + storeMatch, + versionMatch, + }; + } + /** 获取版本号选项 */ + function getVersionOptions() { + const selectObject = sidebar.reduce((accumulator, module) => { + module.children.forEach((child) => { + if (child.frontmatter?.store) { + const title = ( + child.frontmatter?.store as { title: string; version: string } + )?.title; + const version: string = ( + child.frontmatter?.store as { title: string; version: string } + )?.version; + if (!accumulator[title]) { + accumulator[title] = []; + } + const versionExists = accumulator[title].some( + (item) => item.label === version, + ); + if (!versionExists) { + accumulator[title].push({ + value: version.replace(/\./g, '-'), + label: version, + }); + } + } + }); + return accumulator; + }, {} as Record); + return selectObject; + } + + useEffect(() => { + if (!isDevDocs) { + setCloneSidebar(sidebar); + } else { + const { storeMatch, versionMatch } = getStoreAndVersion(); + // 仓库:[版本号] 映射关系 + versionMap.current = getVersionOptions(); + setStoreValue(storeMatch); + // 根据仓库名称获取版本号Options + setVersionOptions(versionMap.current[storeMatch]); + // 保存默认版本号 + setVersionValue(versionMatch); + } + return () => { + 
setStoreValue(''); + setVersionValue(''); + }; + }, [sidebar, pathname]); + + useEffect(() => { + if (storeValue && versionValue) { + const { storeMatch, versionMatch } = getStoreAndVersion(); + setCloneSidebar( + sidebar + .filter((o) => o.title?.includes(storeMatch)) + .map((o) => ({ + ...o, + children: o.children.filter( + (child) => + child.frontmatter?.store?.version === + versionMatch?.replace(/\-/g, '.'), + ), + })) + .filter((o) => o.children.length > 0), + ); + } + }, [storeValue, versionValue]); + + if (!sidebar) return null; + + return ( +
    + {isDevDocs && ( + + + (versionPrevious.current = versionValue) + } + onChange={(e) => { + navigate(pathname.replace(versionPrevious.current!, e)); + }} + /> +
    + + )} + +
    + ); +}; + +export default Sidebar; diff --git a/.dumi/theme/slots/Toc/index.less b/.dumi/theme/slots/Toc/index.less new file mode 100644 index 0000000..b202d04 --- /dev/null +++ b/.dumi/theme/slots/Toc/index.less @@ -0,0 +1,98 @@ +@import (reference) '.dumi/theme/styles/variables.less'; + +.@{prefix}-content-tool { + color: @c-text-note; + font-size: 14px; + + @{dark-selector} & { + color: @c-text-note-dark; + } + + >dl { + &:empty { + display: none; + } + + dd { + margin-inline-start: 0px; + padding-bottom: 10px; + + >a { + color: @c-text-note; + + @{dark-selector} & { + color: @c-primary-dark; + } + + &:not(:hover) { + text-decoration: none; + } + + >svg { + fill: @c-primary; + + @{dark-selector} & { + fill: @c-primary-dark; + } + } + } + } + } +} + +.@{prefix}-toc { + list-style: none; + margin: 12px 0 0; + padding: 0; + border-inline-start: 2px solid @c-border; + + @{dark-selector} & { + border-inline-start-color: @c-border-dark; + } + + &:empty { + display: none; + } + + >li { + >a { + display: block; + margin: 6px 0; + padding: 3px 16px; + color: @c-text-secondary; + font-size: 14px; + line-height: 1; + text-decoration: none; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + + @{dark-selector} & { + color: @c-text-secondary-dark; + } + + &:hover { + color: @c-text; + + @{dark-selector} & { + color: @c-text-dark; + } + } + + &.active { + margin-inline-start: -2px; + color: @c-text !important; + border-inline-start: 2px solid @c-primary; + + @{dark-selector} & { + color: @c-text-dark; + border-inline-start-color: @c-primary-dark; + } + } + } + + &[data-depth='3']>a { + padding-inline-start: 32px; + } + } +} diff --git a/.dumi/theme/slots/Toc/index.tsx b/.dumi/theme/slots/Toc/index.tsx new file mode 100644 index 0000000..e7763d0 --- /dev/null +++ b/.dumi/theme/slots/Toc/index.tsx @@ -0,0 +1,146 @@ +import { ClockCircleOutlined, EditOutlined } from '@ant-design/icons'; +import { Scrollspy as ScrollSpy } from 
'@makotot/ghostui/src/Scrollspy'; +import { + FormattedMessage, + Link, + history, + useIntl, + useLocation, + useRouteMeta, + useSiteData, + useTabMeta, +} from 'dumi'; +import React, { + useEffect, + useLayoutEffect, + useRef, + useState, + type FC, + type RefObject, +} from 'react'; +import './index.less'; + +const Toc: FC = () => { + const { pathname, search, hash } = useLocation(); + const meta = useRouteMeta(); + const tabMeta = useTabMeta(); + const { loading } = useSiteData(); + + const intl = useIntl(); + const prevIndexRef = useRef(0); + const { frontmatter } = useRouteMeta(); + const [sectionRefs, setSectionRefs] = useState[]>([]); + const [isoLastUpdated, setIsoLastUpdated] = useState(''); + const [lastUpdated, setLastUpdated] = useState(''); + const { themeConfig } = useSiteData(); + const showEditLink = themeConfig.editLink && frontmatter.filename; + const showLastUpdated = themeConfig.lastUpdated && frontmatter.lastUpdated; + const memoToc = React.useMemo(() => { + let toc = meta.toc; + if (tabMeta) { + toc = tabMeta.toc; + } + // only render h2 ~ h4 + return toc.filter(({ depth }) => depth > 1 && depth < 4); + }, [meta, tabMeta]); + + useEffect(() => { + // wait for page component ready (DOM ready) + if (!loading) { + // find all valid headings as ref elements + const refs = memoToc.map(({ id }) => ({ + current: document.getElementById(id), + })); + + setSectionRefs(refs as any); + } + }, [pathname, search, loading, memoToc]); + + // to avoid timestamp mismatched between server and client + useLayoutEffect(() => { + if (showLastUpdated) { + setIsoLastUpdated(new Date(frontmatter.lastUpdated!).toISOString()); + setLastUpdated( + new Intl.DateTimeFormat(undefined, { + dateStyle: 'short', + timeStyle: 'short', + }).format(frontmatter.lastUpdated), + ); + } + }, [showLastUpdated]); + + return ( + <> +
    +
    +
    + {' '} + {/* */} + +
    +
    + + {' '} + + +
    +
    +
    + {sectionRefs.length ? ( + <> + + {({ currentElementIndexInViewport }) => { + // for keep prev item active when no item in viewport + if (currentElementIndexInViewport > -1) + prevIndexRef.current = currentElementIndexInViewport; + + return ( +
      + {memoToc + .filter(({ depth }) => depth > 1 && depth < 4) + .map((item, i) => { + const link = `${search}#${encodeURIComponent(item.id)}`; + const activeIndex = + currentElementIndexInViewport > -1 + ? currentElementIndexInViewport + : prevIndexRef.current; + + return ( +
    • + { + if ( + decodeURIComponent(hash).slice(1) === item.id + ) { + history.replace(`${pathname}${search}`); + } + }} + title={item.title} + {...(activeIndex === i + ? { className: 'active' } + : {})} + > + {item.title} + +
    • + ); + })} +
    + ); + }} +
    + + ) : null} + + ); +}; + +export default Toc; diff --git a/.dumi/theme/styles/variables.less b/.dumi/theme/styles/variables.less new file mode 100644 index 0000000..9ff5748 --- /dev/null +++ b/.dumi/theme/styles/variables.less @@ -0,0 +1,38 @@ +@prefix: dumi-default; +@s-content-width: 1200px; +@s-content-padding: 48px; +@s-sidebar-width: 248px; +@s-header-height: 100px; +@s-header-height-m: 52px; + +// default theme colors +@c-primary: #5c6cf7; +@c-warning: #d59200; +@c-success: #208a41; +@c-error: #ce1f31; +@c-text: #30363f; +@c-text-secondary: #4f5866; +@c-text-note: #8a9099; +@c-border: #d0d5d8; +@c-border-light: #e4e9ec; +@c-site-bg: #04040e; + +// dark theme colors +// @dark-selector be injected by less-loader in feature/theme/index.ts +@dark-solid-amount: 15%; +@dark-light-amount: 22%; +@dark-border-amount: 71%; +@c-primary-dark: darken(@c-primary, @dark-solid-amount); +@c-warning-dark: darken(@c-warning, @dark-solid-amount); +@c-success-dark: darken(@c-success, @dark-solid-amount); +@c-error-dark: darken(@c-error, @dark-solid-amount); +@c-text-dark: lighten(@c-text-note, @dark-light-amount); +@c-text-secondary-dark: lighten(@c-text-secondary, @dark-light-amount); +@c-text-note-dark: lighten(@c-text, @dark-light-amount); +@c-border-dark: darken(@c-border, @dark-border-amount); +@c-border-less-dark: darken(@c-border-light, @dark-border-amount); +@c-site-bg-dark: darken(@c-site-bg, 95%); + +@mobile: ~'only screen and (max-width: 767px)'; +@tablet: ~'only screen and (min-width: 768px) and (max-width: 1024px)'; +@desktop: ~'only screen and (min-width: 1025px)'; diff --git a/.dumi/tsconfig.json b/.dumi/tsconfig.json new file mode 100644 index 0000000..60e702d --- /dev/null +++ b/.dumi/tsconfig.json @@ -0,0 +1,7 @@ +{ + "compilerOptions": { + "jsx": "react" + }, + "extends": "../tsconfig.json", + "include": ["**/*"] +} diff --git a/.dumirc.ts b/.dumirc.ts new file mode 100644 index 0000000..251bcc7 --- /dev/null +++ b/.dumirc.ts @@ -0,0 +1,25 @@ +import { 
defineConfig } from 'dumi'; + +export default defineConfig({ + favicons: [ + 'https://mdn.alipayobjects.com/huamei_v98cj4/afts/img/A*EfwQTpYQfq4AAAAAAAAAAAAADo6VAQ/original', + ], + locales: [ + { id: 'en-US', name: 'EN' }, + { id: 'zh-CN', name: '中文' }, + ], + themeConfig: { + logo: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*_pzERpyma84AAAAAAAAAAAAADlHYAQ/original', + footer: false, + // 'Copyright © 支付宝(中国)网络技术有限公司 | 备案号:沪ICP备15027489号', + socialLinks: { + github: 'https://github.com/codefuse-ai', + }, + editLink: true, + }, + + mfsu: false, + resolve: { + forceKebabCaseRouting: false, + } +}); diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..e717f5e --- /dev/null +++ b/.editorconfig @@ -0,0 +1,13 @@ +# http://editorconfig.org +root = true + +[*] +indent_style = space +indent_size = 2 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.md] +trim_trailing_whitespace = false diff --git a/.gitignore b/.gitignore index d70ebaa..b1143ce 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,4 @@ -public \ No newline at end of file +node_modules +.dumi/tmp +.dumi/tmp-production +.DS_Store diff --git a/.hugo_build.lock b/.hugo_build.lock deleted file mode 100644 index e69de29..0000000 diff --git a/.husky/commit-msg b/.husky/commit-msg new file mode 100644 index 0000000..5b0b354 --- /dev/null +++ b/.husky/commit-msg @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +. "$(dirname -- "$0")/_/husky.sh" + +npx commitlint --edit "${1}" diff --git a/.husky/pre-commit b/.husky/pre-commit new file mode 100644 index 0000000..d24fdfc --- /dev/null +++ b/.husky/pre-commit @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +. 
"$(dirname -- "$0")/_/husky.sh" +
+npx lint-staged diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..22b6303 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,3 @@ +.dumi/tmp +.dumi/tmp-production +*.yaml diff --git a/.prettierrc.js b/.prettierrc.js new file mode 100644 index 0000000..e048a9d --- /dev/null +++ b/.prettierrc.js @@ -0,0 +1,14 @@ +module.exports = { + printWidth: 80, + proseWrap: 'never', + singleQuote: true, + trailingComma: 'all', + overrides: [ + { + files: '*.md', + options: { + proseWrap: 'preserve', + }, + }, + ], +}; diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..25fa621 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "typescript.tsdk": "node_modules/typescript/lib" +} diff --git a/LEGAL.md b/LEGAL.md new file mode 100644 index 0000000..dfc3285 --- /dev/null +++ b/LEGAL.md @@ -0,0 +1,7 @@ +Legal Disclaimer +
+Within this source code, the comments in Chinese shall be the original, governing version. Any comments in other languages are for reference only. In the event of any conflict between the Chinese language version comments and other language version comments, the Chinese language version shall prevail. +
+法律免责声明 +
+关于代码注释部分,中文注释为官方版本,其它语言注释仅做参考。中文注释可能与其它语言注释存在不一致,当中文注释与其它语言注释存在不一致时,请以中文注释为准。 diff --git a/README.md b/README.md new file mode 100644 index 0000000..a63ed6e --- /dev/null +++ b/README.md @@ -0,0 +1,20 @@ +# CodeFuse-Docs +
+A static site based on [dumi](https://d.umijs.org). 
+ +## Development + +```bash +# install dependencies +$ tnpm install + +# start dev server +$ tnpm start + +# build docs +$ tnpm run build +``` + +## LICENSE + +MIT diff --git a/archetypes/default.md b/archetypes/default.md deleted file mode 100644 index c6f3fce..0000000 --- a/archetypes/default.md +++ /dev/null @@ -1,5 +0,0 @@ -+++ -title = '{{ replace .File.ContentBaseName "-" " " | title }}' -date = {{ .Date }} -draft = true -+++ diff --git a/content/en/coagent/connector/connector_agent.md b/content/en/coagent/connector/connector_agent.md deleted file mode 100644 index 4efe08e..0000000 --- a/content/en/coagent/connector/connector_agent.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Connector Agent -slug: Connector Agent -url: "coagent/connector-agent" -aliases: -- "/coagent/connector-agent" ---- - - -## 快速构建一个Agent -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.configs import AGETN_CONFIGS -from coagent.connector.agents import BaseAgent -from coagent.connector.schema import Message, load_role_configs - - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xx" -openai.api_key = "sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - - -- 配置相关 LLM 和 Embedding Model -``` -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - -- 这里从已有的agent配置选一个role来做示例 -``` -# 
从已有的配置中选择一个config,具体参数细节见下面 -role_configs = load_role_configs(AGETN_CONFIGS) -agent_config = role_configs["general_planner"] -# 生成agent实例 -base_agent = BaseAgent( - role=agent_config.role, - prompt_config = agent_config.prompt_config, - prompt_manager_type=agent_config.prompt_manager_type, - chat_turn=agent_config.chat_turn, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, - embed_config=embed_config, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - ) -# round-1 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message = base_agent.step(query) -print(output_message.to_str_content(content_key="parsed_output_list")) -``` - -## Agent 参数配置 -``` -# 配置结构在这个目录 -from coagent.connector.schema import Role, PromptField -``` - - -### Agent Config -|Config Key Name| Type| Description| -| ------------------ | ---------- | ---------- | -|role| Role |角色描述| -|prompt_config |List[PromptField] |Enum:PromptManager 也可以继承以上几种Agent然后去构造相关的Agent| -|prompt_manager_type |String |Enum:PromptManager 也可以继承以上几种Agent然后去构造自定义的Enum:PromptManager| -|focus_agents |List[String] |metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name -|focus_message_keys |List[String]| 额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys| -|chat_turn |int |只针对ReactAgent有效| -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| - -### Role - -| Config Key Name | Type | Description | -|------------------|------|--------------------| -| role_type | str | 角色类型, Enum: system、user、assistant、function、observation、summary | -| role_name | str | 角色名称 | -| role_desc | str | 角色描述 | -| agent_type | str | 代理类型 | -| role_prompt | str | 角色提示 | -| 
template_prompt | str | 模板提示 | - - -### PromptField - -| Config Key Name | Type | Description | -|-----------------|------|-------------| -| field_name | str | | -| function_name | str | | -| title | str | | -| description | str | | -| is_context | bool | | -| omit_if_empty | bool | | \ No newline at end of file diff --git a/content/en/coagent/connector/connector_chain.md b/content/en/coagent/connector/connector_chain.md deleted file mode 100644 index c7a1622..0000000 --- a/content/en/coagent/connector/connector_chain.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Connector Chain -slug: Connector Chain -url: "coagent/connector-chain" -aliases: -- "/coagent/connector-chain" ---- - -## 快速构建一个 agent chain -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -# 设置openai的api-key -import os, sys -import openai -import importlib - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xxxx" -openai.api_key = "sk-xxxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -- 配置相关 LLM 和 Embedding Model -``` -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - - -- 这里从已有的agent配置选多个role组合成 agent chain -``` -from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.configs import AGETN_CONFIGS -from coagent.connector.chains import BaseChain -from coagent.connector.schema import Message, load_role_configs - -# 构建 agent chain 链路 -role_configs = 
load_role_configs(AGETN_CONFIGS) -agent_config = role_configs["general_planner"] -role1 = role_configs["general_planner"] -role2 = role_configs["executor"] -agent_module = importlib.import_module("examples.connector.agents") -agents = [ - getattr(agent_module, role1.role.agent_type)( - role=role1.role, - prompt_config = role1.prompt_config, - prompt_manager_type=role1.prompt_manager_type, - chat_turn=role1.chat_turn, - focus_agents=role1.focus_agents, - focus_message_keys=role1.focus_message_keys, - llm_config=llm_config, - embed_config=embed_config, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - ), - getattr(agent_module, role2.role.agent_type)( - role=role2.role, - prompt_config = role2.prompt_config, - prompt_manager_type=role2.prompt_manager_type, - chat_turn=role2.chat_turn, - focus_agents=role2.focus_agents, - focus_message_keys=role2.focus_message_keys, - llm_config=llm_config, - embed_config=embed_config, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - ), - ] - -chain = BaseChain( - agents, - chat_turn=1, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - llm_config=llm_config, - embed_config=embed_config, - ) -``` - - -- 开始执行 -``` -# round-1 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = chain.step(query) -print(output_memory.to_str_messages(content_key="parsed_output_list")) - -``` - - -## Chain 参数配置 -|Config Key Name| Type |Description| -| ------------------ | ---------- | ---------- | -|agents| List[BaseAgent] | -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| diff --git 
a/content/en/coagent/connector/connector_memory.md b/content/en/coagent/connector/connector_memory.md deleted file mode 100644 index c921bbe..0000000 --- a/content/en/coagent/connector/connector_memory.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Connector Memory -slug: Connector Memory -url: "coagent/connector-memory" -aliases: -- "/coagent/connector-memory" ---- - - -## Memory Manager -主要用于 chat history 的管理,暂未完成 -- 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval -- 对 chat history 进行关键信息总结 summary context,作为 prompt context -- 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 - - - -## 使用示例 - -### 创建 memory manager 实例 -``` -import os -import openai - -from coagent.base_configs.env_config import KB_ROOT_PATH -from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.schema import Message - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xx" -openai.api_key = "sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" - -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - -# -phase_name = "test" -memory_manager = LocalMemoryManager( - unique_name=phase_name, - do_init=True, - kb_root_path = KB_ROOT_PATH, - embed_config=embed_config, - llm_config=llm_config - ) -``` - -### 支持Message管理 - -``` -message1 = Message( - role_name="test1", role_type="user", input_query="hello", origin_query="hello", - 
parsed_output_list=[{"input": "hello"}] -) - -text = "hi! how can I help you?" -message2 = Message( - role_name="test2", role_type="assistant", input_query=text, origin_query=text, - role_content=text, step_content=text, parsed_output_list=[{"answer": text}] -) - -text = "they say hello and hi to each other" -message3 = Message( - role_name="test3", role_type="summary", - role_content=text, step_content=text, - parsed_output_list=[{"summary": text}] - ) - -``` - -### 支持 memory 检索 -``` -# embedding retrieval test -text = "say hi, i want some help" -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "datetime")) -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "embedding")) -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "text")) - -``` -### 支持 memory 总结 -``` -# recursive_summary test -print(memory_manager.recursive_summary(local_memory_manager.recall_memory.messages, split_n=1)) -``` \ No newline at end of file diff --git a/content/en/coagent/connector/connector_phase.md b/content/en/coagent/connector/connector_phase.md deleted file mode 100644 index 28d03fb..0000000 --- a/content/en/coagent/connector/connector_phase.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Connector Phase -slug: Connector Phase -url: "coagent/connector-phase" -aliases: -- "/coagent/connector-phase" ---- - - - -## 快速构建一个 agent phase -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.configs import AGETN_CONFIGS -from coagent.connector.phase import BasePhase -from coagent.connector.schema import Message, load_role_configs - - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xx" -openai.api_key = "sk-xxx" -# 
os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - - -- 配置相关 LLM 和 Embedding Model -``` -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - - -- 这里从已有的 phase 配置中选一个 phase 来做示例 -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "searchChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -query_content1 = "美国当前总统是谁?" -query = Message( - role_name="human", role_type="user", - role_content=query_content1, input_query=query_content1, origin_query=query_content1, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content2 = "美国上一任总统是谁,两个人有什么关系没?" 
-query = Message( - role_name="human", role_type="user", - role_content=query_content2, input_query=query_content2, origin_query=query_content2, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - - -## Phase 参数配置 -|Config Key Name |Type |Description| -| ------------------ | ---------- | ---------- | -|phase_name| String| 场景名称| -|phase_config|CompletePhaseConfig| 默认为None,可直接指定完整的phaseconfig, 暂未实现| -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| -| base_phase_config | Union[dict, str] | 默认配置:PHASE_CONFIGS,可通过实现对这个变量新增来实现自定义配置 | -| base_chain_config | Union[dict, str] | 默认配置:CHAIN_CONFIGS,可通过实现对这个变量新增来实现自定义配置 | -| base_role_config | Union[dict, str] | 默认配置:AGETN_CONFIGS,可通过实现对这个变量新增来实现自定义配置 | diff --git a/content/en/coagent/connector/connector_prompt.md b/content/en/coagent/connector/connector_prompt.md deleted file mode 100644 index fdadd90..0000000 --- a/content/en/coagent/connector/connector_prompt.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -title: Connector Prompt -slug: Connector Prompt -url: "coagent/connector-prompt" -aliases: -- "/coagent/connector-prompt" ---- - -## Prompt 的标准结构 -在整个Prompt的整个结构中,我们需要去定义三个部分 -- Agent Profil -- Input Format -- Response Output Format - -``` -#### Agent Profile - -Agent Description ... - -#### Input Format - -**Origin Query:** the initial question or objective that the user wanted to achieve - -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved. - -#### Response Output Format -**Action Status:** finished or continued -If it's 'finished', the context can answer the origin query. 
-If it's 'continued', the context cant answer the origin query. - -**REASON:** Justify the decision of choosing 'finished' and 'continued' by evaluating the progress step by step. -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion. -``` - - -其中,我们整合了部分 `Input Format` 的通用操作,内置了一部分字段和操作流程,形成通用的配置化操作。如下所示 -只需要定义如下字段和执行函数, - -``` -AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [ - {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False} -] -``` - -未来我们会也会进一步将 Agent Profile和Response Output Format的部分,实现可配置化操作,降低Prompt编写难度 - -### 自定义 Input Format -同时,我们也支持 用户自定义 Input Format 的操作 - -``` -from coagent.connector.prompt_manager import PromptManager - -# 增加了两个新处理函数,用于prompt组装 -class CodeRetrievalPM(PromptManager): - def handle_code_packages(self, **kwargs) -> str: - if 'previous_agent_message' not in kwargs: - return "" - previous_agent_message: Message = kwargs['previous_agent_message'] - # 由于两个agent共用了同一个manager,所以临时性处理 - vertices = previous_agent_message.customed_kargs.get("RelatedVerticesRetrivalRes", {}).get("vertices", []) - return ", ".join([str(v) for v in vertices]) - - def handle_retrieval_codes(self, **kwargs) -> str: - if 'previous_agent_message' not in kwargs: - return "" - previous_agent_message: Message = kwargs['previous_agent_message'] - return '\n'.join(previous_agent_message.customed_kargs["Retrieval_Codes"]) - - -# Design your personal PROMPT INPPUT FORMAT -CODE_RETRIEVAL_PROMPT_CONFIGS = [ - {"field_name": 'agent_profile', 
"function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'reference_documents', "function_name": 'handle_doc_info'}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'retrieval_codes', "function_name": 'handle_retrieval_codes'}, - {"field_name": 'code_packages', "function_name": 'handle_code_packages'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False} - ] - -# 进行注册 -import importlib -prompt_manager_module = importlib.import_module("coagent.connector.prompt_manager") -setattr(prompt_manager_module, 'CodeRetrievalPM', CodeRetrievalPM) - -# 更新配置 -from coagent.connector.configs import AGETN_CONFIGS -AGETN_CONFIGS.update({ - "codeRetrievalJudger": { - "role": { - "role_prompt": codeRetrievalJudger_PROMPT, - "role_type": "assistant", - "role_name": "codeRetrievalJudger", - "role_desc": "", - "agent_type": "CodeRetrievalJudger" - # "agent_type": "BaseAgent" - }, - "prompt_config": CODE_RETRIEVAL_PROMPT_CONFIGS, - "prompt_manager_type": "CodeRetrievalPM", - "chat_turn": 1, - "focus_agents": [], - "focus_message_keys": [], - }, - }) -``` - - - -在我们构建phase、chain或者agent之后,可以通过函数的预打印功能,实现agents链路确认,避免在执行后才发现问题,可提前进行debug -``` -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - -phase_name = "baseGroupPhase" -phase 
= BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -phase.pre_print(query) - -## 完整信息确认 coagent.connector.configs中进行确认 -########################## -<<<>>> -########################## - -### Agent Profile -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. -ATTENTION: response carefully referenced "Response Output Format" in format. - -### Tool Information - -### Agent Infomation - Please ensure your selection is one of the listed roles. Available roles for selection: - "role name: tool_react -role description: Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.," -"role name: code_react -role description: Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. 
Each reply should contain only the code required for the current step.," - Please ensure select the Role from agent names, such as tool_react, code_react - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Current Plan - -### Response Output Format -**Thoughts:** think the reason step by step about why you selecte one role -**Role:** Select the role from agent names. - -### Begin!!! - -################### -<<<>>> -################### - -**Thoughts:** -**Role:** - - -########################### -<<<>>> -########################### -### Agent Profile -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools. -Please note that all the tools you can use are listed below. You can only choose from these tools for use. -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use. -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. - -### Tool Information - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Task Records - -### Response Output Format -**Thoughts:** According the previous observations, plan the approach for using the tool effectively. -... - -### Begin!!! - -################### -<<<>>> -################### -**Thoughts:** -**Action Status:** -**Action:** -**Observation:** -**Thoughts:** -**Action Status:** -**Action:** - -########################### -<<<>>> -########################### -### Agent Profile -When users need help with coding, your role is to provide precise and effective guidance. -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step. 
- -### Context Data - -#### Reference Documents - -#### Session Records - -### Response Output Format - -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem, -outline the plan for executing this step. - -**Action Status:** Set to 'stopped' or 'code_executing'. -If it's 'stopped', the action is to provide the final answer to the session records and executed steps. -If it's 'code_executing', the action is to write the code. -... - -### Begin!!! - -################### -<<<>>> -################### - -**Thoughts:** -**Action Status:** -**Action:** -**Observation:** -**Thoughts:** -**Action Status:** -**Action:** - -``` diff --git a/content/en/coagent/connector/customed_examples.md b/content/en/coagent/connector/customed_examples.md deleted file mode 100644 index 46c032b..0000000 --- a/content/en/coagent/connector/customed_examples.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Customed Examples -slug: Customed Examples -url: "coagent/customed-examples" -aliases: -- "/coagent/customed-examples" ---- - - -## 如何创建你个性化的 agent phase 场景 - -下面通过 autogen 的 auto_feedback_from_code_execution 构建过来,来详细演示如何自定义一个 agent phase 的构建 - -### 设计你的prompt结构 -``` -import os, sys, requests - -# from configs.model_config import * -from coagent.connector.phase import BasePhase -from coagent.connector.chains import BaseChain -from coagent.connector.schema import Message -from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS -import importlib - - -# update new agent configs -auto_feedback_from_code_execution_PROMPT = """#### Agent Profile - -You are a helpful AI assistant. Solve tasks using your coding and language skills. -In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. - 1. 
When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. - 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. -Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. -When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. -If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. -When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. -Reply "stopped" in the end when everything is done. - -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. - -#### Response Output Format - -**Thoughts:** Based on the question and observations above, provide the plan for executing this step. - -**Action Status:** Set to 'stopped' or 'code_executing'. 
If it's 'stopped', the action is to provide the final answer to the original question. If it's 'code_executing', the action is to write the code. - -**Action:** -# Write your code here -import os -... - - -**Observation:** Check the results and effects of the executed code. - -... (Repeat this Thoughts/Action/Observation cycle as needed) - -**Thoughts:** I now know the final answer - -**Action Status:** stopped - -**Action:** The final answer to the original input question -""" -``` - -### 开始配置 Prompt Configs -``` -AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [ - {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False} -] -``` - -### 更新完整的agent、chain、phase配置,以便后续更读取执行 -``` -from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS -import os - -## set a -AGETN_CONFIGS.update({ - "auto_feedback_from_code_execution": { - "role": { - "role_prompt": auto_feedback_from_code_execution_PROMPT, - "role_type": "assistant", - "role_name": "auto_feedback_from_code_execution", - "role_desc": "", - "agent_type": "ReactAgent" - }, - "prompt_config": AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS, - "chat_turn": 5, - "stop": "\n**Observation:**", - "focus_agents": [], - "focus_message_keys": [], - }, -}) -# update new chain configs -CHAIN_CONFIGS.update({ - "auto_feedback_from_code_executionChain": { - "chain_name": "auto_feedback_from_code_executionChain", - "chain_type": "BaseChain", - "agents": ["auto_feedback_from_code_execution"], - "chat_turn": 1, - "do_checker": False, - "chain_prompt": "" - } -}) - -# 
update phase configs -PHASE_CONFIGS.update({ - "auto_feedback_from_code_executionPhase": { - "phase_name": "auto_feedback_from_code_executionPhase", - "phase_type": "BasePhase", - "chains": ["auto_feedback_from_code_executionChain"], - "do_summary": False, - "do_search": False, - "do_doc_retrieval": False, - "do_code_retrieval": False, - "do_tool_retrieval": False, - "do_using_tool": False - }, -}) - -``` - - - -### 接下来就构建 phase 实例,开始执行 -``` -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.phase import BasePhase -from coagent.connector.schema import Message -import base64, openai - -# -os.environ["API_BASE_URL"] = "http://openai.com/v1/chat/completions" -os.environ["OPENAI_API_KEY"] = "sk-xxxx" -openai.api_key = "sk-xxxx" - -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) - -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -# -phase_name = "auto_feedback_from_code_executionPhase" -phase = BasePhase( - phase_name, - embed_config=embed_config, llm_config=llm_config, - base_phase_config = PHASE_CONFIGS, - base_chain_config = CHAIN_CONFIGS, - base_role_config = AGETN_CONFIGS, -) - - -# round-1 -query_content = """Plot a chart of META and TESLA's stock prices for the past year and save it as stock_price_ytd.png.""" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` \ No newline at end of file diff --git 
a/content/en/coagent/overview/agent-flow.md b/content/en/coagent/overview/agent-flow.md deleted file mode 100644 index 936b51c..0000000 --- a/content/en/coagent/overview/agent-flow.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Agent Flow -slug: Agent Flow -url: "coagent/agent-flow" -aliases: -- "/coagent/agent-flow" ---- - - -## Introduction to Core Connectors -To facilitate everyone's understanding of the entire CoAgent link, we use a Flow format to detail how to build through configuration settings. - -
    - 图片 -
    - - -
    Below, we will first introduce the related core components
    - -### Agent -At the design level of the Agent, we provide four basic types of Agents, which allows for the basic role settings of these Agents to meet the interaction and usage of a variety of common scenarios. -1. BaseAgent: Provides basic question and answer, tool usage, and code execution functions. It implements Input => Output according to the Prompt format. - -
    - 图片 -
    - -2. ExecutorAgent: Executes tasks in sequence from a task list based on the plan arranged by the User or the previous Agent, completing the related tasks. -3. ReactAgent: Provides standard React functionality, based on the issue to perform the current task. -4. electorAgent: Provides the functionality of choosing an Agent. - -It selects the appropriate Agent to respond based on the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which is subsequently managed by the Memory Manager. - -### Chain -Basic Chain: BaseChain, which connects the interaction of agents, completing the management of related messages and memory. - -### Phase -Basic Phase: BasePhase, which connects the interaction of chains, completing the management of related messages and memory. - -### Prompt Manager -Creation of prompts for each agent in a Multi-Agent link: - -- By simply setting prompt_input_keys and prompt_output_keys, one can reuse the preset Prompt Context creation logic, thus achieving rapid configuration of the agent prompt. -- The prompt manager module can also be redesigned with new key-context designs to implement a personalized Agent Prompt. - -### Memory Manager -Mainly used for the management of chat history, which is not yet completed: - -- Manages the reading and writing of chat history in the database, including user input, llm output, doc retrieval, code retrieval, search retrieval. -- Summarizes key information from the chat history to form a summary context, which serves as prompt context. -- Provides a search function to retrieve information related to the question from the chat history or the summary context, aiding in question and answer sessions. 
diff --git a/content/en/coagent/overview/multi-agent.md b/content/en/coagent/overview/multi-agent.md deleted file mode 100644 index f376ed1..0000000 --- a/content/en/coagent/overview/multi-agent.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: CoAgent -slug: CoAgent -url: "coagent/coagent" -aliases: -- "/coagent" -- "/coagent/multi-agent" -- "/coagent/coagent" -- "/coagent/coagent-overview" ---- - - -## 简介 -To enhance the performance of large language models (LLMs) in terms of inference accuracy, the industry has seen various innovative approaches to utilizing LLMs. From the earliest Chain of Thought (CoT), Text of Thought (ToT), to Graph of Thought (GoT), these methods have continually expanded the capability boundaries of LLMs. In dealing with complex problems, we can use the ReAct process to select, invoke, and execute tool feedback, achieving multi-round tool usage and multi-step execution. - -However, for more complex scenarios, such as the development of intricate code, single-function LLM Agents are clearly insufficient. Thus, the community has begun to develop combinations of multiple Agents, such as projects focused on metaGPT, GPT-Engineer, chatDev in the development domain, and AutoGen projects focused on automating the construction of Agents and Agent dialogue. - -After in-depth analysis of these frameworks, it has been found that most Agent frameworks are highly coupled, with poor usability and extensibility. They achieve specific scenarios in preset environments, but expanding these scenarios is fraught with difficulty. - -Therefore, we aim to build an extensible, user-friendly Multi-Agent framework to support ChatBots in retrieving knowledge base information while assisting with various common tasks such as daily office work, data analysis, and development operations. - -This Multi-Agent framework project incorporates excellent design elements from multiple frameworks, such as the message pool from metaGPT and the agent selector from autogen. - -
    - 图片 -
    - -The following modules will introduce the necessary components of the Multi Agent framework from five aspects: - -- **Agent Communication:** In the Multi-Agent framework, ensuring effective information exchange among Agents is crucial for managing context and improving Q&A efficiency. - - Follow a straightforward and intuitive chain-based dialogue principle, arranging Agents in a linear fashion to form an execution chain. - - Drawing from the Message Pool framework in metaGPT, Agents are allowed to push and subscribe to the Message Pool, making the chain more flexible. This is beneficial for fine-tuning the scenario of Prompt engineering but challenging to manage complex chain relationship analysis. - -- **Standard Operation Process (SOP)**: Standardizing the parsing and handling of LLM's generated results. - - Define the input and output scope of an Agent, assembling and parsing relevant Actions and Statuses to ensure the stability of the framework. - - Encapsulate a variety of fundamental Action execution modules, such as Tool Using, Planning, Coding, Direct Answering, final answer, etc., to meet the basic work requirements of an Agent. - -- **Plan and Executor**: Enhance LLM's tool usage, Agent scheduling, and code generation. Several basic chains have been set up, for example: - - a. Single-round Q&A, which can also be expanded to forms like CoT, ToT, GoT, etc. - - b. ReAct, a basic response decision-making process where the model sets SOP status to terminate the loop. - - c. Task Planning - Executor, where the task is completed and can end. -- **Long-short term memory Management**: The key difference between Multi-Agent and single Agent is that Multi-Agent needs to handle a large amount of communication information, similar to the process of human teamwork collaboration. 
Add an Agent specifically responsible for content summarization (similar to a meeting assistant) to summarize long-term memories and provide more effective information to the next Agent, rather than passing all content to the next one. -- **Human-agent interaction**: In the face of complex scenarios, human intervention is required in the Agent interaction process to provide feedback. Through the aforementioned Long-short term memory Management and Agent Communication processes, enable the LLM to accurately understand human intentions, thereby completing tasks more effectively. - -In summary, these five elements together construct a Multi-Agent framework, ensuring closer and more efficient cooperation between Agents while also adapting to more complex task requirements and a variety of interaction scenarios. By combining multiple Agent chains to implement a complete and complex project launch scenario (Dev Phase), such as Demand Chain (CEO), Product Argument Chain (CPO, CFO, CTO), Engineer Group Chain (Selector, Developer1~N), QA Engineer Chain (Developer, Tester), Deploy Chain (Developer, Deployer). 
- -## 模块分类 -- [connector](/coagent/connector) -- document_loaders -- embeddings -- llm_models -- orm -- sandbox -- service -- text_splitter -- tools -- utils - diff --git a/content/en/coagent/overview/prompt-manager.md b/content/en/coagent/overview/prompt-manager.md deleted file mode 100644 index 5ec59d3..0000000 --- a/content/en/coagent/overview/prompt-manager.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Prompt Manager -slug: Prompt Manager -url: "coagent/prompt-manager" -aliases: -- "/coagent/prompt-manager" ---- - - -### 提示管理器(Prompt Manager) -管理多智能体链路中的prompt创建 -- 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 -- 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 - -### Prompt预设模板结构 - -- Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 -- Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 - - Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 - - Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 - - Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 -- Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 -- Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 - -### Prompt自定义配置 - -#### Prompt模块参数 -- field_name:唯一的字段名称标识,必须提供。 -- function:指定如何处理输入数据的函数,必须提供。 -- title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 -- description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 -- is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 -- omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 - -#### Prompt配置示例 - -Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 - -在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 - -context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 -``` -[ - {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False}, - 
{"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": True}, - {"field_name": 'reference_documents', "function_name": 'handle_doc_info'}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'task_records', "function_name": 'handle_task_records'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'response', "function_name": 'handle_response', "title"="begin!!!", "is_context": False, "omit_if_empty": False} -] -``` - -### 未来规划 - -#### Prompt配置简化 - -未来的Prompt配置简化旨在降低用户面对复杂配置的难度。通过引入更直观的配置方法,我们计划使得Prompt配置不仅对高级用户友好,还能让初学者轻松上手。简化计划可能包括: - -- 预设配置短语:将复杂的配置字典转换为简洁的短语,每个短语都预定义了一个Prompt模块。用户将能够使用简单的字符串指令来快速配置Prompt,而无需深入了解所有参数。 -- 配置校验和建议:增加配置的即时校验,如果检测到配置错误或不一致性,自动提供修改建议,帮助用户优化Prompt结构。 - -#### 动作(Action)注册的改进计划 - -在现行系统中,智能体必须在其角色提示(role prompt)内定义所有的动作(actions)。这意味着智能体需要同时处理动作的意图识别和生成动作所需的输入数据,这一过程对语言模型的理解和推理能力提出了更高要求。 - -为了优化这一流程,我们打算在后续版本中对动作的输入生成和执行进行模块化。这将使智能体的工作重点转移至判断当前情境下应执行哪些动作,而不必负责具体的操作指令。在这种新的架构下,当需要执行某个动作时,将有专门的机制负责生成相应动作的具体输入指令。 - -这种分离将显著降低单个模块的复杂性,使得整个系统更加灵活、易于扩展,同时也提升了动作执行的效率和准确性。 diff --git a/content/en/coagent/overview/quick-start.md b/content/en/coagent/overview/quick-start.md deleted file mode 100644 index 9b55912..0000000 --- a/content/en/coagent/overview/quick-start.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: Quick Start -slug: Quick Start -url: "coagent/quick-start" -aliases: -- "/coagent/quick-start" ---- - - - - - -## Quick Start -### First, set up the LLM configuration -``` -import os, sys -import openai - -# llm config -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xxx" -openai.api_key = "sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -``` - -### Next, configure the LLM settings and vector model -``` -from 
coagent.llm_models.llm_config import EmbedConfig, LLMConfig - -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) - -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - -### Finally, choose a pre-existing scenario to execute -``` -from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS -from coagent.connector.phase import BasePhase -from coagent.connector.schema import Message - -# Copy the data to a working directory; specify the directory if needed (default can also be used) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# Choose a scenario to execute -phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1: Use a code interpreter to complete tasks -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart" -query = Message( - role_name="human", role_type="user", tools=[], - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -# phase.pre_print(query) # This function is used to preview the Prompt of the Agents' execution chain -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2: Execute tools -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) - -query_content = "Please check if there were any issues with the server at 127.0.0.1 at 10 o'clock; help me make a judgment" -query = Message( - role_name="human", role_type="user", tools=tools, - 
role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -# phase.pre_print(query) # This function is used to preview the Prompt of the Agents' execution chain -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -``` - -## Phase Introduction and Usage - -Below are some specific Phase introduced and how to use them. - -Feel free to brainstorm and create some interesting cases. - -### baseGroupPhase -The group usage Phase in autogen - -``` -# Copy the data to a working directory; specify the directory if needed (default can also be used) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# Set the log level to control the printing of the prompt, LLM output, or other information -os.environ["log_verbose"] = "0" - -phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart" - -query = Message( - role_name="human", role_type="user", tools=[], - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -# phase.pre_print(query) # This function is used to preview the Prompt of the Agents' execution chain -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### baseTaskPhase -The task splitting and multi-step execution scenario in xAgents - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# log-level,print 
prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "baseTaskPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) -# round-1 -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### codeReactPhase -The code interpreter scenario based on React - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# then, create a data analyze phase -phase_name = "codeReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, - jupyter_work_path=JUPYTER_WORK_PATH, -) - -# round-1 -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### codeToolReactPhase -The tool invocation and code interpreter scenario based on the React template - - -``` -TOOL_SETS = [ - "StockName", "StockInfo", - ] -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "codeToolReactPhase" - -phase = BasePhase( - phase_name, embed_config=embed_config, 
llm_config=llm_config, -) - -query_content = "查询贵州茅台的股票代码,并查询截止到当前日期(2023年12月24日)的最近10天的每日时序数据,然后用代码画出折线图并分析" - -query = Message( - role_name="human", role_type="user", - input_query=query_content, role_content=query_content, - origin_query=query_content, tools=tools - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### docChatPhase -The knowledge base retrieval Q&A Phase - -``` -# create your knowledge base -from io import BytesIO -from pathlib import Path - -from coagent.service.kb_api import create_kb, upload_doc -from coagent.service.service_factory import get_kb_details -from coagent.utils.server_utils import run_async -kb_list = {x["kb_name"]: x for x in get_kb_details(KB_ROOT_PATH)} - - -# create a knowledge base -kb_name = "example_test" -data = { - "knowledge_base_name": kb_name, - "vector_store_type": "faiss", # default - "kb_root_path": KB_ROOT_PATH, - "embed_model": embed_config.embed_model, - "embed_engine": embed_config.embed_engine, - "embed_model_path": embed_config.embed_model_path, - "model_device": embed_config.model_device, -} -run_async(create_kb(**data)) - -# add doc to knowledge base -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") -files = [file] -# if embedding init failed, you can use override = True -data = [{"override": True, "file": f, - "knowledge_base_name": kb_name, "not_refresh_vs_cache": False, - "kb_root_path": KB_ROOT_PATH, "embed_model": embed_config.embed_model, - "embed_engine": embed_config.embed_engine, "embed_model_path": embed_config.embed_model_path, - "model_device": embed_config.model_device, - } - for f in files] - -for k in data: - file = Path(file).absolute().open("rb") - filename = file.name - - from fastapi import UploadFile - from tempfile import SpooledTemporaryFile - - temp_file = SpooledTemporaryFile(max_size=10 * 1024 * 1024) - 
temp_file.write(file.read()) - temp_file.seek(0) - - k.update({"file": UploadFile(file=temp_file, filename=filename),}) - run_async(upload_doc(**k)) - - -# start to chat with knowledge base -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -# set chat phase -phase_name = "docChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) -# round-1 -query_content = "what modules does langchain have?" -query = Message( - role_name="human", role_type="user", - origin_query=query_content, - doc_engine_name=kb_name, score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content = "What is the purpose of prompts?" -query = Message( - role_name="human", role_type="user", - origin_query=query_content, - doc_engine_name=kb_name, score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### metagpt_code_devlop -The code construction Phase in metagpt - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "metagpt_code_devlop" -llm_config = LLMConfig( - model_name="gpt-4", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -query_content = "create a snake game by pygame" -query = Message(role_name="human", role_type="user", input_query=query_content, role_content=query_content, origin_query=query_content) - -output_message, output_memory = 
phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### searchChatPhase -The fixed Phase: search first, then answer directly with LLM - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "searchChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -query_content1 = "who is the president of the United States?" -query = Message( - role_name="human", role_type="user", - role_content=query_content1, input_query=query_content1, origin_query=query_content1, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content2 = "Who was the previous president of the United States, and is there any relationship between the two individuals?" -query = Message( - role_name="human", role_type="user", - role_content=query_content2, input_query=query_content2, origin_query=query_content2, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### toolReactPhase -The tool invocation scene based on the React template - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "toolReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) -query_content = "Please check if there were any issues with the server at 127.0.0.1 at 10 o'clock; help me make a judgment" -query = Message( - role_name="human", role_type="user", tools=tools, - role_content=query_content, input_query=query_content, origin_query=query_content - ) - -# 
phase.pre_print(query) # This function is used to preview the Prompt of the Agents' execution chain -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` \ No newline at end of file diff --git a/content/en/contribution/acknowledgements/d1.acknowledgements.md b/content/en/contribution/acknowledgements/d1.acknowledgements.md deleted file mode 100644 index 4e36379..0000000 --- a/content/en/contribution/acknowledgements/d1.acknowledgements.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Acknowledgements -slug: Acknowledgements -description: 介绍主要功能 -url: "contribution/acknowledgements" -aliases: -- "/contribution/acknowledgements" ---- - -The documentation homepage of CodeFuse-ai is built on [docura](https://github.com/docura/docura) - -The ChatBot project is based on [langchain-chatchat](https://github.com/chatchat-space/Langchain-Chatchat) and [codebox-api](https://github.com/shroominic/codebox-api). - -...... - -Deep gratitude is extended for their open-source contributions! diff --git a/content/en/contribution/contribute/d1.contribution.md b/content/en/contribution/contribute/d1.contribution.md deleted file mode 100644 index 2c5869e..0000000 --- a/content/en/contribution/contribute/d1.contribution.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Contribution Guide -slug: Contribution Guide -description: 介绍主要功能 -url: "contribution/contribution-guide" -aliases: -- "/contribution/contribution-guide" -- "/contribution" ---- - - -

    - 中文  |  English  -

    - - - - -Thank you for your interest in the Codefuse project. We warmly welcome any suggestions, opinions (including criticisms), comments, and contributions to the Codefuse project. - -Your suggestions, opinions, and comments on Codefuse can be directly submitted through GitHub Issues. - -There are many ways to participate in the Codefuse project and contribute to it: code implementation, test writing, process tool improvement, documentation enhancement, and more. We welcome any contributions and will add you to our list of contributors. - -Furthermore, with enough contributions, you may have the opportunity to become a Committer for Codefuse. - -For any questions, you can contact us for timely answers through various means including WeChat, Gitter (an instant messaging tool provided by GitHub), email, and more. - - -## Getting Started -If you are new to the Codefuse community, you can: -- Follow the Codefuse GitHub repository. -- Join related WeChat groups for Codefuse to ask questions at any time; - -Through the above methods, you can stay up-to-date with the development dynamics of the Codefuse project and express your opinions on topics of interest. - - -## Contributation Ways -This contribution guide is not just about writing code. We value and appreciate help in all areas. Here are some ways you can contribute: -- Documentation -- Issues -- Pull Requests (PR) - -### Improve Documentation -Documentation is the main way for you to understand Codefuse and is also where we need the most help! - -By browsing the documentation, you can deepen your understanding of Codefuse and also help you grasp the features and technical details of Codefuse. If you find any issues with the documentation, please contact us in time; - -If you are interested in improving the quality of the documentation, whether it is revising an address of a page, correcting a link, or writing a better introductory document, we are very welcoming! 
- -Most of our documentation is written in markdown format. You can directly modify and submit documentation changes in the docs/ directory on GitHub. For submitting code changes, please refer to Pull Requests. - -### If You Discover a Bug or Issue -If you discover a bug or issue, you can directly submit a new Issue through GitHub Issues, and someone will handle it regularly. For more details, see Issue Template.[Issue Template](/contribution/issue-report) - -You can also choose to read and analyze the code to fix it yourself (it is best to communicate with us before doing so, as someone might already be working on the same issue), and then submit a Pull Request. - -### Modify Code and Submit a PR (Pull Request) -You can download the code, compile, install, and deploy to try it out (you can refer to the compilation documentation to see if it works as you expected). If there are any issues, you can directly contact us, submit an Issue, or fix it yourself by reading and analyzing the source code. For more details, see[How to Submit a PR.](/contribution/pull-request) - -Whether it's fixing a bug or adding a feature, we warmly welcome it. If you wish to submit code to Doris, you need to fork the code repository to your project space on GitHub, create a new branch for your submitted code, add the original project as an upstream, and submit a PR. The method for submitting a PR can be referenced in the Pull Request documentation. \ No newline at end of file diff --git a/content/en/contribution/contribute/d1.issue.md b/content/en/contribution/contribute/d1.issue.md deleted file mode 100644 index ea533e6..0000000 --- a/content/en/contribution/contribute/d1.issue.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Issue Report -slug: Issue Report -description: 介绍主要功能 -url: "contribution/issue-report" -aliases: -- "/contribution/issue-report" ---- - - -

    - 中文  |  English  -

    - - - -## Issue Type -Issues can be categorized into three types: -- Bug: Issues where code or execution examples contain bugs or lack dependencies, resulting in incorrect execution. -- Documentation: Discrepancies in documentation, inconsistencies between documentation content and code, etc. -- Feature: New functionalities that evolve from the current codebase. - -## Issue Template -### Issue: Bug Template - -**Checklist before submitting an issue** -
    Please confirm that you have checked the document, issues, discussions (GitHub feature), and other publicly available documentation. -- I have searched through all documentation related to Codefuse. -- I used GitHub search to find a similar issue, but did not find one. -- I have added a very descriptive title for this issue. - -**System Information** -
    Please confirm your operating system, such as mac-xx, windows-xx, linux-xx. - -**Code Version** -
    Please confirm the code version or branch, such as master, release, etc. - -**Problem Description** -
    Describe the problem you encountered, what you want to achieve, or the bug encountered during code execution. - -**Code Example** -
    Attach your execution code and relevant configuration so the issue can be quickly triaged and reproduced. -
-**Error Information, Logs** -
    The error logs and related information after executing the above code example. - -**Related Dependencies** -
    Taking the chatbot project as an example: -- connector -- codechat -- sandbox -- ... - - -### Issue: Documentation Template - -**Issue with current documentation:** -
    Please point out any problems, typos, or confusing points in the current documentation. - -**Idea or request for content** -
    How do you think the documentation could be worded more clearly or accurately? -
-### Issue: Feature Template -
-**Checklist before submitting an issue** -
    Please confirm that you have checked the document, issues, discussions (GitHub feature), and other publicly available documentation. -- I have searched through all documentation related to Codefuse. -- I used GitHub Issue search to find a similar issue, but did not find one. -- I have added a very descriptive title for this issue. - -**Feature Description** -
    Describe the purpose of this feature. - -**Related Examples** -
    Provide references to related documents or repositories; please include links to any relevant GitHub repos, papers, or other resources. -
-**Motivation** -
    Describe the motivation for this feature. Why is it needed? Provide enough context information to help understand the demand for this feature. - -**Contribution** -
    How you can contribute to the building of this feature (if you are participating). \ No newline at end of file diff --git a/content/en/contribution/contribute/d1.pr.md b/content/en/contribution/contribute/d1.pr.md deleted file mode 100644 index 768f0ae..0000000 --- a/content/en/contribution/contribute/d1.pr.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Pull Request -slug: Pull Request -description: 介绍主要功能 -url: "contribution/pull-request" -aliases: -- "/contribution/pull-request" ---- - - -

    - 中文  |  English  -

    - - -## Contribution - -### Pre-Checklist -- First, confirm whether you have checked the document, issue, discussion (GitHub features), or other publicly available documentation. -- Find the GitHub issue you want to address. If none exists, create an issue or draft PR and ask a Maintainer for a check -- Check for related, similar, or duplicate pull requests -- Create a draft pull request -- Complete the PR template for the description -- Link any GitHub issue(s) that are resolved by your PR - -### Description - -A description of the PR should be articulated in concise language, highlighting the work completed by the PR. See specific standards at[Commit Format Specification](#Commit-Format-Specification) - -### Related Issue -#xx if has - -### Test Code with Result -Please provide relevant test code when necessary. - - - -## Commit Format Specification -A commit consists of a "title" and a "body." The title should generally be in lowercase, while the first letter of the body should be uppercase. - -### Title -The title of the commit message: `[]() (#pr)` - - -### Type - Available Options - -本次提交的类型,限定在以下类型(全小写) -- fix: Bug fixes -- feature: New features -- feature-wip: Features that are currently in development, such as partial code for a function. -- improvement: Optimizations and improvements to existing features -- style: Adjustments to code style -- typo: Typographical errors in code or documentation -- refactor: Code refactoring (without changing functionality) -- performance/optimize: Performance optimization -- test: Addition or fix of unit tests -- deps: Modifications to third-party dependencies -- community: Community-related changes, such as modifying Github Issue templates, etc. - -Please note: - -If multiple types occur in one commit, add multiple types. - -If code refactoring leads to performance improvement, both [refactor][optimize] can be added. - -Other types not listed above should not appear. 
If necessary, new types must be added to this document. - -### Scope - Available Options -The scope of the modules involved in the current submission. Due to the multitude of functional modules, only a few are listed here, and this list will be updated continuously based on needs. - -For example, using a chatbot framework: -connector -codechat -sandbox -... - -Please note: - -Try to use options that are already listed. If you need to add new ones, please update this document promptly. - -### Subject Content -The title should clearly indicate the main content of the current submission. - -For Example -`[feature](coagent)<增加antflow兼容和增加coagent demo>` -## Example -comming soon - - -## Reference -[doris-commit-format](https://doris.apache.org/zh-CN/community/how-to-contribute/commit-format-specification) \ No newline at end of file diff --git a/content/en/docs/b2.codefuseDevopsEval.md b/content/en/docs/b2.codefuseDevopsEval.md deleted file mode 100644 index 5bcb181..0000000 --- a/content/en/docs/b2.codefuseDevopsEval.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: codefuse-devops-eval -slug: codefuse-devops-eval -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-eval" ---- - -Comming soon \ No newline at end of file diff --git a/content/en/docs/b3.codefuseDevopsModel.md b/content/en/docs/b3.codefuseDevopsModel.md deleted file mode 100644 index d1ec739..0000000 --- a/content/en/docs/b3.codefuseDevopsModel.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: codefuse-devops-model -slug: codefuse-devops-model -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-model" ---- - -Comming soon \ No newline at end of file diff --git a/content/en/docs/b4.MFTCoder.md b/content/en/docs/b4.MFTCoder.md deleted file mode 100644 index d144d30..0000000 --- a/content/en/docs/b4.MFTCoder.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: MFTCoder -slug: MFTCoder -description: 介绍主要功能 -aliases: -- "/docs/mftcoder" ---- - -## MFTCoder -MFTCoder \ No newline at end of file diff --git 
a/content/en/docs/b5.CodeFuseModelCache.md b/content/en/docs/b5.CodeFuseModelCache.md deleted file mode 100644 index 1ae68c9..0000000 --- a/content/en/docs/b5.CodeFuseModelCache.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CodeFuse-ModelCache -slug: CodeFuse-ModelCache -description: 介绍主要功能 -aliases: -- "/docs/codefuse-modelcache" ---- - -## CodeFuse-ModelCache -CodeFuse-ModelCache \ No newline at end of file diff --git a/content/en/docs/b6.FasterTransformer4CodeFuse.md b/content/en/docs/b6.FasterTransformer4CodeFuse.md deleted file mode 100644 index d26f074..0000000 --- a/content/en/docs/b6.FasterTransformer4CodeFuse.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: FasterTransformer4CodeFuse -slug: FasterTransformer4CodeFuse -description: 介绍主要功能 -aliases: -- "/docs/fastertransformer4codefuse" ---- - -## FasterTransformer4CodeFuse -FasterTransformer4CodeFuse \ No newline at end of file diff --git a/content/en/docs/b7.TestAgent.md b/content/en/docs/b7.TestAgent.md deleted file mode 100644 index f5adc14..0000000 --- a/content/en/docs/b7.TestAgent.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Test-Agent -slug: Test-Agent -description: 介绍主要功能 -aliases: -- "/docs/test-agent" ---- - -## Test-Agent -Test-Agent \ No newline at end of file diff --git a/content/en/docs/b8.CodeFuseQuery.md b/content/en/docs/b8.CodeFuseQuery.md deleted file mode 100644 index 2800529..0000000 --- a/content/en/docs/b8.CodeFuseQuery.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CodeFuse-Query -slug: CodeFuse-Query -description: 介绍主要功能 -aliases: -- "/docs/codefuse-query" ---- - -## CodeFuse-Query -CodeFuse-Query \ No newline at end of file diff --git a/content/en/docs/chatbot/c1.quickstart.md b/content/en/docs/chatbot/c1.quickstart.md deleted file mode 100644 index 68a3118..0000000 --- a/content/en/docs/chatbot/c1.quickstart.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: QuickStart -slug: QuickStart -description: 介绍主要功能 -url: "docs/codefuse-chatbot-quickstart" -aliases: -- 
"/docs/codefuse-chatbot-quickstart" ---- - -

    - 中文  |  English  -

    - -## 🚀 Quick Start - -To deploy private models, please install the NVIDIA driver by yourself. -This project has been tested on Python 3.9.18 and CUDA 11.7 environments, as well as on Windows and macOS systems with x86 architecture. -For Docker installation, private LLM access, and related startup issues, see: [Start-detail...](/docs/start-detail) - -### Preparation of Python environment - -- It is recommended to use conda to manage the python environment (optional) -```bash -# Prepare conda environment -conda create --name Codefusegpt python=3.9 -conda activate Codefusegpt -``` - -- Install related dependencies -```bash -cd Codefuse-ChatBot -pip install -r requirements.txt -``` - -### Basic Configuration - -```bash -# Modify the basic configuration for service startup -cd configs -cp model_config.py.example model_config.py -cp server_config.py.example server_config.py - -# model_config#11~12 If you need to use the OpenAI interface, the OpenAI interface key -os.environ["OPENAI_API_KEY"] = "sk-xxx" -# Replace with the api_base_url you need -os.environ["API_BASE_URL"] = "https://api.openai.com/v1" - -# vi model_config#LLM_MODEL The language model you need to choose -LLM_MODEL = "gpt-3.5-turbo" -LLM_MODELs = ["gpt-3.5-turbo"] - -# vi model_config#EMBEDDING_MODEL The private vector model you need to choose -EMBEDDING_ENGINE = 'model' -EMBEDDING_MODEL = "text2vec-base" - -# Example of vector model access, modify model_config#embedding_model_dict -# If the model directory is: -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese -# Configure as follows -"text2vec-base": "shibing624/text2vec-base-chinese" - - -# vi server_config#8~14, It's recommended to use a container to start the service to prevent environment conflicts when installing other dependencies using the codeInterpreter feature -DOCKER_SERVICE = True -# Whether to use a container sandbox -SANDBOX_DO_REMOTE = True -``` - -### Start the Service - -By default, only webui related 
services are started, and fastchat is not started (optional). -```bash -# If you need to support the codellama-34b-int4 model, you need to patch fastchat -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py -# Modify examples/llm_api.py#258 to kwargs={"gptq_wbits": 4}, - -# Start llm-service (optional) -python examples/llm_api.py -``` - -For more LLM access methods, see [Details...](/docs/fastchat) -```bash -# After completing the server_config.py configuration, you can start with one click -cd examples -python start.py -``` \ No newline at end of file diff --git a/content/en/docs/chatbot/roadmap.md b/content/en/docs/chatbot/roadmap.md deleted file mode 100644 index 7c2854b..0000000 --- a/content/en/docs/chatbot/roadmap.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: ChatBot-RoadMap -slug: ChatBot-RoadMap -description: 介绍主要功能 -url: "docs/chatbot-roadmap" -aliases: -- "/docs/chatbot-roadmap" ---- - -

    - 中文  |  English  -

    - - -## RoadMap - -
    - 图片 -
    -
    - -Roadmap Overview - -- [x] Sandbox Environment ✅ - - [x] Isolated sandbox environment for code execution ✅ - - [x] File upload and download ✅ - - [ ] Support for Java execution environment ⬜ -- [x] Vector Database & Retrieval ✅ - - [x] Task retrieval ✅ - - [x] Tool retrieval ✅ -- [x] Prompt Management ✅ -- [x] Memory Management ✅ -- [x] Multi Agent Framework ✅ - - [ ] PRD (Product Requirement Document), system analysis, interface design ⬜ - - [ ] Generate code based on requirement documents, system analysis, and interface design ⬜ - - [ ] Automated testing, automated debugger ⬜ - - [ ] Operations process integration (ToolLearning) ⬜ - - [ ] Fully automated end-to-end process ⬜ -- [x] Integration with LLM based on fastchat ✅ -- [x] Integration with Text Embedding based on sentencebert ✅ -- [x] Improved vector loading speed ✅ -- [x] Connector ✅ - - [x] React Mode based on langchain ✅ - - [x] Tool retrieval completed with langchain ✅ -- [ ] General Capability for Web Crawl ⬜ - - [x] Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. ✅ - - [ ] Issue document ⬜ - - [ ] SDK Library Document ⬜ - -v0.0 -- [x] Sandbox Environment ✅ - - [x] Isolated sandbox environment for code execution ✅ -- [x] Integration with LLM based on fastchat ✅ -- [x] Integration with Text Embedding based on sentencebert ✅ -- [x] General Capability for Web Crawl: Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. ✅ - -Done -
    - -v0.1 -- [x] Sandbox Environment: File upload and download ✅ -- [x] Vector Database & Retrieval ✅ - - [x] Task retrieval ✅ - - [x] Tool retrieval ✅ -- [x] Connector ✅ - - [x] React Mode based on langchain ✅ -- [x] Integration with Text Embedding based on sentencebert: Improved vector loading speed ✅ - -Done -
    - -v0.2 -- [x] Prompt Management ✅ -- [x] Memory Management ✅ -- [x] Vector Database & Retrieval ✅ - -Done -
    - -v0.3 -- [x] Sandbox Environment ✅ - - [ ] Support for Java execution environment ⬜ -- [x] Multi Agent ✅ - - [ ] PRD (Product Requirement Document), system analysis, interface design ⬜ - - [ ] Generate code based on requirement documents, system analysis, and interface design ⬜ - - [ ] Automated testing, automated debugger ⬜ - - [ ] Operations process integration (ToolLearning) ⬜ - - [ ] Fully automated end-to-end process ⬜ -- [x] General Capability for Web Crawl ✅ - - [ ] Issue document ⬜ - - [ ] SDK Library Document ⬜ - -DDL: 2024.12.31 -
    \ No newline at end of file diff --git a/content/en/docs/codefuse-modelcache/4_release.md b/content/en/docs/codefuse-modelcache/4_release.md deleted file mode 100644 index 6f4c683..0000000 --- a/content/en/docs/codefuse-modelcache/4_release.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Release Note -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-release" -aliases: -- "/docs/codefuse-modelcache-release" ---- - - -| 时间 |功能 |版本号| -| ----- | ------ | ----- | -| 20230430| Completed GPTCache research, open-source process running through OpenAI interface, single-node form |无| -| 20230509| 1. Completed technology selection and upstream/downstream interaction scheme
    2. Redeveloped database module, replaced SQLAlchemy framework
    3. Refactored llm_handler module, compatible with codegpt, adapted codegpt model parameters| V0.1.0| -| 20230519| 1. Dynamically selected codegpt service mode based on environment
    2. Capability for local model loading and pre-loading
    3. Added dynamic loading capability for local paths based on environment| V0.1.1| -| 20230522| 1. Architecture optimized, adjusted to a Redis-like structure, decoupled large model invocation
    2. Switched relational database from SQLite to OceanBase
    3. Switched vector database from FAISS to Milvus
    4. Model data isolation capability
    5. Added core modules adapter_query, adapter_insert |V0.2.0| -| 20230531| 1. Online environment launched with dynamic sensing capability
    2. Embedding model evaluation and selection
    3. Added staging environment and data isolation capability
    4. Added exposure capability for the original query field| V0.2.1| -| 20230607| 1. Optimized relational database access performance
    2. Optimized environment and model isolation capabilities| V0.2.2| -| 20230630| 1. Added large model embedding layer adaptation module in modelCache
    2. Added adoption rate statistical capability |V0.2.3| -| 20230730| 1. Added cache statistics feature
    2. Added data deletion function interface
    3. One-click cache clearing capability launched
    4. Developed multi-turn conversation ability, supporting system commands and multi-turn dialogues| V0.3.0| -| 20230830| 1. Added asynchronous processing capability, performance improved by over 20%
    2. Architecture change, decoupled embedding inference and business processing logic
    3. Blacklist filtering feature |V0.3.1| \ No newline at end of file diff --git a/content/en/docs/codefuse-query/1_abstract.en.md b/content/en/docs/codefuse-query/1_abstract.en.md deleted file mode 100644 index 55c5f2c..0000000 --- a/content/en/docs/codefuse-query/1_abstract.en.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Abstract -slug: Abstract -description: 介绍主要功能 -url: "docs/abstract" -aliases: -- "/docs/abstract" ---- - -# Abstract -With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. For example, a security team might need sophisticated algorithms like context-sensitive taint analysis to review smaller codebases, while project managers might need a lighter algorithm, such as one that calculates cyclomatic complexity, to measure developer productivity on larger codebases. - -These diversified needs, coupled with the common computational resource constraints in large organizations, pose a significant challenge. Traditional tools, with their problem-specific computation methods, often fail to scale in such environments. This is why we introduced CodeQuery, a centralized data platform specifically designed for large-scale static analysis. -In implementing CodeQuery, we treat source code and analysis results as data, and the execution process as big data processing, a significant departure from traditional tool-centric approaches. 
We leverage common systems in large organizations, such as data warehouses, data computation facilities like MaxCompute and Hive, OSS object storage, and flexible computing resources like Kubernetes, allowing CodeQuery to integrate seamlessly into these systems. This approach makes CodeQuery highly maintainable and scalable, capable of supporting diverse needs and effectively addressing changing demands. Furthermore, CodeQuery's open architecture encourages interoperability between various internal systems, facilitating seamless interaction and data exchange. This level of integration and interaction not only increases the degree of automation within the organization but also improves efficiency and reduces the likelihood of manual errors. By breaking down information silos and fostering a more interconnected, automated environment, CodeQuery significantly enhances the overall productivity and efficiency of the software development process. -Moreover, CodeQuery's data-centric approach offers unique advantages when addressing domain-specific challenges in static source code analysis. For instance, source code is typically a highly structured and interconnected dataset, with strong informational and relational ties to other code and configuration files. By treating code as data, CodeQuery can adeptly handle these issues, making it especially suitable for use in large organizations where codebases evolve continuously but incrementally, with most code undergoing minor changes daily while remaining stable. CodeQuery also supports use cases like code-data based Business Intelligence (BI), generating reports and dashboards to aid in monitoring and decision-making processes. Additionally, CodeQuery plays an important role in analyzing training data for large language models (LLMs), providing deep insights to enhance the overall effectiveness of these models. - -In the current field of static analysis, CodeQuery introduces a new paradigm. 
It not only meets the needs of analyzing large, complex codebases but is also adaptable to the ever-changing and diversified scenarios of static analysis. CodeQuery's data-centric approach gives it a unique advantage in dealing with code analysis issues in big data environments. Designed to address static analysis problems in large-scale software development settings, it views both source code and analysis results as data, allowing it to integrate flexibly into various systems within large organizations. This approach not only enables efficient handling of large codebases but can also accommodate various complex analysis needs, thereby making static analysis work more effective and accurate. - -The characteristics and advantages of CodeQuery can be summarized as follows: - -- **Highly Scalable**: CodeQuery can handle large codebases and adapt to different analysis needs. This high level of scalability makes CodeQuery particularly valuable in large organizations. -- **Data-Centric**: By treating source code and analysis results as data, CodeQuery's data-centric approach gives it a distinct edge in addressing code analysis problems in big data environments. -- **Highly Integrated**: CodeQuery can integrate seamlessly into various systems within large organizations, including data warehouses, data computation facilities, object storage, and flexible computing resources. This high level of integration makes the use of CodeQuery in large organizations more convenient and efficient. -- **Supports Diverse Needs**: CodeQuery can process large codebases and accommodate various complex analysis needs, including QoS analysis, cross-language analysis, algorithmic needs, and performance requirements. - -CodeQuery is a powerful static code analysis platform, suitable for large-scale, complex codebase analysis scenarios. Its data-centric approach and high scalability give it a unique advantage in the modern software development environment. 
As static code analysis technology continues to evolve, CodeQuery is expected to play an increasingly important role in this field. \ No newline at end of file diff --git a/content/en/docs/mftcoder/2_quickstart.md b/content/en/docs/mftcoder/2_quickstart.md deleted file mode 100644 index 34d6a0e..0000000 --- a/content/en/docs/mftcoder/2_quickstart.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: QuickStart -slug: QuickStart -description: QuickStart Document -url: /docs/mftcoder-quickstart -aliases: -- "/docs/mftcoder-quickstart" ---- - - - - -## Requirements -To begin, ensure that you have successfully installed CUDA (version >= 11.4, preferably 11.7) along with the necessary drivers. Additionally, make sure you have installed torch (version 2.0.1). - -Next, we have provided an init_env.sh script to simplify the installation of required packages. Execute the following command to run the script: -```bash -sh init_env.sh -``` -We highly recommend training with flash attention(version >= 2.1.0, preferably 2.3.6), please refer to the following link for installation instructions: https://github.com/Dao-AILab/flash-attention - - -## Training -As mentioned above, we open source two training frameworks. You could refer to their own READMEs for more details as followed. 
- -If you are familiar with open source ```transformers```, ```DeepSpeed``` or ```FSDP```, we highly recommend you try: - -🚀🚀 [**MFTCoder-accelerate: Accelerate + Deepspeed/FSDP Codebase for MFT(Multi-task Finetuning)**](/docs/mftcoder-accelerate) - - -If you want to explore some new framework like atorch, you could check: - -🚀 [MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning)](/docs/mftcoder-atorch) - - -## Models - -We are excited to release the following two CodeLLMs trained by MFTCoder, now available on both HuggingFace and ModelScope: - - -| Model | HuggingFace Links | ModelScope Links | Base Model | Num of examples trained | Batch Size | Seq Length | -|--------------------------------------|---------------------------------------------------------------------------|---------------------------------------------------------------------------------|----------------------|------|------------|------------| -| 🔥 CodeFuse-DeepSeek-33B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-DeepSeek-33B) | DeepSeek-coder-33B | 60万 | 80 | 4096 | -| 🔥 CodeFuse-Mixtral-8x7B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-Mixtral-8x7B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-Mixtral-8x7B) | Mixtral-8x7B | 60万 | 80 | 4096 | -| 🔥 CodeFuse-CodeLlama-34B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B) | CodeLlama-34b-Python | 60万 | 80 | 4096 | -| 🔥 CodeFuse-CodeLlama-34B-4bits | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | CodeLlama-34b-Python | | | 4096 | -| 🔥 CodeFuse-StarCoder-15B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-StarCoder-15B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-StarCoder-15B) | StarCoder-15B | 60万 | 80 
| 4096 | -| 🔥 CodeFuse-QWen-14B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-QWen-14B) | Qwen-14b | 110万 | 256 | 4096 | -| 🔥 CodeFuse-CodeGeex2-6B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeGeex2-6B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeGeex2-6B) | CodeGeex2-6B | 110万 | 256 | 4096 | - - -## Datasets -We are also pleased to release two code-related instruction datasets, meticulously selected from a range of datasets to facilitate multitask training. Moving forward, we are committed to releasing additional instruction datasets covering various code-related tasks. - -| Dataset | Description | -|-----------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [⭐ Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k) | Based on open-evol-instruction-80k, filter out low-quality, repeated, and similar instructions to HumanEval, thus get high-quality code instruction dataset. | -| [⭐ CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k) | python code exercise instruction dataset | diff --git a/content/en/docs/overview/b1.codefusechatbot.md b/content/en/docs/overview/b1.codefusechatbot.md deleted file mode 100644 index 198f850..0000000 --- a/content/en/docs/overview/b1.codefusechatbot.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Codefuse-ChatBot Development by Private Knowledge Augmentation -slug: codefuse-chatbot -language: en -description: 介绍主要功能 -aliases: -- "/docs/codefuse-chatbot" ---- - -

    - 中文  |  English  -

    - - -This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface. - - -## 📜 Contents -- [🤝 Introduction](#-introduction) -- [🧭 Technical Route](#-technical-route) - -## 🤝 Introduction - -💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. It transitions gradually from the traditional development and operations mode of querying information from various sources and operating on standalone, disparate platforms to an intelligent development and operations mode based on large-model Q&A, changing people's development and operations habits. - -- **🧠 Intelligent Scheduling Core:** Constructed a well-integrated scheduling core system that supports multi-mode one-click configuration, simplifying the operational process.[Use Introduction](/docs/multi-agent) -- **💻 Comprehensive Code Repository Analysis:** Achieved in-depth understanding at the repository level and coding and generation at the project file level, enhancing development efficiency. -- **📄 Enhanced Document Analysis:** Integrated document knowledge bases with knowledge graphs, providing deeper support for document analysis through enhanced retrieval and reasoning. -- **🔧 Industry-Specific Knowledge:** Tailored a specialized knowledge base for the DevOps domain, supporting the self-service one-click construction of industry-specific knowledge bases for convenience and practicality. 
-- **🤖 Compatible Models for Specific Verticals:** Designed small models specifically for the DevOps field, ensuring compatibility with related DevOps platforms and promoting the integration of the technological ecosystem. - -🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API.[Access Demo](/docs/fastchat) - -👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the CodefuseGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of "Making Development Seamless for Everyone." - - -
    - Image -
    - -🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API. - -👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the DevOpsGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of "Making Development Seamless for Everyone." - -## 🧭 Technical Route -
    - Image -
    - -- 🧠 **Multi-Agent Schedule Core:** Easily configurable to create interactive intelligent agents. -- 🕷️ **Multi Source Web Crawl:** Offers the capability to crawl specified URLs for collecting the required information. -- 🗂️ **Data Processor:** Effortlessly handles document loading, data cleansing, and text segmentation, integrating data from different sources. -- 🔤 **Text Embedding & Index:**:Users can easily upload files for document retrieval, optimizing the document analysis process. -- 🗄️ **Vector Database & Graph Database:** Provides flexible and powerful data management solutions. -- 📝 **Prompt Control & Management:**:Precisely defines the contextual environment for intelligent agents. -- 🚧 **SandBox:**:Safely executes code compilation and actions. -- 💬 **LLM:**:Supports various open-source models and LLM interfaces. -- 🛠️ **API Management::** Enables rapid integration of open-source components and operational platforms. - -For implementation details, see: [Technical Route Details](sources/readme_docs/roadmap.md) diff --git a/content/en/docs/overview/b2.codefuseDevopsEval.md b/content/en/docs/overview/b2.codefuseDevopsEval.md deleted file mode 100644 index 71b2ba0..0000000 --- a/content/en/docs/overview/b2.codefuseDevopsEval.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: codefuse-devops-eval -slug: codefuse-devops-eval -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-eval" ---- - -

    - - - -DevOps-Eval is a comprehensive evaluation suite specifically designed for foundation models in the DevOps field. We hope DevOps-Eval could help developers, especially in the DevOps field, track the progress and analyze the important strengths/shortcomings of their models. - - -📚 This repo contains questions and exercises related to DevOps, including the AIOps, ToolLearning; - -💥️ There are currently **7486** multiple-choice questions spanning 8 diverse general categories, as shown [below](/images/devops_eval/data_info.png). - -🔥 There are a total of **2840** samples in the AIOps subcategory, covering scenarios such as **log parsing**, **time series anomaly detection**, **time series classification**, **time series forecasting**, and **root cause analysis**. - -🔧 There are a total of **1509** samples in the ToolLearning subcategory, covering 239 tool scenes across 59 fields. - -

    - -## 🏆 Leaderboard -Below are zero-shot and five-shot accuracies from the models that we evaluate in the initial release. We note that five-shot performance is better than zero-shot for many instruction-tuned models. -### 👀 DevOps -#### Zero Shot - -| **ModelName** | plan | code | build | test | release | deploy | operate | monitor | **AVG** | -|:------------------------:|:-----:|:-----:|:-----:|:------:|:--------:|:------:|:-------:|:--------:|:-----------:| -| DevOpsPal-14B-Chat | 60.61 | 78.35 | 84.86 | 84.65 | 87.26 | 82.75 | 69.89 | 79.17 | 78.23 | -| DevOpsPal-14B-Base | 54.55 | 77.82 | 83.49 | 85.96 | 86.32 | 81.96 | 71.18 | 82.41 | 78.23 | -| Qwen-14B-Chat | 60.61 | 75.4 | 85.32 | 84.21 | 89.62 | 82.75 | 69.57 | 80.56 | 77.18 | -| Qwen-14B-Base | 57.58 | 73.81 | 84.4 | 85.53 | 86.32 | 81.18 | 70.05 | 80.09 | 76.19 | -| Baichuan2-13B-Base | 60.61 | 69.42 | 79.82 | 79.82 | 82.55 | 81.18 | 70.37 | 83.8 | 73.73 | -| Baichuan2-13B-Chat | 60.61 | 68.43 | 77.98 | 80.7 | 81.6 | 83.53 | 67.63 | 84.72 | 72.9 | -| DevOpsPal-7B-Chat | 54.55 | 69.11 | 83.94 | 82.02 | 76.89 | 80 | 64.73 | 77.78 | 71.92 | -| DevOpsPal-7B-Base | 54.55 | 68.96 | 82.11 | 78.95 | 80.66 | 76.47 | 65.54 | 78.7 | 71.69 | -| Qwen-7B-Base | 53.03 | 68.13 | 78.9 | 75.44 | 80.19 | 80 | 65.06 | 80.09 | 71.09 | -| Qwen-7B-Chat | 57.58 | 66.01 | 80.28 | 79.82 | 76.89 | 77.65 | 62.64 | 79.17 | 69.75 | -| Baichuan2-7B-Chat | 54.55 | 63.66 | 77.98 | 76.32 | 71.7 | 73.33 | 59.42 | 79.63 | 66.97 | -| Internlm-7B-Chat | 60.61 | 62.15 | 77.06 | 76.32 | 66.98 | 74.51 | 60.39 | 78.24 | 66.27 | -| Baichuan2-7B-Base | 56.06 | 62.45 | 75.69 | 70.61 | 74.06 | 69.8 | 61.67 | 75.93 | 66.21 | -| Internlm-7B-Base | 54.55 | 58.29 | 79.36 | 78.95 | 77.83 | 70.59 | 65.86 | 75.93 | 65.99 | - - -#### Five Shot - -| **ModelName** | plan | code | build | test | release | deploy | operate | monitor | **AVG** | 
-|:------------------------:|:-----:|:-----:|:-----:|:------:|:--------:|:------:|:-------:|:--------:|:---------:| -| DevOpsPal-14B-Chat | 63.64 | 79.49 | 81.65 | 85.96 | 86.79 | 86.67 | 72.95 | 81.48 | 79.69 | -| DevOpsPal-14B-Base | 62.12 | 80.55 | 82.57 | 85.53 | 85.85 | 84.71 | 71.98 | 80.09 | 79.63 | -| Qwen-14B-Chat | 65.15 | 76 | 82.57 | 85.53 | 84.91 | 84.31 | 70.85 | 81.48 | 77.81 | -| Qwen-14B-Base | 66.67 | 76.15 | 84.4 | 85.53 | 86.32 | 80.39 | 72.46 | 80.56 | 77.56 | -| Baichuan2-13B-Base | 63.64 | 71.39 | 80.73 | 82.46 | 81.13 | 84.31 | 73.75 | 85.19 | 75.8 | -| Qwen-7B-Base | 75.76 | 72.52 | 78.9 | 81.14 | 83.96 | 81.18 | 70.37 | 81.94 | 75.36 | -| Baichuan2-13B-Chat | 62.12 | 69.95 | 76.61 | 84.21 | 83.49 | 79.61 | 71.98 | 80.56 | 74.12 | -| DevOpsPal-7B-Chat | 66.67 | 69.95 | 83.94 | 81.14 | 80.19 | 82.75 | 68.6 | 76.85 | 73.61 | -| DevOpsPal-7B-Base | 69.7 | 69.49 | 82.11 | 81.14 | 82.55 | 82.35 | 67.15 | 79.17 | 73.35 | -| Qwen-7B-Chat | 65.15 | 66.54 | 82.57 | 81.58 | 81.6 | 81.18 | 65.38 | 81.02 | 71.69 | -| Baichuan2-7B-Base | 60.61 | 67.22 | 76.61 | 75 | 77.83 | 78.43 | 67.31 | 79.63 | 70.8 | -| Internlm-7B-Chat | 60.61 | 63.06 | 79.82 | 80.26 | 67.92 | 75.69 | 60.06 | 77.31 | 69.21 | -| Baichuan2-7B-Chat | 60.61 | 64.95 | 81.19 | 75.88 | 71.23 | 75.69 | 64.9 | 79.17 | 69.05 | -| Internlm-7B-Base | 62.12 | 65.25 | 77.52 | 80.7 | 74.06 | 78.82 | 63.45 | 75.46 | 67.17 | - -### 🔥 AIOps - -
    - -#### Zero Shot -| **ModelName** | LogParsing | RootCauseAnalysis | TimeSeriesAnomalyDetection | TimeSeriesClassification | TimeSeriesForecasting | **AVG** | -|:-------------------:|:------------:|:------------------:|:---------------------------:|:-----------------------------------------:|:---------------------------:|:-------:| -| Qwen-14B-Base | 66.29 | 58.8 | 25.33 | 43.5 | 62.5 | 52.25 | -| DevOpsPal-14B—Base | 63.14 | 53.6 | 23.33 | 43.5 | 64.06 | 50.49 | -| Qwen-14B-Chat | 64.57 | 51.6 | 22.67 | 36 | 62.5 | 48.94 | -| DevOpsPal-14B—Chat | 60 | 56 | 24 | 43 | 57.81 | 48.8 | -| Qwen-7B-Base | 50 | 39.2 | 22.67 | 54 | 43.75 | 41.48 | -| DevOpsPal-7B—Chat | 56.57 | 30.4 | 25.33 | 45 | 44.06 | 40.92 | -| Baichuan2-13B-Chat | 64 | 18 | 21.33 | 37.5 | 46.88 | 39.3 | -| Qwen-7B-Chat | 57.43 | 38.8 | 22.33 | 39.5 | 25.31 | 36.97 | -| Internlm-7B—Chat | 58.86 | 8.8 | 22.33 | 28.5 | 51.25 | 36.34 | -| Baichuan2-7B-Chat | 60.86 | 10 | 28 | 34.5 | 39.06 | 36.34 | -| Baichuan2-7B-Base | 53.43 | 12.8 | 27.67 | 36.5 | 40.31 | 35.49 | -| Baichuan2-13B-Base | 54 | 12.4 | 23 | 34.5 | 42.81 | 34.86 | -| DevOpsPal-7B—Base | 46.57 | 20.8 | 25 | 34 | 38.75 | 33.94 | -| Internlm-7B—Base | 48.57 | 18.8 | 23.33 | 37.5 | 33.75 | 33.1 | - -#### One Shot -| **ModelName** | LogParsing | RootCauseAnalysis | TimeSeriesAnomalyDetection | TimeSeriesClassification | TimeSeriesForecasting | **AVG** | -|:-------------------:|:------------:|:------------------:|:---------------------------:|:-----------------------------------------:|:---------------------------:|:-------:| -| DevOpsPal-14B—Chat | 66.29 | 80.8 | 23.33 | 44.5 | 56.25 | 54.44 | -| DevOpsPal-14B—Base | 60 | 74 | 25.33 | 43.5 | 52.5 | 51.13 | -| Qwen-14B-Base | 64.29 | 74.4 | 28 | 48.5 | 40.31 | 50.77 | -| Qwen-7B-Base | 56 | 60.8 | 27.67 | 44 | 57.19 | 49.44 | -| Qwen-14B-Chat | 49.71 | 65.6 | 28.67 | 48 | 42.19 | 46.13 | -| Baichuan2-13B-Base | 56 | 43.2 | 24.33 | 41 | 46.88 | 42.89 | -| Baichuan2-7B-Chat | 58.57 | 31.6 | 
27 | 31.5 | 51.88 | 41.83 | -| DevOpsPal-7B—Base | 52.86 | 44.4 | 28 | 44.5 | 36.25 | 41.2 | -| Baichuan2-7B-Base | 48.29 | 40.4 | 27 | 42 | 40.94 | 39.86 | -| Qwen-7B-Chat | 54.57 | 52 | 29.67 | 26.5 | 27.19 | 38.73 | -| Baichuan2-13B-Chat | 57.43 | 44.4 | 25 | 25.5 | 30.63 | 37.75 | -| DevOpsPal-7B—Chat | 56.57 | 27.2 | 25.33 | 41.5 | 33.44 | 37.46 | -| Internlm-7B—Chat | 62.57 | 12.8 | 22.33 | 21 | 50.31 | 36.69 | -| Internlm-7B—Base | 48 | 33.2 | 29 | 35 | 31.56 | 35.85 | - -
    - - -### 🔧 ToolLearning -
    - -| **FuncCall-Filler** | dataset_name | fccr | 1-fcffr | 1-fcfnr | 1-fcfpr | 1-fcfnir | aar | -|:-------------------:| :---: | :---: | :---: | :---: | :---: | :---: | :---: | -| Qwen-14b-chat | luban | 61 | 100 | 97.68 | 63.32 | 100 | 69.46 | -| Qwen-7b-chat | luban | 50.58 | 100 | 98.07 | 52.51 | 100 | 63.59 | -| Baichuan-7b-chat | luban | 60.23 | 100 | 97.3 | 62.93 | 99.61 | 61.12 | -| Internlm-chat-7b | luban | 47.88 | 100 | 96.14 | 51.74 | 99.61 | 61.85 | -| Qwen-14b-chat | fc_data | 98.37 | 99.73 | 99.86 | 98.78 | 100 | 81.58 | -| Qwen-7b-chat | fc_data | 99.46 | 99.86 | 100 | 99.59 | 100 | 79.25 | -| Baichuan-7b-chat | fc_data | 97.96 | 99.32 | 100 | 98.64 | 100 | 89.53 | -| Internlm-chat-7b | fc_data | 94.29 | 95.78 | 100 | 98.5 | 100 | 88.19 | -| CodeLLaMa-7b | fc_data | 98.78 | 99.73 | 100 | 99.05 | 100 | 94.7 | -| CodeLLaMa-7b-16 | fc_data | 98.1 | 99.87 | 99.73 | 98.5 | 100 | 93.14 | -| CodeFuse-7b-4k | fc_data | 98.91 | 99.87 | 99.87 | 99.18 | 100 | 89.5 | - - -
    diff --git a/content/en/docs/overview/b3.codefuseDevopsModel.md b/content/en/docs/overview/b3.codefuseDevopsModel.md deleted file mode 100644 index 4c2d39e..0000000 --- a/content/en/docs/overview/b3.codefuseDevopsModel.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: codefuse-devops-model -slug: codefuse-devops-model -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-model" ---- - - -## codeFuse-devops-model -DevOps-Model is a large language model for the Chinese DevOps field jointly released by Ant Group and Peking University. By collecting professional data related to the DevOps domain and conducting additional training and alignment on the model, a large model has been produced to help engineers enhance efficiency throughout the entire development and operations lifecycle. This fills the current gap in large models within the DevOps domain, with the aim to provide solutions to any problems by asking DevOps-Model! -We have now open-sourced two versions of the model, the Base model with additional training and the Chat model after alignment, in both 7B and 14B specifications, as well as the corresponding training code. We welcome everyone to collaborate and contribute! - -## Project Address -GitHub Address: https://github.com/codefuse-ai/CodeFuse-DevOps-Model/tree/main -ModelScope Address: - -- DevOps-Model-7B-Base: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Base/summary -- DevOps-Model-7B-Chat: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Chat/summary -- DevOps-Model-14B-Base: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Base/summary -- DevOps-Model-14B-Chat: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Chat/summary - -## Evaluation Questions -For model evaluation, there was initially no benchmark for testing in the DevOps domain, so we first selected some domain-related multiple-choice questions from general open-source tests for evaluation. 
The specific test data is as follows: - -|Dataset |Subject |Total Questions| -| ---- | --------- | ----- | -|CMMLU |Computer science 204| -|Computer |security |171| -|Machine |learning |122| -|CEval |college programming| 37| -|CEval |computer_architecture| 21| -|CEval |computer_network |19| -|总计 |总计题目数 |574| - - -## Evaluation Methods -Since all are multiple-choice questions, we adopted the method of selecting the highest-scoring Token among the four option Tokens in the first Token produced by the model as the model's answer to the question. We also tested Zero-shot and Five-shot results. - - -## Evaluation Results -![](/images/devops_model/devops_eval.webp) - -The specific scores are shown in the table below: - -|Scale of Parameters |Model |Model Size |Zero-shot Score |Five-shot Score| -| - | ---- | --- | ---- | ---- | -|10+ B| DevOps-Model-14B-Base |14B |70.73 |73.00| -|10+ B|Qwen-14B-Base |14B |69.16| 71.25| -|10+ B|Baichuan2-13B-Base |13B |55.75| 61.15| -|10+ B|DevOps-Model-14B-Chat| 14B |74.04 |75.96| -|10+ B|Qwen-14B-Chat |14B |69.16| 70.03| -|10+ B|Baichuan2-13B-Chat |13B |52.79 |55.23| -|7B| DevOps-Model-7B-Base| 7B |62.72| 62.02| -|7B|Qwen-7B-Base| 7B| 55.75| 56.0| -|7B|Baichuan2-7B-Base| 7B |49.30| 55.4| -|7B|Internlm-7B-Base |7B |47.56 |52.6| -|7B|DevOps-Model-7B-Chat| 7B |62.20| 64.11| -|7B|Qwen-7B-Chat| 7B |46.00 |52.44| -|7B|Baichuan2-7B-Chat| 7B| 52.26| 54.46| -|7B|Internlm-7B-Chat |7B |52.61 |55.75| \ No newline at end of file diff --git a/content/en/docs/overview/b6.FasterTransformer4CodeFuse.md b/content/en/docs/overview/b6.FasterTransformer4CodeFuse.md deleted file mode 100644 index d26f074..0000000 --- a/content/en/docs/overview/b6.FasterTransformer4CodeFuse.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: FasterTransformer4CodeFuse -slug: FasterTransformer4CodeFuse -description: 介绍主要功能 -aliases: -- "/docs/fastertransformer4codefuse" ---- - -## FasterTransformer4CodeFuse -FasterTransformer4CodeFuse \ No newline at end of file diff --git 
a/content/en/muagent/connector/connector_memory.md b/content/en/muagent/connector/connector_memory.md deleted file mode 100644 index d3d5ec4..0000000 --- a/content/en/muagent/connector/connector_memory.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Connector Memory -slug: Connector Memory -url: "muagent/connector-memory" -aliases: -- "/muagent/connector-memory" ---- - -## Memory Manager -Primarily used for managing chat history, not yet completed -- Read and write chat history in the database, including user input, llm output, doc retrieval, code retrieval, search retrieval. -- Summarize key information from the chat history into a summary context, serving as a prompt context. -- Provide a search function to retrieve information related to the question from chat history or summary context, aiding in Q&A. - -## Usage Example -### Create memory manager instance -``` -import os -import openai -from coagent.base_configs.env_config import KB_ROOT_PATH -from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.schema import Message -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xx" -openai.api_key = "sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" - -# LLM and Embedding Model configurations -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -# -phase_name = "test" -memory_manager = LocalMemoryManager( - unique_name=phase_name, - do_init=True, - kb_root_path=KB_ROOT_PATH, - 
embed_config=embed_config, - llm_config=llm_config - ) -``` - -### Support for Message management -``` -message1 = Message( - role_name="test1", role_type="user", input_query="hello", origin_query="hello", - parsed_output_list=[{"input": "hello"}] -) -text = "hi! how can I help you?" -message2 = Message( - role_name="test2", role_type="assistant", input_query=text, origin_query=text, - role_content=text, step_content=text, parsed_output_list=[{"answer": text}] -) -text = "they say hello and hi to each other" -message3 = Message( - role_name="test3", role_type="summary", - role_content=text, step_content=text, - parsed_output_list=[{"summary": text}] - ) -``` - -### Support for memory retrieval -``` -# embedding retrieval test -text = "say hi, i want some help" -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "datetime")) -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "embedding")) -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "text")) -``` - -### Support for memory summarization -``` -# recursive_summary test -print(memory_manager.recursive_summary(local_memory_manager.recall_memory.messages, split_n=1)) -``` \ No newline at end of file diff --git a/content/en/muagent/overview/prompt-manager.md b/content/en/muagent/overview/prompt-manager.md deleted file mode 100644 index 5ec59d3..0000000 --- a/content/en/muagent/overview/prompt-manager.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Prompt Manager -slug: Prompt Manager -url: "coagent/prompt-manager" -aliases: -- "/coagent/prompt-manager" ---- - - -### 提示管理器(Prompt Manager) -管理多智能体链路中的prompt创建 -- 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 -- 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 - -### Prompt预设模板结构 - -- Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 -- 
Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 - - Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 - - Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 - - Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 -- Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 -- Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 - -### Prompt自定义配置 - -#### Prompt模块参数 -- field_name:唯一的字段名称标识,必须提供。 -- function:指定如何处理输入数据的函数,必须提供。 -- title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 -- description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 -- is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 -- omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 - -#### Prompt配置示例 - -Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 - -在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 - -context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 -``` -[ - {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": True}, - {"field_name": 'reference_documents', "function_name": 'handle_doc_info'}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'task_records', "function_name": 'handle_task_records'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'response', "function_name": 'handle_response', "title"="begin!!!", "is_context": False, "omit_if_empty": False} -] -``` - -### 未来规划 - -#### Prompt配置简化 - -未来的Prompt配置简化旨在降低用户面对复杂配置的难度。通过引入更直观的配置方法,我们计划使得Prompt配置不仅对高级用户友好,还能让初学者轻松上手。简化计划可能包括: - -- 
预设配置短语:将复杂的配置字典转换为简洁的短语,每个短语都预定义了一个Prompt模块。用户将能够使用简单的字符串指令来快速配置Prompt,而无需深入了解所有参数。 -- 配置校验和建议:增加配置的即时校验,如果检测到配置错误或不一致性,自动提供修改建议,帮助用户优化Prompt结构。 - -#### 动作(Action)注册的改进计划 - -在现行系统中,智能体必须在其角色提示(role prompt)内定义所有的动作(actions)。这意味着智能体需要同时处理动作的意图识别和生成动作所需的输入数据,这一过程对语言模型的理解和推理能力提出了更高要求。 - -为了优化这一流程,我们打算在后续版本中对动作的输入生成和执行进行模块化。这将使智能体的工作重点转移至判断当前情境下应执行哪些动作,而不必负责具体的操作指令。在这种新的架构下,当需要执行某个动作时,将有专门的机制负责生成相应动作的具体输入指令。 - -这种分离将显著降低单个模块的复杂性,使得整个系统更加灵活、易于扩展,同时也提升了动作执行的效率和准确性。 diff --git a/content/zh/coagent/connector/connector_agent.md b/content/zh/coagent/connector/connector_agent.md deleted file mode 100644 index 0bc2e5b..0000000 --- a/content/zh/coagent/connector/connector_agent.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Connector Agent -slug: Connector Agent ZH -url: "coagent/connector-agent-zh" -aliases: -- "/coagent/connector-agent-zh" ---- - - -## 快速构建一个Agent -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.configs import AGETN_CONFIGS -from coagent.connector.agents import BaseAgent -from coagent.connector.schema import Message, load_role_configs - - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xx" -openai.api_key = "sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - - -- 配置相关 LLM 和 Embedding Model -``` -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - -- 
这里从已有的agent配置选一个role来做示例 -``` -# 从已有的配置中选择一个config,具体参数细节见下面 -role_configs = load_role_configs(AGETN_CONFIGS) -agent_config = role_configs["general_planner"] -# 生成agent实例 -base_agent = BaseAgent( - role=agent_config.role, - prompt_config = agent_config.prompt_config, - prompt_manager_type=agent_config.prompt_manager_type, - chat_turn=agent_config.chat_turn, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, - embed_config=embed_config, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - ) -# round-1 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message = base_agent.step(query) -print(output_message.to_str_content(content_key="parsed_output_list")) -``` - -## Agent 参数配置 -``` -# 配置结构在这个目录 -from coagent.connector.schema import Role, PromptField -``` - - -### Agent Config -|Config Key Name| Type| Description| -| ------------------ | ---------- | ---------- | -|role| Role |角色描述| -|prompt_config |List[PromptField] |Enum:PromptManager 也可以继承以上几种Agent然后去构造相关的Agent| -|prompt_manager_type |String |Enum:PromptManager 也可以继承以上几种Agent然后去构造自定义的Enum:PromptManager| -|focus_agents |List[String] |metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name -|focus_message_keys |List[String]| 额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys| -|chat_turn |int |只针对ReactAgent有效| -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| - -### Role - -| Config Key Name | Type | Description | -|------------------|------|--------------------| -| role_type | str | 角色类型, Enum: system、user、assistant、function、observation、summary | -| role_name | str | 角色名称 | -| role_desc | str | 角色描述 | -| agent_type | str | 代理类型 | -| 
role_prompt | str | 角色提示 | -| template_prompt | str | 模板提示 | - - -### PromptField - -| Config Key Name | Type | Description | -|-----------------|------|-------------| -| field_name | str | | -| function_name | str | | -| title | str | | -| description | str | | -| is_context | bool | | -| omit_if_empty | bool | | \ No newline at end of file diff --git a/content/zh/coagent/connector/connector_chain.md b/content/zh/coagent/connector/connector_chain.md deleted file mode 100644 index 897100f..0000000 --- a/content/zh/coagent/connector/connector_chain.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Connector Chain -slug: Connector Chain ZH -url: "coagent/connector-chain-zh" -aliases: -- "/coagent/connector-chain-zh" ---- - -## 快速构建一个 agent chain -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -# 设置openai的api-key -import os, sys -import openai -import importlib - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xxxx" -openai.api_key = "sk-xxxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -- 配置相关 LLM 和 Embedding Model -``` -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - - -- 这里从已有的agent配置选多个role组合成 agent chain -``` -from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.configs import AGETN_CONFIGS -from coagent.connector.chains import BaseChain -from coagent.connector.schema import Message, load_role_configs - -# 构建 agent chain 链路 
-role_configs = load_role_configs(AGETN_CONFIGS) -agent_config = role_configs["general_planner"] -role1 = role_configs["general_planner"] -role2 = role_configs["executor"] -agent_module = importlib.import_module("examples.connector.agents") -agents = [ - getattr(agent_module, role1.role.agent_type)( - role=role1.role, - prompt_config = role1.prompt_config, - prompt_manager_type=role1.prompt_manager_type, - chat_turn=role1.chat_turn, - focus_agents=role1.focus_agents, - focus_message_keys=role1.focus_message_keys, - llm_config=llm_config, - embed_config=embed_config, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - ), - getattr(agent_module, role2.role.agent_type)( - role=role2.role, - prompt_config = role2.prompt_config, - prompt_manager_type=role2.prompt_manager_type, - chat_turn=role2.chat_turn, - focus_agents=role2.focus_agents, - focus_message_keys=role2.focus_message_keys, - llm_config=llm_config, - embed_config=embed_config, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - ), - ] - -chain = BaseChain( - agents, - chat_turn=1, - jupyter_work_path=JUPYTER_WORK_PATH, - kb_root_path=KB_ROOT_PATH, - llm_config=llm_config, - embed_config=embed_config, - ) -``` - - -- 开始执行 -``` -# round-1 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = chain.step(query) -print(output_memory.to_str_messages(content_key="parsed_output_list")) - -``` - - -## Chain 参数配置 -|Config Key Name| Type |Description| -| ------------------ | ---------- | ---------- | -|agents| List[BaseAgent] | -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| diff --git 
a/content/zh/coagent/connector/connector_memory.md b/content/zh/coagent/connector/connector_memory.md deleted file mode 100644 index 75057d2..0000000 --- a/content/zh/coagent/connector/connector_memory.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Connector Memory -slug: Connector Memory ZH -url: "coagent/connector-memory-zh" -aliases: -- "/coagent/connector-memory-zh" ---- - - -## Memory Manager -主要用于 chat history 的管理,暂未完成 -- 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval -- 对 chat history 进行关键信息总结 summary context,作为 prompt context -- 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 - - - -## 使用示例 - -### 创建 memory manager 实例 -``` -import os -import openai - -from coagent.base_configs.env_config import KB_ROOT_PATH -from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.schema import Message - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xx" -openai.api_key = "sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" - -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - -# -phase_name = "test" -memory_manager = LocalMemoryManager( - unique_name=phase_name, - do_init=True, - kb_root_path = KB_ROOT_PATH, - embed_config=embed_config, - llm_config=llm_config - ) -``` - -### 支持Message管理 - -``` -message1 = Message( - role_name="test1", role_type="user", input_query="hello", origin_query="hello", - 
parsed_output_list=[{"input": "hello"}] -) - -text = "hi! how can I help you?" -message2 = Message( - role_name="test2", role_type="assistant", input_query=text, origin_query=text, - role_content=text, step_content=text, parsed_output_list=[{"answer": text}] -) - -text = "they say hello and hi to each other" -message3 = Message( - role_name="test3", role_type="summary", - role_content=text, step_content=text, - parsed_output_list=[{"summary": text}] - ) - -``` - -### 支持 memory 检索 -``` -# embedding retrieval test -text = "say hi, i want some help" -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "datetime")) -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "embedding")) -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "text")) - -``` -### 支持 memory 总结 -``` -# recursive_summary test -print(memory_manager.recursive_summary(local_memory_manager.recall_memory.messages, split_n=1)) -``` \ No newline at end of file diff --git a/content/zh/coagent/connector/connector_phase.md b/content/zh/coagent/connector/connector_phase.md deleted file mode 100644 index ed26b5f..0000000 --- a/content/zh/coagent/connector/connector_phase.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Connector Phase -slug: Connector Phase ZH -url: "coagent/connector-phase-zh" -aliases: -- "/coagent/connector-phase-zh" ---- - - - -## 快速构建一个 agent phase -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.configs import AGETN_CONFIGS -from coagent.connector.phase import BasePhase -from coagent.connector.schema import Message, load_role_configs - - -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xx" -openai.api_key = 
"sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - - -- 配置相关 LLM 和 Embedding Model -``` -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - - -- 这里从已有的 phase 配置中选一个 phase 来做示例 -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "searchChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -query_content1 = "美国当前总统是谁?" -query = Message( - role_name="human", role_type="user", - role_content=query_content1, input_query=query_content1, origin_query=query_content1, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content2 = "美国上一任总统是谁,两个人有什么关系没?" 
-query = Message( - role_name="human", role_type="user", - role_content=query_content2, input_query=query_content2, origin_query=query_content2, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - - -## Phase 参数配置 -|Config Key Name |Type |Description| -| ------------------ | ---------- | ---------- | -|phase_name| String| 场景名称| -|phase_config|CompletePhaseConfig| 默认为None,可直接指定完整的phaseconfig, 暂未实现| -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| -| base_phase_config | Union[dict, str] | 默认配置:PHASE_CONFIGS,可通过实现对这个变量新增来实现自定义配置 | -| base_chain_config | Union[dict, str] | 默认配置:CHAIN_CONFIGS,可通过实现对这个变量新增来实现自定义配置 | -| base_role_config | Union[dict, str] | 默认配置:AGETN_CONFIGS,可通过实现对这个变量新增来实现自定义配置 | diff --git a/content/zh/coagent/connector/connector_prompt.md b/content/zh/coagent/connector/connector_prompt.md deleted file mode 100644 index 389d3c8..0000000 --- a/content/zh/coagent/connector/connector_prompt.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -title: Connector Prompt -slug: Connector Prompt ZH -url: "coagent/connector-prompt-zh" -aliases: -- "/coagent/connector-prompt-zh" ---- - -## Prompt 的标准结构 -在整个Prompt的整个结构中,我们需要去定义三个部分 -- Agent Profil -- Input Format -- Response Output Format - -``` -#### Agent Profile - -Agent Description ... - -#### Input Format - -**Origin Query:** the initial question or objective that the user wanted to achieve - -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved. - -#### Response Output Format -**Action Status:** finished or continued -If it's 'finished', the context can answer the origin query. 
-If it's 'continued', the context cant answer the origin query. - -**REASON:** Justify the decision of choosing 'finished' and 'continued' by evaluating the progress step by step. -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion. -``` - - -其中,我们整合了部分 `Input Format` 的通用操作,内置了一部分字段和操作流程,形成通用的配置化操作。如下所示 -只需要定义如下字段和执行函数, - -``` -AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [ - {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False} -] -``` - -未来我们会也会进一步将 Agent Profile和Response Output Format的部分,实现可配置化操作,降低Prompt编写难度 - -### 自定义 Input Format -同时,我们也支持 用户自定义 Input Format 的操作 - -``` -from coagent.connector.prompt_manager import PromptManager - -# 增加了两个新处理函数,用于prompt组装 -class CodeRetrievalPM(PromptManager): - def handle_code_packages(self, **kwargs) -> str: - if 'previous_agent_message' not in kwargs: - return "" - previous_agent_message: Message = kwargs['previous_agent_message'] - # 由于两个agent共用了同一个manager,所以临时性处理 - vertices = previous_agent_message.customed_kargs.get("RelatedVerticesRetrivalRes", {}).get("vertices", []) - return ", ".join([str(v) for v in vertices]) - - def handle_retrieval_codes(self, **kwargs) -> str: - if 'previous_agent_message' not in kwargs: - return "" - previous_agent_message: Message = kwargs['previous_agent_message'] - return '\n'.join(previous_agent_message.customed_kargs["Retrieval_Codes"]) - - -# Design your personal PROMPT INPPUT FORMAT -CODE_RETRIEVAL_PROMPT_CONFIGS = [ - {"field_name": 'agent_profile', 
"function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'reference_documents', "function_name": 'handle_doc_info'}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'retrieval_codes', "function_name": 'handle_retrieval_codes'}, - {"field_name": 'code_packages', "function_name": 'handle_code_packages'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False} - ] - -# 进行注册 -import importlib -prompt_manager_module = importlib.import_module("coagent.connector.prompt_manager") -setattr(prompt_manager_module, 'CodeRetrievalPM', CodeRetrievalPM) - -# 更新配置 -from coagent.connector.configs import AGETN_CONFIGS -AGETN_CONFIGS.update({ - "codeRetrievalJudger": { - "role": { - "role_prompt": codeRetrievalJudger_PROMPT, - "role_type": "assistant", - "role_name": "codeRetrievalJudger", - "role_desc": "", - "agent_type": "CodeRetrievalJudger" - # "agent_type": "BaseAgent" - }, - "prompt_config": CODE_RETRIEVAL_PROMPT_CONFIGS, - "prompt_manager_type": "CodeRetrievalPM", - "chat_turn": 1, - "focus_agents": [], - "focus_message_keys": [], - }, - }) -``` - - - -在我们构建phase、chain或者agent之后,可以通过函数的预打印功能,实现agents链路确认,避免在执行后才发现问题,可提前进行debug -``` -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - -phase_name = "baseGroupPhase" -phase 
= BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -phase.pre_print(query) - -## 完整信息确认 coagent.connector.configs中进行确认 -########################## -<<<>>> -########################## - -### Agent Profile -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. -ATTENTION: response carefully referenced "Response Output Format" in format. - -### Tool Information - -### Agent Infomation - Please ensure your selection is one of the listed roles. Available roles for selection: - "role name: tool_react -role description: Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.," -"role name: code_react -role description: Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. 
Each reply should contain only the code required for the current step.," - Please ensure select the Role from agent names, such as tool_react, code_react - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Current Plan - -### Response Output Format -**Thoughts:** think the reason step by step about why you selecte one role -**Role:** Select the role from agent names. - -### Begin!!! - -################### -<<<>>> -################### - -**Thoughts:** -**Role:** - - -########################### -<<<>>> -########################### -### Agent Profile -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools. -Please note that all the tools you can use are listed below. You can only choose from these tools for use. -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use. -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. - -### Tool Information - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Task Records - -### Response Output Format -**Thoughts:** According the previous observations, plan the approach for using the tool effectively. -... - -### Begin!!! - -################### -<<<>>> -################### -**Thoughts:** -**Action Status:** -**Action:** -**Observation:** -**Thoughts:** -**Action Status:** -**Action:** - -########################### -<<<>>> -########################### -### Agent Profile -When users need help with coding, your role is to provide precise and effective guidance. -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step. 
- -### Context Data - -#### Reference Documents - -#### Session Records - -### Response Output Format - -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem, -outline the plan for executing this step. - -**Action Status:** Set to 'stopped' or 'code_executing'. -If it's 'stopped', the action is to provide the final answer to the session records and executed steps. -If it's 'code_executing', the action is to write the code. -... - -### Begin!!! - -################### -<<<>>> -################### - -**Thoughts:** -**Action Status:** -**Action:** -**Observation:** -**Thoughts:** -**Action Status:** -**Action:** - -``` diff --git a/content/zh/coagent/connector/customed_examples.md b/content/zh/coagent/connector/customed_examples.md deleted file mode 100644 index 42ca740..0000000 --- a/content/zh/coagent/connector/customed_examples.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Customed Examples -slug: Customed Examples ZH -url: "coagent/customed-examples-zh" -aliases: -- "/coagent/customed-examples-zh" ---- - - -## 如何创建你个性化的 agent phase 场景 - -下面通过 autogen 的 auto_feedback_from_code_execution 构建过来,来详细演示如何自定义一个 agent phase 的构建 - -### 设计你的prompt结构 -``` -import os, sys, requests - -# from configs.model_config import * -from coagent.connector.phase import BasePhase -from coagent.connector.chains import BaseChain -from coagent.connector.schema import Message -from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS -import importlib - - -# update new agent configs -auto_feedback_from_code_execution_PROMPT = """#### Agent Profile - -You are a helpful AI assistant. Solve tasks using your coding and language skills. -In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. - 1. 
When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. - 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. -Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. -When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. -If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. -When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. -Reply "stopped" in the end when everything is done. - -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. - -#### Response Output Format - -**Thoughts:** Based on the question and observations above, provide the plan for executing this step. - -**Action Status:** Set to 'stopped' or 'code_executing'. 
If it's 'stopped', the action is to provide the final answer to the original question. If it's 'code_executing', the action is to write the code. - -**Action:** -# Write your code here -import os -... - - -**Observation:** Check the results and effects of the executed code. - -... (Repeat this Thoughts/Action/Observation cycle as needed) - -**Thoughts:** I now know the final answer - -**Action Status:** stopped - -**Action:** The final answer to the original input question -""" -``` - -### 开始配置 Prompt Configs -``` -AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [ - {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False} -] -``` - -### 更新完整的agent、chain、phase配置,以便后续更读取执行 -``` -from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS -import os - -## set a -AGETN_CONFIGS.update({ - "auto_feedback_from_code_execution": { - "role": { - "role_prompt": auto_feedback_from_code_execution_PROMPT, - "role_type": "assistant", - "role_name": "auto_feedback_from_code_execution", - "role_desc": "", - "agent_type": "ReactAgent" - }, - "prompt_config": AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS, - "chat_turn": 5, - "stop": "\n**Observation:**", - "focus_agents": [], - "focus_message_keys": [], - }, -}) -# update new chain configs -CHAIN_CONFIGS.update({ - "auto_feedback_from_code_executionChain": { - "chain_name": "auto_feedback_from_code_executionChain", - "chain_type": "BaseChain", - "agents": ["auto_feedback_from_code_execution"], - "chat_turn": 1, - "do_checker": False, - "chain_prompt": "" - } -}) - -# 
update phase configs -PHASE_CONFIGS.update({ - "auto_feedback_from_code_executionPhase": { - "phase_name": "auto_feedback_from_code_executionPhase", - "phase_type": "BasePhase", - "chains": ["auto_feedback_from_code_executionChain"], - "do_summary": False, - "do_search": False, - "do_doc_retrieval": False, - "do_code_retrieval": False, - "do_tool_retrieval": False, - "do_using_tool": False - }, -}) - -``` - - - -### 接下来就构建 phase 实例,开始执行 -``` -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig -from coagent.connector.phase import BasePhase -from coagent.connector.schema import Message -import base64, openai - -# -os.environ["API_BASE_URL"] = "http://openai.com/v1/chat/completions" -os.environ["OPENAI_API_KEY"] = "sk-xxxx" -openai.api_key = "sk-xxxx" - -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) - -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -# -phase_name = "auto_feedback_from_code_executionPhase" -phase = BasePhase( - phase_name, - embed_config=embed_config, llm_config=llm_config, - base_phase_config = PHASE_CONFIGS, - base_chain_config = CHAIN_CONFIGS, - base_role_config = AGETN_CONFIGS, -) - - -# round-1 -query_content = """Plot a chart of META and TESLA's stock prices for the past year and save it as stock_price_ytd.png.""" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` \ No newline at end of file diff --git 
a/content/zh/coagent/overview/agent-flow.md b/content/zh/coagent/overview/agent-flow.md deleted file mode 100644 index 63200d1..0000000 --- a/content/zh/coagent/overview/agent-flow.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Agent 编排 -slug: Agent 编排 -url: "coagent/agent-编排" -aliases: -- "/coagent/agent-编排" -- "/coagent/agent-flow-zh" ---- - - - -## 核心Connector介绍 -为了便于大家理解整个 CoAgent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建 - -
    - 图片 -
    - - -
    下面,我们先介绍相关的核心组件
    - -### Agent -在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用 -1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出 - -
    - 图片 -
    - -2. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 -3. ReactAgent:提供标准React的功能,根据问题实现当前任务 -4. SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. - -输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理 - -### Chain -基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理 - -### Phase -基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理 - -### Prompt Manager -Mutli-Agent链路中每一个agent的prompt创建 -- 通过对promtp_input_keys和promtp_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置 -- 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt - -### Memory Manager -主要用于 chat history 的管理,暂未完成 -- 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval -- 对 chat history 进行关键信息总结 summary context,作为 prompt context -- 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 diff --git a/content/zh/coagent/overview/multi-agent.md b/content/zh/coagent/overview/multi-agent.md deleted file mode 100644 index 8774cac..0000000 --- a/content/zh/coagent/overview/multi-agent.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: CoAgent 概览 -slug: CoAgent 概览 -url: "coagent/coagent-概览" -aliases: -- "/coagent/coagent-概览" -- "/coagent/multi-agent-zh" -- "/coagent/coagent-zh" ---- - -## 简介 - -为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。 - -但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。 - -经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。 - -因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。 - -本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。 - -
    - 图片 -
    - -以下模块将从5个方面介绍Multi Agent框架所需要素: -- Agent Communication在Multi Agent框架中,确保Agent可以有效地进行信息交流对于管理上下文以及提高问答效率至关重要。 - a. 遵循简洁直观易于理解的链式对话原则,将Agent以线性方式排列串连成一个执行链路。 - b. 借鉴metaGPT中的Message Pool框架,允许Agent对Message Pool进行推送和订阅,使链路更加灵活。有利于精细化Prompt工程的场景,但难以把握复杂链路的关系分析。 -- Standard Operation Process(SOP):对LLM的生成结果进行标准化解析和处理。 - a. 定义Agent的 Input 和 Output 范围,能够组装和解析相关Action和Status,保证框架运行的稳定性 - b. 封装多种基础Action执行模块,如Tool Using、Planning、Coding、Direct Answering、final answer等SOP标识,以满足Agent的基本工作需求。 -- Plan and Executor:增加LLM的Tool使用、Agent调度、代码的生成。设置了几种基本链路,例如: - a. 单轮问答,也可以扩展到CoT、ToT、GoT等形式。 - b. ReAct,基础的响应决策过程,模型设置SOP 状态以终止循环 - c. TaskPlaning - Executor,任务完成即可结束 -- Long-short term memory Management:Multi-Agent与单Agent的关键区别在于,Multi-Agent需要处理大量的交流信息,类似人类团队协作的过程。增加一个专门负责内容总结(类似于会议助理)的Agent,对长期记忆进行总结并提更有效信息传递给下一位Agent,而非传递所有内容给下一位Agent。 -- Human-agent interaction:面对复杂场景时,需要人类介入Agent交互过程并提供反馈。通过上述 Long-short term memory Management 和 Agent Communication 过程,使LLM能准确理解人类的意图,从而更有效地完成任务。 - -总的来说,这五个要素共同构建了一个Multi Agent框架,确保Agent之间的协作更加紧密和高效,同时也能够适应更复杂的任务需求和更多样的交互场景。通过组合多个Agent链路来实现一个完整且复杂的项目上线场景(Dev Phase),如Demand Chain(CEO)、Product Arguement Chain(CPO、CFO、CTO)、Engineer Group Chain(Selector、Developer1~N)、QA Engineer Chain(Developer、Tester)、Deploy Chain(Developer、Deploer)。 - - -## 模块分类 -- [connector](/coagent/connector-zh) -- document_loaders -- embeddings -- llm_models -- orm -- sandbox -- service -- text_splitter -- tools -- utils diff --git a/content/zh/coagent/overview/prompt-manager.md b/content/zh/coagent/overview/prompt-manager.md deleted file mode 100644 index 72e6dfa..0000000 --- a/content/zh/coagent/overview/prompt-manager.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Prompt 管理器 -slug: Prompt 管理器 -url: "coagent/prompt-管理器" -aliases: -- "/coagent/prompt-管理器" -- "/coagent/prompt-manager-zh" ---- - - -### 提示管理器(Prompt Manager) -管理多智能体链路中的prompt创建 -- 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 -- 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 - -### Prompt预设模板结构 - -- 
Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 -- Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 - - Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 - - Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 - - Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 -- Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 -- Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 - -### Prompt自定义配置 - -#### Prompt模块参数 -- field_name:唯一的字段名称标识,必须提供。 -- function:指定如何处理输入数据的函数,必须提供。 -- title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 -- description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 -- is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 -- omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 - -#### Prompt配置示例 - -Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 - -在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 - -context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 -``` -[ - {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False}, - {"field_name": 'context_placeholder', "function_name": '', "is_context": True}, - {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": True}, - {"field_name": 'reference_documents', "function_name": 'handle_doc_info'}, - {"field_name": 'session_records', "function_name": 'handle_session_records'}, - {"field_name": 'task_records', "function_name": 'handle_task_records'}, - {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False}, - {"field_name": 'response', "function_name": 'handle_response', "title"="begin!!!", "is_context": False, "omit_if_empty": False} -] -``` - -### 未来规划 - -#### Prompt配置简化 - 
-未来的Prompt配置简化旨在降低用户面对复杂配置的难度。通过引入更直观的配置方法,我们计划使得Prompt配置不仅对高级用户友好,还能让初学者轻松上手。简化计划可能包括: - -- 预设配置短语:将复杂的配置字典转换为简洁的短语,每个短语都预定义了一个Prompt模块。用户将能够使用简单的字符串指令来快速配置Prompt,而无需深入了解所有参数。 -- 配置校验和建议:增加配置的即时校验,如果检测到配置错误或不一致性,自动提供修改建议,帮助用户优化Prompt结构。 - -#### 动作(Action)注册的改进计划 - -在现行系统中,智能体必须在其角色提示(role prompt)内定义所有的动作(actions)。这意味着智能体需要同时处理动作的意图识别和生成动作所需的输入数据,这一过程对语言模型的理解和推理能力提出了更高要求。 - -为了优化这一流程,我们打算在后续版本中对动作的输入生成和执行进行模块化。这将使智能体的工作重点转移至判断当前情境下应执行哪些动作,而不必负责具体的操作指令。在这种新的架构下,当需要执行某个动作时,将有专门的机制负责生成相应动作的具体输入指令。 - -这种分离将显著降低单个模块的复杂性,使得整个系统更加灵活、易于扩展,同时也提升了动作执行的效率和准确性。 diff --git a/content/zh/contribution/acknowledgements/d1.acknowledgements.md b/content/zh/contribution/acknowledgements/d1.acknowledgements.md deleted file mode 100644 index 2fdbd53..0000000 --- a/content/zh/contribution/acknowledgements/d1.acknowledgements.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: 致谢 -slug: 致谢 -description: 介绍主要功能 -url: "contribution/致谢" -aliases: -- "/contribution/致谢" -- "/contribution/zh-acknowledgements" ---- - -CodeFuse-ai 文档主页基于[docura](https://github.com/docura/docura)构建! - -ChatBot 项目基于[langchain-chatchat](https://github.com/chatchat-space/Langchain-Chatchat)和[codebox-api](https://github.com/shroominic/codebox-api)! - -...... - -在此深深感谢他们的开源贡献! diff --git a/content/zh/contribution/contribute/d1.contribution.md b/content/zh/contribution/contribute/d1.contribution.md deleted file mode 100644 index 4288299..0000000 --- a/content/zh/contribution/contribute/d1.contribution.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: 贡献指南 -slug: 贡献指南 -description: 介绍主要功能 -url: "contribution/贡献指南" -aliases: -- "/contribution/贡献指南" -- "/contribution/contribution-guide-zh" ---- - - -

    - 中文  |  English  -

    - - -非常感谢您对 Codefuse 项目感兴趣,我们非常欢迎您对 Codefuse 项目的各种建议、意见(包括批评)、评论和贡献。 - -您对 Codefuse 的各种建议、意见、评论可以直接通过 GitHub 的 Issues 提出。 - -参与 Codefuse 项目并为其作出贡献的方法有很多:代码实现、测试编写、流程工具改进、文档完善等等。任何贡献我们都会非常欢迎,并将您加入贡献者列表. - -进一步,有了足够的贡献后,您还可以有机会成为 Codefuse 的 Committer。 - -任何问题,您都可以联系我们得到及时解答,联系方式包括微信、Gitter(GitHub提供的即时聊天工具)、邮件等等。 - - -## 初次接触 -初次来到 Codefuse 社区,您可以: - -- 关注 Codefuse Github 代码库 -- 加入 Codefuse 相关的微信群 随时提问; -通过以上方式及时了解 Codefuse 项目的开发动态并为您关注的话题发表意见。 - - -## 贡献方式 -这份贡献指南并不仅仅关于编写代码。我们重视并感激在各个领域的帮助。以下是一些您可以贡献的方式 -- 文档 -- Issue -- PR - -### 改进文档 -文档是您了解 Codefuse 的最主要的方式,也是我们最需要帮助的地方! - -浏览文档,可以加深您对 Codefuse 的了解,也可以帮助您理解 Codefuse 的功能和技术细节,如果您发现文档有问题,请及时联系我们; - -如果您对改进文档的质量感兴趣,不论是修订一个页面的地址、更正一个链接、以及写一篇更优秀的入门文档,我们都非常欢迎! - -我们的文档大多数是使用 markdown 格式编写的,您可以直接通过在 GitHub 中的 docs/ 中修改并提交文档变更。如果提交代码变更,可以参阅 Pull Request。 - -### 如果发现了一个 Bug 或问题 -如果发现了一个 Bug 或问题,您可以直接通过 GitHub 的 Issues 提一个新的 Issue,我们会有人定期处理。详情见[Issue模板](/contribution/issuereport-zh) - -您也可以通过阅读分析代码自己修复(当然在这之前最好能和我们交流下,或许已经有人在修复同样的问题了),然后提交一个 Pull Request。 - -### 修改代码和提交PR(Pull Request) -您可以下载代码,编译安装,部署运行试一试(可以参考编译文档,看看是否与您预想的一样工作。如果有问题,您可以直接联系我们,提 Issue 或者通过阅读和分析源代码自己修复。详情见[如何提交pr](/contribution/pull-request-zh) - -无论是修复 Bug 还是增加 Feature,我们都非常欢迎。如果您希望给 Doris 提交代码,您需要从 GitHub 上 fork 代码库至您的项目空间下,为您提交的代码创建一个新的分支,添加源项目为upstream,并提交PR。 提交PR的方式可以参考文档 Pull Request。 - diff --git a/content/zh/contribution/contribute/d1.issue.md b/content/zh/contribution/contribute/d1.issue.md deleted file mode 100644 index 5c39633..0000000 --- a/content/zh/contribution/contribute/d1.issue.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: 如何提交Issue -slug: 如何提交Issue -description: 介绍主要功能 -url: "contribution/如何提交issue" -aliases: -- "/contribution/如何提交issue" -- "/contribution/issue-report-zh" ---- - - -

    - 中文  |  English  -

    - - -## Issue Type -Issue分为三种类型 -- Bug: 代码或者执行示例存在bug或缺少依赖导致无法正确执行 -- Documentation:文档表述存在争议、文档内容与代码不一致等 -- Feature:在当前代码基础继续演进的新功能 - -## Issue Template -### Issue: Bug Template - -**提交Issue前的确认清单** -
    要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 -- 我搜索了Codefuse相关的所有文档。 -- 我使用GitHub搜索寻找了一个类似的问题,但没有找到。 -- 我为这个问题添加了一个非常描述性的标题。 - -**系统信息** -
    确认系统,如 mac -xx 、windwos-xx、linux-xx - -**代码版本** -
    确认代码版本或者分支,master、release等 - -**问题描述** -
    描述您碰到的问题,想要实现的事情、或代码执行Bug - -**代码示例** -
    附上你的执行代码和相关配置,以便能够快速介入进行复现 - -**报错信息、日志** -
    执行上述代码示例后的报错日志和相关信息 - -**相关依赖的模块** -
    以chatbot项目为例 -- connector -- codechat -- sandbox -- ... - - -### Issue: Documentation Template -**Issue with current documentation:** -
    请帮忙指出当前文档中的问题、错别字或者令人困惑的地方 - -**Idea or request for content** -
    您觉得合理的文档表述方式应该是什么样的 - - -### Issue: Feature Template -**提交Issue前的确认清单** -
    要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 -- 我搜索了Codefuse相关的所有文档。 -- 我使用GitHub Issue搜索寻找了一个类似的问题,但没有找到。 -- 我为这个问题添加了一个非常描述性的标题。 - -**功能描述** -
    描述这个功能作何用途 - -**相关示例** -
    提供参考的文档、仓库等信息,Please provide links to any relevant GitHub repos, papers, or other resources if relevant. - -**动机** -
    描述下这个feature的动机,为什么需要这个功能,提供足够的上下文信息帮助理解这个feature的诉求 - -**Contribution** -
    你如何参与到这个feature的构建(如果参与的话) \ No newline at end of file diff --git a/content/zh/contribution/contribute/d1.pr.md b/content/zh/contribution/contribute/d1.pr.md deleted file mode 100644 index 8daba5f..0000000 --- a/content/zh/contribution/contribute/d1.pr.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 如何提交PR -slug: 如何提交PR -description: 介绍主要功能 -url: "contribution/如何提交pr" -aliases: -- "/contribution/pull-request-zh" -- "/contribution/如何提交pr" ---- - - -

    - 中文  |  English  -

    - - -## Contribution - -### Pre-Checklist -- 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 -- 找到你想处理的GitHub问题。如果不存在,创建一个问题或草案PR,并请求维护者进行检查。 -- 检查相关的、相似的或重复的拉取请求。 -- 创建一个草案拉取请求。 -- 完成PR模板中的描述。 -- 链接任何被你的PR解决的GitHub问题。 - -### Description -PR的描述信息,用简洁的语言表达PR完成的事情,具体规范见[Commit 格式规范](#commit-格式规范) - -### Related Issue -`#xx` if has - -### Test Code with Result -请提供相关的测试代码如果有必要的话 - - - - -## Commit 格式规范 -Commit 分为“标题”和“内容”。原则上标题全部小写。内容首字母大写。 - - -### 标题 -commit message的标题:`[]() (#pr)` - - -### type 可选值 - -本次提交的类型,限定在以下类型(全小写) -- fix:bug修复 -- feature:新增功能 -- feature-wip:开发中的功能,比如某功能的部分代码。 -- improvement:原有功能的优化和改进 -- style:代码风格调整 -- typo:代码或文档勘误 -- refactor:代码重构(不涉及功能变动) -- performance/optimize:性能优化 -- test:单元测试的添加或修复 -- deps:第三方依赖库的修改 -- community:社区相关的修改,如修改 Github Issue 模板等。 - -几点说明: - -如在一次提交中出现多种类型,需增加多个类型。 -如代码重构带来了性能提升,可以同时添加 [refactor][optimize] -不得出现如上所列类型之外的其他类型。如有必要,需要将新增类型添加到这个文档中。 - -### scope 可选值 -本次提交涉及的模块范围。因为功能模块繁多,在此仅罗列部分,后续根据需求不断完善。 -
    以 chatbot的框架为例 -- connector -- codechat -- sandbox -- ... - -几点说明: - -尽量使用列表中已存在的选项。如需添加,请及时更新本文档。 - -### subject 内容 -标题需尽量清晰表明本次提交的主要内容。 - -例: -`[feature](coagent)<增加antflow兼容和增加coagent demo>` - -## 示例 -comming soon - - -## Reference -[doris-commit-format](https://doris.apache.org/zh-CN/community/how-to-contribute/commit-format-specification) \ No newline at end of file diff --git a/content/zh/docs/a1.overview.md b/content/zh/docs/a1.overview.md deleted file mode 100644 index a458373..0000000 --- a/content/zh/docs/a1.overview.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: 概览 -description: Learn more about the team maintaining Docura, how and why the project started, and how to get involved. -url: "docs/zh_overview" -aliases: -- "/docs/概览" ---- - -

    - -

    -
    - -[ **HuggingFace** ](https://huggingface.co/codefuse-ai)|[ **魔搭社区** ](https://modelscope.cn/organization/codefuse-ai) |[ **产品主页** ](https://codefuse.alipay.com/) -
    - -Hello World! This is CodeFuse! - -**CodeFuse的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs),涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。** - -

    - -

    - -我们非常有激情去构建创新的解决方案来支持全生命周期AI驱动的软件开发,如上图所示。同时,我们也诚邀志同道合的工程师和研究人员加入这个社区,共同构建和增强CodeFuse。 \ No newline at end of file diff --git a/content/zh/docs/b2.codefuseDevopsEval.md b/content/zh/docs/b2.codefuseDevopsEval.md deleted file mode 100644 index 65d6c1f..0000000 --- a/content/zh/docs/b2.codefuseDevopsEval.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CodeFuse-DevOps-Eval -slug: CodeFuse-DevOps-Eval-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-eval-zh" ---- - -## codefuse-devops-eval -codefuse-devops-eval \ No newline at end of file diff --git a/content/zh/docs/b2.xxx.md b/content/zh/docs/b2.xxx.md deleted file mode 100644 index 8f1a929..0000000 --- a/content/zh/docs/b2.xxx.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CodeFuse-DevOps -slug: CodeFuse-DevOps -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops" ---- - -## CodeFuse-DevOps -CodeFuse-DevOps \ No newline at end of file diff --git a/content/zh/docs/b3.codefuseDevopsModel.md b/content/zh/docs/b3.codefuseDevopsModel.md deleted file mode 100644 index 9d7c7af..0000000 --- a/content/zh/docs/b3.codefuseDevopsModel.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CodeFuse-DevOps-Model -slug: CodeFuse-DevOps-Model-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-model-zh" ---- - -## codeFuse-devops-model -codeFuse-devops-model \ No newline at end of file diff --git a/content/zh/docs/b4.MFTCoder.md b/content/zh/docs/b4.MFTCoder.md deleted file mode 100644 index 74db6db..0000000 --- a/content/zh/docs/b4.MFTCoder.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: MFTCoder -slug: MFTCoder-zh -description: 介绍主要功能 -aliases: -- "/docs/mftcoder-zh" ---- - -## MFTCoder -MFTCoder \ No newline at end of file diff --git a/content/zh/docs/b5.CodeFuseModelCache.md b/content/zh/docs/b5.CodeFuseModelCache.md deleted file mode 100644 index ce88595..0000000 --- a/content/zh/docs/b5.CodeFuseModelCache.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CodeFuse-ModelCache -slug: CodeFuse-ModelCache-zh 
-description: 介绍主要功能 -aliases: -- "/docs/codefuse-modelcache-zh" ---- - -## CodeFuse-ModelCache -CodeFuse-ModelCache \ No newline at end of file diff --git a/content/zh/docs/b6.FasterTransformer4CodeFuse.md b/content/zh/docs/b6.FasterTransformer4CodeFuse.md deleted file mode 100644 index e108a66..0000000 --- a/content/zh/docs/b6.FasterTransformer4CodeFuse.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: FasterTransformer4CodeFuse -slug: FasterTransformer4CodeFuse-zh -description: 介绍主要功能 -aliases: -- "/docs/fastertransformer4codefuse-zh" ---- - -## FasterTransformer4CodeFuse -FasterTransformer4CodeFuse \ No newline at end of file diff --git a/content/zh/docs/b7.TestAgent.md b/content/zh/docs/b7.TestAgent.md deleted file mode 100644 index 1a5201e..0000000 --- a/content/zh/docs/b7.TestAgent.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Test-Agent -slug: Test-Agent-zh -description: 介绍主要功能 -aliases: -- "/docs/test-agent-zh" ---- - -## Test-Agent -Test-Agent \ No newline at end of file diff --git a/content/zh/docs/b8.CodeFuseQuery.md b/content/zh/docs/b8.CodeFuseQuery.md deleted file mode 100644 index 8181aad..0000000 --- a/content/zh/docs/b8.CodeFuseQuery.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CodeFuse-Query -slug: CodeFuse-Query-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-query-zh" ---- - -## CodeFuse-Query -CodeFuse-Query \ No newline at end of file diff --git a/content/zh/docs/chatbot/c1.quickstart.md b/content/zh/docs/chatbot/c1.quickstart.md deleted file mode 100644 index 4263889..0000000 --- a/content/zh/docs/chatbot/c1.quickstart.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: 快速开始 -slug: 快速开始 -description: 介绍主要功能 -url: "docs/codefuse-chatbot-quickstart-zh" -aliases: -- "/docs/快速开始" -- "/docs/codefuse-chatbot-quickstart-zh" ---- - -

    - 中文  |  English  -

    - - -## 🚀 快速使用 - -如需使用私有化模型部署,请自行安装 nvidia 驱动程序,本项目已在 Python 3.9.18,CUDA 11.7 环境下,Windows、X86 架构的 macOS 系统中完成测试。 - -Docker安装、私有化LLM接入及相关启动问题见:[快速使用明细](/docs/start-detail-zh) - -### python 环境准备 - -- 推荐采用 conda 对 python 环境进行管理(可选) -```bash -# 准备 conda 环境 -conda create --name devopsgpt python=3.9 -conda activate devopsgpt -``` - -- 安装相关依赖 -```bash -cd codefuse-chatbot -pip install -r requirements.txt -``` - -### 基础配置 - -```bash -# 修改服务启动的基础配置 -cd configs -cp model_config.py.example model_config.py -cp server_config.py.example server_config.py - -# model_config#11~12 若需要使用openai接口,openai接口key -os.environ["OPENAI_API_KEY"] = "sk-xxx" -# 可自行替换自己需要的api_base_url -os.environ["API_BASE_URL"] = "https://api.openai.com/v1" - -# vi model_config#LLM_MODEL 你需要选择的语言模型 -LLM_MODEL = "gpt-3.5-turbo" -LLM_MODELs = ["gpt-3.5-turbo"] - -# vi model_config#EMBEDDING_MODEL 你需要选择的私有化向量模型 -EMBEDDING_ENGINE = 'model' -EMBEDDING_MODEL = "text2vec-base" - -# 向量模型接入示例,修改 model_config#embedding_model_dict -# 若模型地址为: -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese -# 配置如下 -"text2vec-base": "shibing624/text2vec-base-chinese" - -# vi server_config#8~14, 推荐采用容器启动服务,避免使用codeInterpreter功能时安装其它依赖导致环境冲突 -DOCKER_SERVICE = True -# 是否采用容器沙箱 -SANDBOX_DO_REMOTE = True -``` - -### 启动服务 - -默认只启动webui相关服务,未启动fastchat(可选)。 -```bash -# 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁 -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py -# examples/llm_api.py#258 修改为 kwargs={"gptq_wbits": 4}, - -# start llm-service(可选) -python examples/llm_api.py -``` -更多LLM接入方法见[详情...](/docs/fastchat-zh) -
    - -```bash -# 完成server_config.py配置后,可一键启动 -cd examples -python start.py -``` diff --git a/content/zh/docs/chatbot/roadmap.md b/content/zh/docs/chatbot/roadmap.md deleted file mode 100644 index 4b10632..0000000 --- a/content/zh/docs/chatbot/roadmap.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: ChatBot 技术路线 -slug: ChatBot 技术路线 -description: 介绍主要功能 -url: "docs/chatbot-技术路线" -aliases: -- "/docs/chatbot-技术路线" -- "/docs/chatbot-roadmap-zh" ---- - -

    - 中文  |  English  -

    - - -## RoadMap - -
    - 图片 -
    -
    - - -完整路线 -- [x] Sandbox 环境 ✅ - - [x] 环境隔离的sandbox环境与代码执行 ✅ - - [x] 上传、下载文件 ✅ - - [ ] 支持java执行环境 -- [ ] Vector Database & Retrieval - - [x] task retrieval ✅ - - [x] tool retrieval ✅ -- [x] Prompt Management ✅ -- [x] memory Management ✅ -- [x] Multi Agent ✅ - - [ ] PRD需求文档、系分、接口设计 ⬜ - - [ ] 根据需求文档、系分、接口设计生产代码 ⬜ - - [ ] 自动测试、自动debugger ⬜ - - [ ] 运维流程接入(ToolLearning)⬜ - - [ ] 全流程自动 ⬜ -- [x] 基于fastchat接入LLM ✅ -- [x] 基于sentencebert接入Text Embedding ✅ - - [x] 向量加载速度提升 ✅ -- [x] Connector ✅ - - [x] 基于langchain的react模式 ✅ - - [x] 基于langchain完成tool检索 ✅ -- [x] Web Crawl 通用能力 ✅ - - [x] 技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅ - - [ ] issue document ⬜ - - [ ] SDK Library Document ⬜ - - -

    - -- v0.0 -- [x] Sandbox 环境 ✅ - - [x] 环境隔离的sandbox环境与代码执行 ✅ -- [x] 基于fastchat接入LLM ✅ -- [x] 基于sentencebert接入Text Embedding ✅ -- [x] Web Crawl 通用能力:技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅ -
    -- v0.1 -- [x] Sandbox 环境: 上传、下载文件 ✅ -- [x] Vector Database & Retrieval ✅ - - [x] task retrieval ✅ - - [x] tool retrieval ✅ -- [x] Connector ✅ - - [x] 基于langchain的react模式 ✅ -- [x] 基于sentencebert接入Text Embedding: 向量加载速度提升 ✅ - -Done -
    - -- v0.2 -- [x] Prompt Management ✅ -- [x] memory Management ✅ -- [x] Vector Database & Retrieval ✅ - -DDL: 2024.01.31 -
    - -- v0.3 -- [x] Sandbox 环境 ✅ - - [ ] 支持java执行环境 ⬜ -- [x] Multi Agent Framework ✅ - - [ ] PRD需求文档、系分、接口设计 ⬜ - - [ ] 根据需求文档、系分、接口设计生产代码 ⬜ - - [ ] 自动测试、自动debugger ⬜ - - [ ] 运维流程接入(ToolLearning) ⬜ - - [ ] 全流程自动 ⬜ -- [x] Web Crawl 通用能力 ✅ - - [ ] issue document ⬜ - - [ ] SDK Library Document ⬜ - -DDL: 2024.12.31 -
    diff --git a/content/zh/docs/codefuse-modelcache/3_config.md b/content/zh/docs/codefuse-modelcache/3_config.md deleted file mode 100644 index 78d004a..0000000 --- a/content/zh/docs/codefuse-modelcache/3_config.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: 最佳配置 -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-config-zh" -aliases: -- "/docs/codefuse-modelcache-config-zh" ---- - -## 环境依赖 -- python版本: 3.8及以上 -- 依赖包安装: - ```pip install requirements.txt ``` - -## 服务启动 -- 在启动服务前,应该进行如下环境配置: -- 安装关系数据库 mysql, 导入sql创建数据表,sql文件: reference_doc/create_table.sql -- 安装向量数据库milvus -- 在配置文件中添加数据库访问信息,配置文件为: - - modelcache/config/milvus_config.ini - - modelcache/config/mysql_config.ini -- 离线模型bin文件下载, 参考地址:https://huggingface.co/shibing624/text2vec-base-chinese/tree/main,并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中 -- 通过flask4modelcache.py脚本启动后端服务。 - diff --git a/content/zh/docs/codefuse-modelcache/4_release_note.md b/content/zh/docs/codefuse-modelcache/4_release_note.md deleted file mode 100644 index 02b7784..0000000 --- a/content/zh/docs/codefuse-modelcache/4_release_note.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: 最佳配置 -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-release-zh" -aliases: -- "/docs/codefuse-modelcache-release-zh" ---- - - - -| 时间 |功能 |版本号| -| ----- | ------ | ----- | -| 20230430| 完成GPTCache调研,开源流程在OpenAI接口上跑通,单节点形式 |无| -| 20230509| 1、完成技术选型及上下游交互方案
    2、重新开发数据库模块,替换SQLalchemy框架
    3、重构llm_handler模块,兼容codegpt,适配codegpt模型参数| V0.1.0| -| 20230519| 1、根据环境动态选择codegpt服务模式
    2、模型本地加载能力,以及预加载能力
    3、增加本地路径依据环境动态加载能力| V0.1.1| -| 20230522| 1、架构优化,调整为类redis结构,解藕大模型调用
    2、关系数据库由sqlite切换至OceanBase
    3、向量数据库由faiss切换至milvus
    4、模型数据隔离能力
    5、增加核心模块adapter_query、adapter_insert |V0.2.0| -| 20230531| 1、线上环境上线,动态感知能力
    2、embedding模型评测及选型
    3、增加预发环境及数据隔离能力
    4、增加原始query字段透出能力| V0.2.1| -| 20230607| 1、优化关系数据库访问性能
    2、优化环境和模型隔离能力| V0.2.2| -| 20230630| 1、在modelCache中增加大模型embedding层适配模块
    2、增加采纳率统计能力 |V0.2.3| -| 20230730| 1、增加缓存统计功能
    2、增加数据删除功能接口
    3、缓存一键清空能力上线
    4、多轮会话能力研发,支持system指令和多轮对话| v0.3.0| -| 20230830| 1、增加异步处理能力,性能提升超20%
    2、架构变更,解藕embedding推理和业务处理逻辑
    3、黑名单过滤功能 |V0.3.1| \ No newline at end of file diff --git a/content/zh/docs/codefuse-query/1_abstract.md b/content/zh/docs/codefuse-query/1_abstract.md deleted file mode 100644 index 8a82567..0000000 --- a/content/zh/docs/codefuse-query/1_abstract.md +++ /dev/null @@ -1,17 +0,0 @@ -# 引言 -随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。 - -这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 -在 CodeQuery 的实现中,我们把源代码和分析结果看作数据,把执行过程看作大数据处理,这与传统的以工具为中心的方法有着显著的不同。我们利用大型组织中的常见系统,如数据仓库、MaxCompute 和 Hive 等数据计算设施、OSS 对象存储和 Kubernetes 等灵活计算资源,让 CodeQuery 能够无缝地融入这些系统中。这种方法使 CodeQuery 高度可维护和可扩展,能够支持多元化的需求,并有效应对不断变化的需求。此外,CodeQuery 的开放架构鼓励各种内部系统之间的互操作性,实现了无缝的交互和数据交换。这种集成和交互能力不仅提高了组织内部的自动化程度,也提高了效率,降低了手动错误的可能性。通过打破信息孤岛,推动更互联、更自动化的环境,CodeQuery 显著提高了软件开发过程的整体生产力和效率。 -此外,CodeQuery 的以数据为中心的方法在处理静态源代码分析的领域特定挑战时具有独特的优势。例如,源代码通常是一个高度结构化和互联的数据集,与其他代码和配置文件有强烈的信息和连接。将代码视为数据,CodeQuery 可以巧妙地处理这些问题,这使得它特别适合在大型组织中使用,其中代码库持续但逐步地进行演变,大部分代码在每天进行微小的改动同时保持稳定。 CodeQuery 还支持如基于代码数据的商业智能 (BI) 这类用例,能生成报告和仪表板,协助监控和决策过程。此外,CodeQuery 在分析大型语言模型 (LLM) 的训练数据方面发挥了重要作用,提供了增强这些模型整体效果的深入见解。 - -在当前的静态分析领域,CodeQuery 带来了一种新的范式。它不仅满足了大规模、复杂的代码库分析需求,还能适应不断变化和多元化的静态分析场景。CodeQuery 的以数据为中心的方法,使得其在处理大数据环境中的代码分析问题时具有独特优势。CodeQuery 的设计,旨在解决大规模软件开发环境中的静态分析问题。它能够将源代码和分析结果视作数据,使得其可以灵活地融入大型组织的各种系统中。这种方法不仅可以有效地处理大规模的代码库,还可以应对各种复杂的分析需求,从而使得静态分析工作变得更加高效和准确。 - -CodeQuery 的特点和优势可以概括为以下几点: - -- **高度可扩展**:CodeQuery 可以处理大规模的代码库,且能够适应不同的分析需求。这种高度的可扩展性使得 CodeQuery 可以在大型组织中发挥重要作用。 -- **以数据为中心**:CodeQuery 将源代码和分析结果视作数据,这种以数据为中心的方法使其在处理大数据环境中的代码分析问题时具有独特优势。 -- **高度集成**:CodeQuery 能够无缝地融入大型组织的各种系统中,包括数据仓库、数据计算设施、对象存储和灵活计算资源等。这种高度的集成性使得 CodeQuery 在大型组织中的使用变得更加方便和高效。 -- **支持多元化的需求**:CodeQuery 
不仅可以处理大规模的代码库,还可以应对各种复杂的分析需求,包括服务质量分析需求、跨编程语言分析需求、算法需求和性能需求等。 - -CodeQuery 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。未来,随着静态代码分析技术的不断发展,CodeQuery 有望在这个领域中扮演更加重要的角色。 diff --git a/content/zh/docs/codefuse-query/2_introduction.md b/content/zh/docs/codefuse-query/2_introduction.md deleted file mode 100644 index 9362795..0000000 --- a/content/zh/docs/codefuse-query/2_introduction.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: CodeFuse-Query 介绍 -slug: CodeFuse-Query -description: 介绍主要功能 -url: docs/codefuse-query-introduction-zh -aliases: -- "/docs/codefuse-query-introduction-zh" ---- - - -# 概述 -CodeFuse-Query 是一个支持对 **各种编程语言** 进行 **结构化分析** 的 **代码数据平台**。核心思想是利用各种语言解析器将所有代码转化为数据,并将其结构化存储到代码数据库中。通过使用自定义查询语言,按照业务需求进行数据分析。如下图所示: -![image.png](/images/codefuse-query/introduction01.png) - -## 2.1 CodeFuse-Query的架构 -从整体上来说,CodeFuse-Query代码数据平台分为三大部分:代码数据模型、代码查询DSL、平台产品化服务。主要工作流程如下图所示: -### ![image.png](/images/codefuse-query/introduction02.png) - -### 代码数据化和标准化:COREF -我们定义了一种代码数据化和标准化的模型:COREF,要求所有代码都要能通过各种语言抽取器转化到该模型。 -COREF主要包含以下几种信息: -**COREF** = AST (抽象语法树) + ASG(抽象语义图) + CFG(控制流图) + PDG(程序依赖图)+ Call Graph(函数调用图) + Class Hierarchy (类继承关系)+ Documentation(文档/注释信息) -注:由于每种信息的计算难度不一,所以并不是所有语言的COREF信息均包含以上全部信息,基础信息主要有AST、ASG、Call Graph、Class Hierarchy和Documentation,其他信息( CFG 和 PDG )仍在建设中,后续会逐步支持。 -### 代码查询DSL -基于生成的COREF代码数据,CodeFuse-Query 使用一种自定义的DSL语言 **Gödel** 来进行查询,从而完成代码分析需求。 -Gödel是一种逻辑推理语言,它的底层实现是基于逻辑推理语言Datalog,通过描述“事实”和“规则”, 程序可以不断地推导出新的事实。Gödel也是一个声明式语言,相较于命令式编程,声明式编程更加着重描述“要什么”,而把如何实现交给计算引擎。 -既然代码已经转化为关系型数据(COREF数据以关系型数据表的形式存储),相信大家会有疑问,为什么不直接用SQL,或者是直接使用SDK,而是又要专门去学习一个新的DSL语言呢?因为Datalog的计算具备单调性和终止性,简单理解就是,Datalog是在牺牲了表达能力的前提下获得了更高的性能,而Gödel继承了这个特点。 - -- 相比较SDK,Gödel的主要优点是易学易用,声明式的描述,用户不需要关注中间的运算过程,只需要像SQL一样简单描述清楚需求即可。 -- 相比较SQL,Gödel的优点主要是描述能力更强、计算速度更快,例如描述递归算法和多表联合查询,而这些对于SQL来说都是比较困难的。 -### 平台化、产品化 -CodeFuse-Query 包括**Sparrow CLI **和CodeFuse-Query**在线服务Query中心**。Sparrow CLI包含了所有组件和依赖,例如抽取器,数据模型,编译器等,用户完全可以通过使用Sparrow 
CLI在本地进行代码数据生成和查询(Sparrow CLI的使用方式请见 第3节 安装、配置、运行)。如果用户有在线查询的需求,可以使用Query中心进行实验。 -## 2.2 CodeFuse-Query支持的分析语言 -截至2023-10-31为止,CodeFuse-Query支持对11种编程语言进行数据分析。其中对5种编程语言( Java、JavaScript、TypeScript、XML、Go )的支持度非常成熟,对剩余6种编程语言(Object-C、C++、Python3、Swift、SQL、Properties )的支持度处于beta阶段,还有进一步提升和完善的空间,具体的支持情况见下表: - -| 语言 | 状态 | COREF模型节点数 | -| --- | --- | --- | -| Java | 成熟 | 162 | -| XML | 成熟 | 12 | -| TS/JS | 成熟 | 392 | -| Go | 成熟 | 40 | -| OC/C++ | beta | 53/397 | -| Python3 | beta | 93 | -| Swift | beta | 248 | -| SQL | beta | 750 | -| Properties | beta | 9 | - -注:以上语言状态的成熟程度判断标准是根据COREF包含的信息种类和实际落地情况来进行判定,除了OC/C++外,所有语言均支持了完整的AST信息和Documentation信息,以Java为例,COREF for Java还支持了ASG、Call Graph、Class Hierarchy、以及部分CFG信息。 -## 2.3 CodeFuse-Query的使用场景 -### 查询代码特征 -小开发同学想知道 Repo A 里面使用了哪些 String 型的变量,所以他写了一个 Godel 如下,交给 CodeFuse-Query 系统给他返回了结果。 -```rust -// script -use coref::java::* - -fn out(var: string) -> bool { - for(v in Variable(JavaDB::load("coref_java_src.db"))) { - if (v.getType().getName() = "String" && var = v.getName()) { - return true - } - } -} - -fn main() { - output(out()) -} -``` -类似需求:查询:类,函数,变量,返回值,调用图,类继承等等。 - -### 输出静态分析能力 -小安全是 XX 团队的安全同学,他做了**一套系统**交叉验证日志数据和代码数据是否一致。为了完成某个分析任务,他计划通过写 Godel 查询出来静态数据 D1,合并动态数据 D2,联合分析得出结论 C。小安全通过在 CodeFuse-Query 上面编写 Godel Query 测试技术上可行之后,使用 CodeFuse-Query 提供的标准 API 将系统对接了起来。 -类似需求:通过静态分析进行系统的卡点,提高测试的效率,通过分析出来的数据合并成说明文档。 -### 代码规则检查器 -小 TL 同学发现团队总是写出很多类似的 Bug A,**他想针对 Bug A 制定一个代码规则和其检查器**,并在 CodeReview 阶段做个卡点。小 TL 通过在 CodeFuse-Query 平台上面编写了一段分析 Query,在平台上面测试符合要求,把这段分析 Query 固化下来作为一个代码规则,并上线到了 CodeReview/CI 阶段。从此这个 Bug 再也没发生过了。 -类似需求:编写静态缺陷扫描规则进行代码风险拦截。 -### 分析代码特性 -研发部同学小框架想知道目前代码仓库中Spring工程和Spring Boot工程比例。 好量化新框架的推广情况。小架构通过编写 Godel Query 描述不同项目分析特征,**然后一次性 Query 了 11 万个代码仓库**,过了几十分钟后就拿到了所有代码的数据,开开心心做 KPI 去了。 -类似需求:应用画像,代码画像,架构分析。 -### 获取统计数据 -小研究发现传统的代码复杂度指标很难准确地衡量代码的复杂情况,通过学习国际先进经验加上自我灵光一闪,设计了一套复杂度指标和算法。通过 Godel 实现出来以后,**发现不怎么优化就已经性能非常高了**,很快就应用到了 10 
几种语言,11+万个仓库当中去了。马上就对代码仓库整体的复杂度有了深入的了解。相比较以前需要自己解析代码,分析语法树,对接系统,**不知道方便了多少。** -类似需求:代码统计,代码度量,算法设计,学术研究。 -### 架构分析 -小架构同学最近推行了一种新的基于 txt 文件的消息中间件,目前已有的分析平台都不能支持分析此类系统的上下游依赖。小架构通过 Godel**快速建模了该消息格式**,并马上获取到了目前系统中不同组件的依赖关系。 -类似需求:系统 Overview,架构治理,血缘分析。 -### 模型验证 -小促销设计的系统里面要求用户一定是先玩游戏再领券。他通过 Godel 描述了**该模型的验证逻辑**,然后通过 CodeFuse-Query 系统**保障当前以及未来系统的代码实现**,都是完全符合该模型的。从此再不担心游戏出资损~ -类似需求:系统验证,网络验证,权限验证 -## 2.4 CodeFuse-Query的应用领域 -目前,CodeFuse-Query在蚂蚁集团已经支持 **CodeFuse大语言模型数据清洗**、**代码度量评估**、**研发风险控制**、**隐私安全分析**、**代码智能**、**终端包大小治理 **等多个场景的落地应用,服务月均调用量超过百万。 -![image.png](/images/codefuse-query/introduction03.png) - -### 高质量代码数据清洗 - CodeFuse代码大模型 -CodeFuse代码大模型是蚂蚁集团对外开源的处理代码相关问题的模型,对于CodeFuse大语言模型而言,训练的数据质量直接影响模型的推理结果。低质量的代码数据会直接污染语言模型的输出,例如:模型可能会学习到错误的代码模式,从而生成错误的代码;数据中只包含某种编程语言的代码,模型可能无法很好地适应其他编程语言的代码。 -为了把控进入模型的代码数据质量,进而提升模型的推理能力。我们基于蚂蚁程序分析团队多年的实践积累结合业界共识,梳理了高质量代码的定义方式,并利用已有程序分析技术实现了自动化、大规模的代码数据清洗。 -CodeFuse-Query为CodeFuse代码大模型提供了以下数据清洗能力: - -- 高质量代码数据清洗:对代码数据进行清洗,包括对 Python,Java,JavaScript,TypeScript,Go,C,C++ 7 种语言进行漏洞扫描,对语言种类 / star 数进行筛选,过滤有效代码行数为 0 的数据等。目前已沉淀清洗后的 GitHub 和蚂蚁内部代码数据总共约 **2TB**。 -- 代码画像:实现对大规模代码进行高性能多维度的自动标注,支持 Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go 等 **10** 种语言,**77** 种通用标签,**40** 种蚂蚁特有标签,共 **117** 种标签。目前自动标注性能能够达到 **40MB/s**。 -- 其他原子能力 - - 高级代码特征提取,包括提取 AST(抽象语法树),DFG(数据流图)数据等。目前 AST 信息已用于 SFT 训练,准确率 97% 左右。 - - 代码片段识别,用于针对文本数据中的代码进行提取,方便进行代码格式化或加上 Markdown 格式: - - 文本提取代码:从文本中提取代码块信息,支持主流语言的解析,函数及类定义,仅验证二分类问题,就是说仅验证文本是否含有代码块准确率 83% 左右。 - - 识别代码片段的编程语言种类:识别任意代码片段的编程语言种类,支持 30+ 种语言,准确率80%左右。 - - 代码注释对提取:支持提取方法级别的注释-代码对信息,覆盖 **15 种** GitHub 最流行的语言,用于 Text To Code/Code To Text 的 SFT 训练。 -### 代码数据指标 - 广目 -广目是蚂蚁内部一款面向不同职能的研发同学和团队管理者,对代码力进行评估、展示客观数据和分析结果的数据产品。 -广目提供了个人代码力评估报告、日常代码力指标数据分析、团队代码力管理、代码评优荣誉展示等功能,旨在帮助蚂蚁研发工程师不断提升代码品质、减少代码负债,更长远的提升研发效能。 -CodeFuse-Query为广目提供的能力分为两部分: - -- 代码评估指标:代码复杂度、代码注释率、标准开发量等 -- 代码评优指标:代码复用度 -### 变更分析-优酷服务端研发效能 
-优酷质量保障团队从2023年开始针对服务端精准测试的探索,经过半年的技术沉淀和体系搭建,形成了具备**变更内容识别、变更影响分析、测试能力推荐、测试覆盖评估**的精准测试体系。 -在此过程中,CodeFuse-Query能提供的能力主要有: - -- 根据代码变更内容(文件+行号),分析出影响的对象:方法、入口(http入口、hsf入口)、调用链路(从入口到变更方法的所有调用链路)、数据库操作(表、操作类型) -- 结合线上动态调用链路(方法链路)、CodeFuse-Query静态分析调用链路的影响面精准分析能力,提升变更分析影响面的有效性、准备率 - -到目前为止,优酷已通过CodeFuse-Query接入所有核心应用,并基于静态分析采集数据,构建了服务端完整的代码知识库和流量知识库。 - diff --git a/content/zh/docs/codefuse-query/5_toolchain.md b/content/zh/docs/codefuse-query/5_toolchain.md deleted file mode 100644 index b041d4e..0000000 --- a/content/zh/docs/codefuse-query/5_toolchain.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: VSCode插件 -slug: VSCode插件 -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-toolchain-zh -aliases: -- "/docs/codefuse-query-toolchain-zh" ---- - - -# 开发插件(VSCode) -## 安装 -### 从VSCode官方插件市场安装(推荐) -[插件地址](https://marketplace.visualstudio.com/items?itemName=CodeFuse-Query.codefuse-query-extension) -### 使用VSIX安装包安装 -1. 下载插件 -2. 手动从 vsix 安装: -![image.png](/images/codefuse-query/toolchain01.png) -3. 
或者使用指令直接从终端安装: -```bash -code --install-extension [扩展vsix文件路径] -``` -## 环境准备 - -- Sparrow CLI ,参照 3 安装、配置、运行 -## 扩展特性 -本扩展提供了以下功能模块: - -- COREF AST Viewer -- Gödel Language Server -- Gödel Language Runner -### COREF AST Viewer -以下功能需要在扩展设置中设置相关项后启用。目前仅支持于Java语言 -#### Java 文件转成树状的 COREF Node -![](/images/codefuse-query/toolchain02.gif) -#### Node 与代码位置的相互定位 -![](/images/codefuse-query/toolchain03.gif) -#### 在Lib API Viewer 查看 Node 的API,Node 复制 -![](/images/codefuse-query/toolchain04.gif) -#### Lib API Viewer:查询与复制使用 -![](/images/codefuse-query/toolchain05.gif) -### Gödel Language Server Features -以下功能均需要在设置扩展后启用。不设置相关项的情况下,语法高亮仍然可用。 -#### 错误信息提示 -错误信息会随着代码的更新而自动更新。 -![](/images/codefuse-query/toolchain06.gif) -#### 符号信息提示和补全 -包含local变量和全局符号信息的补全提示,关键字等信息会提供对应的使用样例,全局符号信息会提供更详细的内部信息,如包含的成员变量、成员方法、静态方法。 - -![](/images/codefuse-query/toolchain07.gif) - -- 关键字补全和使用样例提示 -- local 变量类型信息和符号补全 -- `.` 跟随的符号信息和补全 -- `::` 跟随的符号信息和补全 -- 注解使用样例提示 -- 全局符号类型信息 (内部结构,成员方法,静态方法) -#### 跳转到定义 -可以通过右键跳转定义或者`ctrl`/`command`+`left click`直接跳转到准确的符号定义位置。 - -![](/images/codefuse-query/toolchain08.gif) -#### 代码片段 (Snippets) -扩展提供了一些代码片段补齐以供快速编写 Gödel 1.0/script 代码。 - -![](/images/codefuse-query/toolchain09.gif) -### GödelScript Runner -需要在扩展中设置 sparrow cli 路径后使用。运行脚本之前需要先加载数据库。关于如何生成数据库 参考 3.4.章节 运行 中的数据抽取部分。 -#### 运行脚本 -![panel.gif](/images/codefuse-query/toolchain10.gif) -提供了四种不同的脚本运行按钮: -1. 在要运行的脚本处右键执行。 -2. 在 extension `GodelScript Runner` 面板上选择 `Run GödelScript`。 -3. 在 extension `GodelScript Runner Setting` 面板上选择 `Run`。 -4. 在 extension `GodelScript Runner Setting` 面板右上角点击运行按钮。 -#### 数据库文件夹加载 -1. 在要运行的脚本处右键选择包含数据库的文件夹进行加载。 -2. 在 extension `GodelScript Runner` 面板上选择 `Load Database Directory`。 -3. 在 extension `GodelScript Runner Setting` 面板上选择 `Database`。 -4. 
在 extension `GodelScript Runner Setting` 面板右上角点击数据库加载按钮。 -## 扩展设置 -### COREF AST Viewer 设置 - -- `corefASTViewer.sparrowCliRoot` - - 指定 Sparrow CLI 的根目录,参照第3章节的安装部分 -### Gödel Language Server 设置 -扩展启动时,以下两项中存在任意一项未被设置,则会弹出提示。点击`configure`按钮会跳转至相应配置页面。 - -- `godelScript.executablePath` - - 用于指定 GödelScript 的可执行文件路径,默认为空。需要时请替换为实际的 GödelScript 可执行文件的绝对路径。 - - 如果已经下载 Sparrow CLI ,则 GödelScript 可执行文件为 `[sparrow cli root]/godel-script/usr/bin/godel`。 -- `godelScript.libraryDirectoryPath` - - 用于指定 GödelScript 的库文件夹路径,默认为空。需要时请替换为 GödelScript 库文件夹绝对路径。 - - 如果已经下载 Sparrow CLI ,则库文件夹路径为 `[sparrow cli root]/lib-1.0`。 - -# 智能助手 - -待开放,尽情期待! diff --git a/content/zh/docs/devops_eval/tool_learning_info_zh.md b/content/zh/docs/devops_eval/tool_learning_info_zh.md deleted file mode 100644 index d3db092..0000000 --- a/content/zh/docs/devops_eval/tool_learning_info_zh.md +++ /dev/null @@ -1,87 +0,0 @@ -### 数据样例 -在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下: - -**Function Call的数据格式** - -| Input Key | Input Type | Input Description | -| --- | --- | --- | -| functions | List[Swagger] | 工具集合 | -| chatrounds | List[chatround] | 多轮对话数据 | - -**chatrounds的数据格式** - -| Input Key | Input Type | Input Description | -| --- | --- | --- | -| role | string | 角色名称,包含三种类别,user、assistant、function | -| name | string | 若role为function,则存在name字段,为function的名称 | -| content | string | role的返回内容 | -| function_call | dict | 工具调用 | - -``` -{ - "functions": - [ - { - "name": "get_fudan_university_scoreline", - "description": "查询复旦大学往年分数线,例如:查询2020年复旦大学的分数线", - "parameters": - { - "type": "object", - "properties": - { - "year": - { - "type": "string", - "description": "年份,例如:2020,2019,2018" - } - }, - "required": - [ - "year" - ] - } - } - ], - "chatrounds": - [ - { - "role": "system", - "content": 
"CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。\n你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。" - }, - { - "role": "user", - "content": "查询2020年复旦大学的分数线" - }, - { - "role": "assistant", - "content": null, - "function_call": - { - "name": "get_fudan_university_scoreline", - "arguments": "{\n \"year\": \"2020\"\n}" - } - }, - { - "role": "function", - "name": "get_fudan_university_scoreline", - "content": "{\n \"scoreline\":{\n \"文科一批\": 630, \n \"文科二批\": 610, \n \"理科一批\": 650, \n \"理科二批\": 630 \n }\n}" - }, - { - "role": "assistant", - "content": "2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分" - } - ] -} -``` - -上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。 - - -### 评测指标 -由于一般通用模型无法具备工具调用的能力,因此在进行Tool Learn-Eval评测之前需要对通用模型进行微调,先让模型学会工具使用的基本范式 - -下面,我们定义了几种评估工具使用的指标: - - - -②③④⑤的和为1,代表工具调用失败的总数,⑤工具幻觉是工具名识别失败的一种特殊情况 \ No newline at end of file diff --git a/content/zh/docs/mftcoder/1_introduction.md b/content/zh/docs/mftcoder/1_introduction.md deleted file mode 100644 index 625a21b..0000000 --- a/content/zh/docs/mftcoder/1_introduction.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: MFTCoder 介绍 -slug: MFTCoder 介绍 -description: MFTCoder 介绍 -url: /docs/mftcoder-introduction-zh -aliases: -- "/docs/mftcoder-introduction-zh" ---- - -## 项目简介 -**国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架;** - -**Codefuse-MFTCoder** 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。 - -### 项目框架 -![img_1.jpg](/images/mftcoder/img_1.jpg) - -### 项目优势 -:white_check_mark: **多任务**:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去; - -:white_check_mark: **多模型**:支持最新的多个开源模型,包括gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2等; - -:white_check_mark: **多框架**:既支持主流开源的Accelerate+DeepSpeed/FSDP,也支持新开源的[ATorch 框架](https://github.com/intelligent-machine-learning/dlrover); - -:white_check_mark: **高效微调**:支持LoRA和QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景; 
- - -本项目主要内容如下: -- 同时支持单任务SFT(Supervised FineTuning)和MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等 -- 支持QLoRA低成本高效指令微调、LoRA高效指令微调、全量参数高精度微调。 -- 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如DeepSeek-coder, Mistral, Mistral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA等。 -- 支持lora与base model进行权重合并,推理更便捷。 -- 整理并开源2个指令微调数据集:[Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k)和[CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k)。 -- 开源多个[Codefuse系列指令微调模型权重],具体参见我们的huggingface组织和modelscope组织下的模型:[codefuse-ai huggingface](https://huggingface.co/codefuse-ai) or [codefuse-ai 魔搭](https://modelscope.cn/organization/codefuse-ai)。 \ No newline at end of file diff --git a/content/zh/docs/mftcoder/2_quickstart.md b/content/zh/docs/mftcoder/2_quickstart.md deleted file mode 100644 index 32cc86c..0000000 --- a/content/zh/docs/mftcoder/2_quickstart.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: QuickStart -slug: QuickStart -description: QuickStart Document -url: /docs/mftcoder-quickstart-zh -aliases: -- "/docs/mftcoder-quickstart-zh" ---- - - -## 环境 -首先, 你需要将CUDA(>=11.4, 推荐11.7)及其相关驱动安装成功,并确保其工作正常, 并且安装基本的torch(>=2.0.0) -在requirements.txt下固定了几个主要的python包的版本,执行如下脚本即可: -```bash -sh init_env.sh -``` -我们强烈建议您安装flash attention(>=2.1.0, 推荐2.3.6), 安装请参考 https://github.com/Dao-AILab/flash-attention - -## 训练 -如果你熟悉大模型训练的各种主流开源资源,例如 ```transformers```, ```DeepSpeed```, ```FSDP```等, 为了用开源项目快速上手高性能微调,我们建议您尝试: - -🚀🚀 [MFTCoder-accelerate: Accelerate + DeepSpeed/FSDP Codebase for MFT(Multi-task Finetuning)](/docs/mftcoder-accelerate-zh) - - -如果你想探索一些新兴的训练框架,可以尝试: - -🚀 [MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning)](/docs/mftcoder-atorch-zh) - - -## 模型 - -使用本项目的训练代码,以及上述训练数据,我们训练并在huggingface, modelscope开源了以下模型。 - -| 模型 | HuggingFace链接 | 魔搭 链接 | 基座模型 | 训练数据 | Batch Size | Seq Length | 
-|--------------------------------------|---------------------------------------------------------------------------|---------------------------------------------------------------------------------|----------------------|------|------------|------------| -| 🔥🔥🔥 CodeFuse-DeepSeek-33B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-DeepSeek-33B) | DeepSeek-coder-33B | 60万 | 80 | 4096 | -| 🔥🔥🔥 CodeFuse-Mixtral-8x7B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-Mixtral-8x7B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-Mixtral-8x7B) | Mixtral-8x7B | 60万 | 80 | 4096 | -| 🔥🔥🔥 CodeFuse-CodeLlama-34B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B) | CodeLlama-34b-Python | 60万 | 80 | 4096 | -| 🔥🔥🔥 CodeFuse-CodeLlama-34B-4bits | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | CodeLlama-34b-Python | | | 4096 | -| 🔥🔥🔥 CodeFuse-StarCoder-15B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-StarCoder-15B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-StarCoder-15B) | StarCoder-15B | 60万 | 80 | 4096 | -| 🔥🔥🔥 CodeFuse-QWen-14B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-QWen-14B) | Qwen-14b | 110万 | 256 | 4096 | -| 🔥🔥🔥 CodeFuse-CodeGeex2-6B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeGeex2-6B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeGeex2-6B) | CodeGeex2-6B | 110万 | 256 | 4096 | - - - - -## 数据集 -目前本项目主要整理了如下指令数据集,并将其整理成统一的数据格式,这两个指令微调数据集是我们多任务训练中数十个任务中的2个,未来我们会陆续开源更多的代码任务指令微调数据集: - -| 数据集 | 介绍 | -|---------------------------------------------------------------|--------------------------------------------------------------------| 
-| [⭐ Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k) | 基于开源open-evol-instruction-80k过滤低质量,重复和human eval相似的数据后得到的高质量代码类微调数据 | -| [⭐ CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k) | 高质量python练习题数据 \ No newline at end of file diff --git a/content/zh/docs/mftcoder/3_accelerate.md b/content/zh/docs/mftcoder/3_accelerate.md deleted file mode 100644 index e65a2b4..0000000 --- a/content/zh/docs/mftcoder/3_accelerate.md +++ /dev/null @@ -1,294 +0,0 @@ ---- -title: "MFTCoder: Accelerate + DeepSpeed/FSDP 框架篇" -description: 介绍主要功能 -url: /docs/mftcoder-accelerate-zh -aliases: -- "/docs/mftcoder-accelerate-zh" ---- - - -[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai) - - GitHub - - -[**中文**] [[English]](/docs/mftcoder-accelerate) - -## 1. 更新 -🔥 MFTCoder-accelerate 新增支持accelerate + FSDP框架, 支持全量微调和LoRA; - -🔥 MFTCoder-accelerate 支持最新更多主流开源模型: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; - -🔥 MFTCoder-accelerate 新增self-paced Loss, 用于收敛均衡; - -🔥 MFTCoder-accelerate 支持使用accelerate + DeepSpeed框架下支持 全量参数/QLoRA/LoRA微调; - -🔥 MFTCoder-accelerate 在训练中支持了多任务微调MFT, 可以同时平衡多个任务的训练,训练的模型支持多任务推理; - -🔥 MFTCoder-accelerate 在训练中支持多种模型基座: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen等 - -## 2. 
数据格式 -### 2.1 训练数据格式 -训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 -可以参考项目中的xxx.jsonl文件。 -```json -{ - "id":0, - "data_name":"code-helper", - "chat_rounds":[ - { - "role": "system", - "content": "你是一个智能代码助手,可以回复用户与代码相关的问题" - }, - { - "role": "human", - "content": "写一个快速排序" - }, - { - "role": "bot", - "content": "以下是一个快速排序算法xxxxxx" - }, - { - "role": "human", - "content": "解释一下这段代码" - }, - { - "role": "bot", - "content": "好的,这段代码xxx" - } - ] -} -``` - -### 2.2 推理数据格式 -推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入prompt拼接的方式: -``` -""" -system -这是System指令 -human -这是第1轮用户输入的问题 -bot -这是第1轮模型生成的内容{EOS_TOKEN} -human -这是第2轮用户输入的问题 -bot -这是第2轮模型生成的内容{EOS_TOKEN} -... -... -... -human -这是第n轮用户输入的问题 -bot -{模型现在要生成的内容}{EOS_TOKEN} -""" -``` - - -## 3. 模型训练 -目前支持全量参数(Full-parameters)指令微调、QLoRA指令微调,LoRA指令微调。 -一些优秀的代码预训练模型权重,理论上,HuggingFace上开源的模型,均可使用本项目进行训练: - -🤗 [最新代码预训练SOTA,CodeLlama](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) :code-llama-34b, code-llama-34b-python, 新的SOTA基座。 - -🤗 [10B级别最佳代码预训练模型Starcoder](https://huggingface.co/bigcode/starcoder) wizardCoder-15B, PanGu-coder2等前SOTA的基座模型。 - -🤗 [多语言能手Qwen-7b](https://huggingface.co/Qwen/Qwen-7B) :适用于多语言任务,也适用中文任务。进行指令微调时。 - -**mftcoder_accelerate文件结构** -``` -mftcoder_accelerate - | - src - configs - | - data - | - model - | - *pefts* - | - tokenizer - | - utils - | - evals -``` -我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化, 详见```src```目录下的实现。 - -训练入口文件是```mftcoder_accelerate/src/pefts/mft_accelerate.py``` - -参数配置存储在```mftcoder_accelerate/src/configs```目录下,方便统一管理和更改。 - -**_所以,在你开启训练之前,请进入src目录_** -``` -cd mftcoder_accelerate/src -``` - - - -### 3.1 数据tokenization -训练时,我们将多轮对话拼接成如下格式(也是上文中的推理数据格式),然后进行tokenize。 -其中,默认情况下: - -```human\n```作为human/user的起始符,```bot\n```作为bot/assistant的起始符,```{EOS_TOKEN}``` 表示eos_token。 -其中eos_token可以根据不同模型修改替换。不同角色的起始符可以配置,用来实现不同的对话/问答模版。 -``` -"human\n{input1}bot\n{target1}{EOS_TOKEN}human\n{input2}bot\n{target2}{EOS_TOKEN}\n" -``` -在计算loss时,我们通过loss 
mask的方式,input部分的loss不参与参数更新,只有“target{EOS_TOKEN}”部分的loss参与参数更新。 -这种方式充分利用了模型并行计算的优势,训练更加高效,同时也充分利用了decoder-only模型从左到右attention的特性,一次性将多轮对话中的每个target部分都参与了训练,训练更充分高效。 - -### 3.2 LoRA/QLoRA微调 - -#### LoRA/QLoRA微调简介 -关于LoRA的详细介绍可参考论文:[LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/pdf/2106.09685.pdf) - -关于QLoRA的详细介绍可参考论文:[QLORA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/pdf/2305.14314.pdf) - -QLoRA通过4-bit的nf4量化,且加入更多adapter,在大幅减少显存消耗的同时,尽可能逼近全量参数微调的效果。 -QLoRA论文指出,该方法可以在一张V100上对33B的模型进行微调,并且性能逼近全量参数微调。 - -执行如下命令即可进行 Lora/QLora/全量 微调: -#### Launch via Deepspeed -DeepSpeed配置在accelerate_ds_config.yaml中。 -```bash -accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "DeepSpeed" -``` -或者 - -DeepSpeed配置在脚本中通过命令行输入。 -```bash -sh ds_single_launch.sh -``` - -#### Launch via FSDP -FSDP配置在accelerate_fsdp_config.yaml中。 -```bash -accelerate launch --config_file accelerate_fsdp_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "FSDP" -``` -或者 - -FSDP配置在脚本中通过命令行输入。 -```bash -sh fsdp_single_launch.sh -``` - -#### 训练参数 -_**训练需要的参数配置在```configs/*_train_config```中,主要参数说明如下:**_ - -- **load_raw_dataset**: 需要保持true,后续会支持其它模式数据,当前仅支持jsonl输入 -- **data_paths**: "[path1,path2,path3]" 输入数据地址,字符串,开头结尾用[],中间用```,```间隔不同path,每个path是一个目录,目录的最后一级名字作为任务名称,下面包含1到多个jsonl数据 -- **output_dir**:训练输出目录,存储checkpoint(全量训练时)、lora_adaptor(Lora或者Qlora时)等 -- **tb_dir**: 存储tensorboard等 -- **model_type**: "mixtral|mistral|deepseek|llama|starcoder|chatglm2|qwen|gpt_neox" -- **attn_implementation**: "flash_attention_2" 或者 "eager" -- **peft_type**: lora或者qlora或者null(全量微调) -- **lora_rank**: lora rank -- **lora_alpha**: lora alpha -- **lora_dropout**: lora dropout -- **target_modules**: List[str], lora目标模块,如果null,会使用默认,参考model_mapping.py -- **quantization**: 是否量化,"4bit", "8bit" 或者null, qlora推荐4bit量化 -- 
**pretrained_model_path**:预训练模型的本地目录,或者在huggingface上的模型名称。 -- **weighted_loss_mode**: 多任务loss加权模式, "case3"是当前推荐。 -- **padding_mode**: 数据的样本组织方式, "padding"是将每个原始样本填充到seq_length, "pack"是将尽量多的样本打包到每个seq_length的序列中。 -- **num_train_epochs**:训练的轮次。如果数据量足够大,一般建议只训1-2个epoch。 -- **per_device_train_batch_size**:每张显卡train的batch size。 -- **per_device_eval_batch_size**:每张显卡eval的batch size。 -- **gradient_accumulation_steps**:梯度累计步数。global batch=num_gpus * per_device_train_batch_size * gradient_accumulation_steps。 -- **learning_rate**:学习率。全量参数微调的时候,建议小一些,1e-5或5e-6。qlora中的学习率设置更大一些,一般为1e-4、2e-4。 -- **min_lr**: 最低学习率, 一般是learning_rate的十分之一 -- **seq_length**:训练时的最大长度。按照自己的设备进行设置,越长需要占用越多显存。 -- **log_interval**:每隔多少步统计一次train loss。 -- **checkpointing_steps**:每隔多少步保存一个模型。 -- **evaluation_steps**:每隔多少步在验证集上evaluate一次。 -- **early_stopping** : 是否执行early_stop -- **early_stopping_stall_num**: 多少个eval point不继续收敛,则停止训练 -- **lr_scheduler_type**:学习率变化策略。常用"cosine" -- **warmup_steps**:warm up步数。学习率经过多少步,增长到指定的数值。 -- **seed**:随机种子,用于复现实验结果。 -- **saving_limit**:整数,ckpt存储数量上限, 全量训练必须设置。默认null即不限制数量。 -- **role_markers**: null,即使用{"system": "\system\n", "user": "\human\n", "assistant": "\bot\n"}。 你可以自定义 "system", "user" and "assistant"的模板, 用于定制自己的问答或者对话模板,比如 {"system": "### System:\n", "user": "### Instruction:\n", "assistant": "### Response:\n"} - -## 4. 
模型使用 - -### 4.1 权重合并 -如果使用LoRA或者QLoRA进行训练,本项目仅保存adapter的权重和配置文件,需要将adapter权重与base model进行合并。 -可以使用如下merge_base_and_lora_to_hf.py脚本。 -``` -python pefts/merge_base_and_lora_to_hf.py \ - --base_model_or_path model_path \ - --adaptor_path lora_adapter_path \ - --model_type model_type \ - --merged_output_path output_path -``` - -### 4.2 模型推理 -我们提供了单轮对话和多轮对话的如下脚本,该脚本可同时兼容大部分huggingface格式的模型。 -```python -from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, -) -model_name_or_path = "codefuse-ai/CodeFuse-Deepseek-33B" -tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, padding_side="left") -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("<|end▁of▁sentence|>") -tokenizer.pad_token_id = tokenizer.eos_token_id -model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True) - -HUMAN_ROLE_START_TAG = "human\n" -BOT_ROLE_START_TAG = "bot\n" -texts = ["write a python function of quick sort."] -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] - -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") -outputs = model.generate( - inputs=inputs["input_ids"], - attention_mask=inputs["attention_mask"], - max_new_tokens=512, - top_p=0.95, - temperature=0.1, - do_sample=True, - eos_token_id=tokenizer.eos_token_id, - pad_token_id=tokenizer.pad_token_id - ) -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) -print(gen_text) -``` - - -生成脚本中的top_p、temperature、repetition_penalty、do_sample等参数对模型的生成效果影响较大,可按照自己的使用场景进行调试修改。 -实践中,在代码生成场景中,如果采样模式,do_sample=True, top_p=0.95, temperature=0.1是pass@1指标的不错选择; -如果非采样模式, do_sample=False, beam_num=1或者3是不错的选择,其中beam_num=1即为greedy decoding。 - -## 5. FAQ -#### 问题1:OOM如何解决? 
-如果发生OOM,可以缩小per_device_train_batch_size、seq_length等参数来缓解。由于面对的模型普遍较大(6b, 13b, 34b, 70b等)我们已经默认使用gradient_checkpointing技术,可以大幅降低显存占用,但训练速度会稍慢一些。 - -#### 问题2:安装包错误 -参考init_env.sh和requirements.txt - -#### 问题3:如何指定使用某些卡训练? -通过如下方式,即可指定使用0和1号卡进行训练: -```bash -CUDA_VISIBLE_DEVICES=0,1 accelerate launch --config_file pefts/accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "deepspeed" -``` - -#### 问题4:关于Flash Attention, 该如何配置训练? -首先,我们强烈建议您安装Flash Attention 2(FA2),(>=2.1.0, 2.3.6功能更齐全)。 - -训练参数中"attn_implementation" 设置成 "eager" 可以用naive attention,也就是未经加速的attention。 - -训练参数中"attn_implementation" 设置成 "flash_attention_2" 可以用FA2,速度快,省显存。 - -如果你可以自行安装环境并使用torch>=2.1.1,可以尝试设置参数"attn_implementation"为 "sdpa"。这样会尝试使用transformers兼容的torch.nn.functional.scaled_dot_product_attention。支持的模型还不全面。 - -#### 问题5:推荐的分布式框架是怎样的? -对于LoRA/QLoRA, 我们推荐使用DeepSpeed作为底层分布式框架,它具有易用性和兼容性好的特点,并且速度很快。 -FSDP 不支持QLoRA, 因为bitsandbytes暂不支持FSDP。 - -对于全量微调,我们推荐使用FSDP, 因为它在全量训练时可以发挥fully sharding的优势,达到更快的训练速度。 - -#### 问题6:当前支持的模型中,有什么区别 -国产大模型比如chatglm2, chatglm3, baichuan2, qwen, aquila2等,使用的是和模型共同发布的modeling_xxx.py. -其它被transformers官方支持的大模型,由于已经升级支持flash attention等,所以全面切换到官方的modeling支持训练,之前的自定义modeling会被deprecated diff --git a/content/zh/docs/mftcoder/4_atorch.md b/content/zh/docs/mftcoder/4_atorch.md deleted file mode 100644 index f7e3816..0000000 --- a/content/zh/docs/mftcoder/4_atorch.md +++ /dev/null @@ -1,225 +0,0 @@ ---- -title: "MFTCoder训练: Atorch框架篇" -description: 介绍主要功能 -url: /docs/mftcoder-atorch-zh -aliases: -- "/docs/mftcoder-atorch-zh" ---- - -[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai) - - GitHub - - -[**中文**] [[English]](/docs/mftcoder-atorch) - -## 1. 更新 - -🔥 MFTCoder在Atorch框架下支持GPTNeoX模型的微调; - -🔥 MFTCoder支持全量的有监督微调; - -🔥 MFTCoder支持LoRA微调; - -## 2. 
数据格式 - -### 2.1 训练数据格式 -训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 -可以参考项目中的xxx.jsonl文件。 -```json -{ - "id":0, - "data_name":"code-helper", - "chat_rounds":[ - { - "role": "system", - "content": "你是一个智能代码助手,可以回复用户与代码相关的问题", - "chat_round_id": 0 - }, - { - "role": "human", - "content": "写一个快速排序", - "chat_round_id": 1 - }, - { - "role": "bot", - "content": "以下是一个快速排序算法xxxxxx", - "chat_round_id": 1 - }, - { - "role": "human", - "content": "解释一下这段代码", - "chat_round_id": 2 - }, - { - "role": "bot", - "content": "好的,这段代码xxx", - "chat_round_id": 2 - } - ] -} -``` - -### 2.2 推理数据格式 -推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入prompt拼接的方式: -```python -""" -<|role_start|>system<|role_end|>这是System指令 -<|role_start|>human<|role_end|>这是第1轮用户输入的问题 -<|role_start|>bot<|role_end|>这是第1轮模型生成的内容 -<|role_start|>human<|role_end|>这是第2轮用户输入的问题 -<|role_start|>bot<|role_end|>这是第2轮模型生成的内容 -... -... -... -<|role_start|>human<|role_end|>这是第n轮用户输入的问题 -<|role_start|>bot<|role_end|>{模型现在要生成的内容} -""" -``` - - -## 3. 模型训练 -目前 "MFTCoder/mft_atorch" 代码库支持全量参数指令微调和LoRA指令微调。 -目前仅支持GPTNeoX模型的训练,理论上,HuggingFace上开源的GPTNeoX模型权重,均可使用本项目进行训练。 - -我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化,详见主目录下的实现。微调训练的入口目录是```train/```, 训练入口文件是```train/run_train.py```, 参数配置存储在启动脚本```train/run_gpt_*.sh```等文件中,方便统一管理和更改。 - -### 3.1 数据格式 -训练时,我们将多轮对话拼接成如下格式,然后进行tokenize。其中<|role_start|>human<|role_end|>表示human输入提示符,<|role_start|>bot<|role_end|>表示bot输出提示符,`````````` 表示eos_token。 -``` -"<|role_start|>human<|role_end|>input1target1input2target2... 
-``` -在计算loss时,我们通过mask的方式,input部分的loss不参与参数更新,只有“target”部分的loss参与参数更新。 -这种方式充分利用了模型并行计算的优势,训练更加高效,且多轮对话中的每个target部分都参与了训练,训练更充分。 -否则,就需要把一个n轮对话,拆分成n条数据,且只计算最后一个target的loss,大大降低了训练效率。 - -### 3.2 全量SFT - -执行如下命令即可进行全量SFT: -```bash -sh run_gpt_mft.sh 10 1 8 5 -``` - -需注意,启动脚本后的四个参数,分别是: -- 第一个参数是总的per gpu batch size -- 第二个参数是tensor parallel数(暂时只支持1) -- 第三个参数是data parallel数,与所用GPU数保持一致 -- 第四个参数是训练epoch数 - -后面其他的训练方式启动脚本,也同样需要配置这四个参数 - -### 3.3 LoRA微调 - -执行如下命令即可进行Lora微调: -```bash -sh run_gpt_mft_peft.sh 10 1 8 5 -``` - -### 3.4 启动脚本中主要参数说明 -```train/run_gpt_*.sh```中的主要参数说明如下,以下参数可以根据需求进行修改,其他参数建议不做修改: -- tokenize_mode: 目前仅支持"sft"。 - -- train_mode: 目前仅支持"sft"。 - -- load_raw_dataset: 需要保持"True",后续会支持其它模式数据,当前仅支持jsonl输入 - -- data_paths: "[path1,path2,path3]" 输入数据地址,字符串,开头结尾用[],中间用```,```间隔不同path,每个path是一个目录,目录的最后一级名字作为任务名称,下面包含1到多个jsonl数据。 - -- output_dir: 训练输出目录,存储checkpoint、lora_adaptor checkpoint等。 - -- tensorboard_dir: 可以暂时忽略,实际tensorboard存储在output_dir的runs目录下。 - -- model_type: 目前仅支持 gpt_neox。 - -- peft_type: 目前仅支持 lora。 - -- pretrained_model_path: 预训练模型的本地目录。 - -- total_train_batch_size: 所有显卡train的batch size的总和,会根据启动脚本时输入的per gpu batch size自动计算。 - -- per_device_valid_batch_size: 每张显卡eval的batch size,会根据启动脚本时输入的per gpu batch size自动计算。 - -- gradient_accumulation_steps: 梯度累计步数。global batch=num_gpus * per_device_train_batch_size * gradient_accumulation_steps。 - -- checkpoint_activations: 如果显存捉襟见肘,可以开启。以时间换空间,模型不缓存激活状态,会进行两次forward计算,以节省显存。 - -- learning_rate: 学习率。全量参数微调的时候,建议小一些,1e-5或5e-6。qlora中的学习率设置更大一些,一般为1e-4、2e-4。 - -- min_lr: 最低学习率, 一般是learning_rate的十分之一。 - -- seq_length: 训练时的最大长度。按照自己的设备进行设置,越长需要占用越多显存。 - -- log_interval: 每隔多少步统计一次train loss。 - -- checkpointing_steps: 每隔多少步保存一个模型。 - -- evalation_steps: 每隔多少步在验证集上evaluate一次。 - -- early_stopping_patience: 多少个eval point不继续收敛,则停止训练。 - -- lr_scheduler_type: 学习率变化策略。 - -- num_warmup_steps: warm up步数,学习率经过多少步,增长到指定的数值。 - -- seed: 随机种子,用于复现实验结果。 - -- train_iters: 可以暂时设为比较小的数,如10,实际上不会影响训练步数,留作后面拓展读取其他形式数据集的功能。 - -- 
valid_iters: 可以暂时设为比较小的数,如10,实际上不会影响训练步数,留作后面拓展读取其他形式数据集的功能。 - -- evaluation_strategy: 训练期间evaluate的策略,"steps"表示每隔"valid_interval"步做一次evaluate,"epoch"表示每隔一个epoch做一次evaluate,支持同时开启。 - -- save_strategy: 训练期间保存模型权重的策略,"steps"表示每隔"checkpointing_steps"步保存一次。 - -- extra_save_by_epoch: 每过一个epoch是否要保存一个epoch级别的checkpoint。 - -- save_total_limit: 最多保留的模型checkpoint个数,一般设置为2,会保留valid loss最低,以及最新的checkpoint,注意epoch级别的checkpoint会一直保留,且不受限制。 - -- weighted_loss_mode: 多任务训练的loss加权方式。 - - -## 4. 模型使用 - -### 4.1 权重合并 -如果使用LoRA进行训练,本项目仅保存adapter的权重和配置文件,需要将adapter权重与base model进行合并。脚本见```utils/merge_base_and_lora_to_hf.py``` - -### 4.2 模型推理 -我们提供了单轮对话和多轮对话的如下脚本,该脚本可同时兼容大部分huggingface格式的模型。 -```python -from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, -) -tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True, use_fast=False, legacy=False) -tokenizer.padding_side = "left" -tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("") -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("") -model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, trust_remote_code=True) - -HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>" -BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>" -texts = ["write a python function of quick sort."] -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] - -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") -outputs = model.generate( - inputs=inputs["input_ids"], - attention_mask=inputs["attention_mask"], - max_new_tokens=512, - top_p=0.95, - temperature=0.1, - do_sample=True, - eos_token_id=tokenizer.eos_token_id, - pad_token_id=tokenizer.pad_token_id - ) -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) -print(gen_text) -``` - -生成脚本中的top_p、temperature、repetition_penalty、do_sample等参数对模型的生成效果影响较大,可按照自己的使用场景进行调试修改。 -实践中,在代码生成场景中,如果采样模式,do_sample=True, top_p=0.95, 
temperature=0.1是pass@1指标的不错选择; -如果非采样模式, do_sample=False, beam_num=1或者3是不错的选择,其中beam_num=1即为greedy decoding。 - -## 5. FAQ -#### 问题1:OOM如何解决? -如果发生OOM,可以缩小per GPU batch size (启动训练脚本时的第一个参数)、seq_length等参数来缓解。也可以设gradient_checkpointing=true,可以大幅降低显存占用,但训练速度会变慢一些。 \ No newline at end of file diff --git a/content/zh/docs/overview/b1.codefusechatbot.md b/content/zh/docs/overview/b1.codefusechatbot.md deleted file mode 100644 index 9997cf0..0000000 --- a/content/zh/docs/overview/b1.codefusechatbot.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: CodeFuse-ChatBot Development by Private Knowledge Augmentation -slug: CodeFuse-ChatBot-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-chatbot-zh" ---- - -

    - 中文  |  English  -

    - -DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。 - - -## 📜 目录 -- [🤝 介绍](#-介绍) -- [🎥 演示视频](#-演示视频) -- [🧭 技术路线](#-技术路线) - -## 🤝 介绍 - -💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。 - -本项目核心差异技术、功能点: -- **🧠 智能调度核心:** 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 [使用说明](/docs/multi-agent-zh) -- **💻 代码整库分析:** 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。 -- **📄 文档分析增强:** 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。 -- **🔧 垂类专属知识:** 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。 -- **🤖 垂类模型兼容:** 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。 - -🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。[接入Demo](/docs/fastchat-zh) - -👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。 - -
    - 图片 -
    - - -## 🎥 演示视频 - -为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。 - - -- 知识库导入和问答:[演示视频](https://www.youtube.com/watch?v=UGJdTGaVnNY&t=2s&ab_channel=HaotianZhu) -- 本地代码库导入和问答:[演示视频](https://www.youtube.com/watch?v=ex5sbwGs3Kg) - - -## 🧭 技术路线 -
    - Image -
    - -- 🧠 **Multi-Agent Schedule Core:** 多智能体调度核心,简易配置即可打造交互式智能体。 -- 🕷️ **Multi Source Web Crawl:** 多源网络爬虫,提供对指定 URL 的爬取功能,以搜集所需信息。 -- 🗂️ **Data Processor:** 数据处理器,轻松完成文档载入、数据清洗,及文本切分,整合不同来源的数据。 -- 🔤 **Text Embedding & Index:**:文本嵌入索引,用户可以轻松上传文件进行文档检索,优化文档分析过程。 -- 🗄️ **Vector Database & Graph Database:** 向量与图数据库,提供灵活强大的数据管理解决方案。 -- 📝 **Prompt Control & Management:**:Prompt 控制与管理,精确定义智能体的上下文环境。 -- 🚧 **SandBox:**:沙盒环境,安全地执行代码编译和动作。 -- 💬 **LLM:**:智能体大脑,支持多种开源模型和 LLM 接口。 -- 🛠️ **API Management::** API 管理工具,实现对开源组件和运维平台的快速集成。 - -具体实现明细见:[技术路线明细](/docs/chatbot-roadmap) - diff --git a/content/zh/docs/overview/b10.codefuse-evalution.md b/content/zh/docs/overview/b10.codefuse-evalution.md deleted file mode 100644 index d28e775..0000000 --- a/content/zh/docs/overview/b10.codefuse-evalution.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "CodeFuseEval: 代码大语言模型的多任务评估基准" -description: 介绍主要功能 -aliases: -- "/docs/codefuse-evalution-zh" ---- - - - - -CodeFuseEval在HumanEval-x、MBPP的基准上,结合CodeFuse大模型多任务场景,开发的编程领域多任务的评测基准, 可用于评估模型在代码补全,自然语言生成代码,测试用例生成、跨语言代码翻译,中文指令生成代码等多类任务的性能。持续开放中,敬请期待! - -![img](/images/codefuse-evalution/中文介绍.png) diff --git a/content/zh/docs/overview/b2.codefuseDevopsEval.md b/content/zh/docs/overview/b2.codefuseDevopsEval.md deleted file mode 100644 index 1d3afc1..0000000 --- a/content/zh/docs/overview/b2.codefuseDevopsEval.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: CodeFuse-DevOps-Eval -slug: CodeFuse-DevOps-Eval-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-eval-zh" ---- - -

    - - - -DevOps-Eval是一个专门为DevOps领域大模型设计的综合评估数据集。我们希望DevOps-Eval能够帮助开发者,尤其是DevOps领域的开发者,追踪进展并分析他们拥有的DevOps大模型的优势和不足之处。 - -📚 该仓库包含与DevOps和AIOps相关的问题和练习, 还添加了关于ToolLearning相关的样本。 - -💥 目前有 **7486** 个多项选择题,根据DevOps的通用流程将其归纳未8个模块,如[下图](/images/devops_eval/data_info.png)所示。 - -🔥 AIOps样本总计 **2840** 个,覆盖的场景包括**日志解析**、**时序异常检测**、**时序分类**、**时序预测**和**根因分析**。 - -🔧 ToolLearning样本 **1509** 个,涵盖59个领域,总计 239 种工具类别。 - -

    - -## 🏆 排行榜 -以下是我们获得的初版评测结果,包括多个开源模型的zero-shot和five-shot准确率。我们注意到,对于大多数指令模型来说,five-shot的准确率要优于zero-shot。 - -### 👀 DevOps -#### Zero Shot - -| **模型** | plan | code | build | test | release | deploy | operate | monitor | **平均分** | -|:------------------------:|:-----:|:-----:|:-----:|:------:|:--------:|:------:|:-------:|:--------:|:---------:| -| DevOpsPal-14B-Chat | 60.61 | 78.35 | 84.86 | 84.65 | 87.26 | 82.75 | 69.89 | 79.17 | 78.23 | -| DevOpsPal-14B-Base | 54.55 | 77.82 | 83.49 | 85.96 | 86.32 | 81.96 | 71.18 | 82.41 | 78.23 | -| Qwen-14B-Chat | 60.61 | 75.4 | 85.32 | 84.21 | 89.62 | 82.75 | 69.57 | 80.56 | 77.18 | -| Qwen-14B-Base | 57.58 | 73.81 | 84.4 | 85.53 | 86.32 | 81.18 | 70.05 | 80.09 | 76.19 | -| Baichuan2-13B-Base | 60.61 | 69.42 | 79.82 | 79.82 | 82.55 | 81.18 | 70.37 | 83.8 | 73.73 | -| Baichuan2-13B-Chat | 60.61 | 68.43 | 77.98 | 80.7 | 81.6 | 83.53 | 67.63 | 84.72 | 72.9 | -| DevOpsPal-7B-Chat | 54.55 | 69.11 | 83.94 | 82.02 | 76.89 | 80 | 64.73 | 77.78 | 71.92 | -| DevOpsPal-7B-Base | 54.55 | 68.96 | 82.11 | 78.95 | 80.66 | 76.47 | 65.54 | 78.7 | 71.69 | -| Qwen-7B-Base | 53.03 | 68.13 | 78.9 | 75.44 | 80.19 | 80 | 65.06 | 80.09 | 71.09 | -| Qwen-7B-Chat | 57.58 | 66.01 | 80.28 | 79.82 | 76.89 | 77.65 | 62.64 | 79.17 | 69.75 | -| Baichuan2-7B-Chat | 54.55 | 63.66 | 77.98 | 76.32 | 71.7 | 73.33 | 59.42 | 79.63 | 66.97 | -| Internlm-7B-Chat | 60.61 | 62.15 | 77.06 | 76.32 | 66.98 | 74.51 | 60.39 | 78.24 | 66.27 | -| Baichuan2-7B-Base | 56.06 | 62.45 | 75.69 | 70.61 | 74.06 | 69.8 | 61.67 | 75.93 | 66.21 | -| Internlm-7B-Base | 54.55 | 58.29 | 79.36 | 78.95 | 77.83 | 70.59 | 65.86 | 75.93 | 65.99 | - - -#### Five Shot - -| **模型** | plan | code | build | test | release | deploy | operate | monitor | **平均分** | -|:------------------------:|:-----:|:-----:|:-----:|:------:|:--------:|:------:|:-------:|:--------:|:---------:| -| DevOpsPal-14B-Chat | 63.64 | 79.49 | 81.65 | 85.96 | 86.79 | 86.67 | 72.95 | 81.48 | 79.69 | -| DevOpsPal-14B-Base | 
62.12 | 80.55 | 82.57 | 85.53 | 85.85 | 84.71 | 71.98 | 80.09 | 79.63 | -| Qwen-14B-Chat | 65.15 | 76 | 82.57 | 85.53 | 84.91 | 84.31 | 70.85 | 81.48 | 77.81 | -| Qwen-14B-Base | 66.67 | 76.15 | 84.4 | 85.53 | 86.32 | 80.39 | 72.46 | 80.56 | 77.56 | -| Baichuan2-13B-Base | 63.64 | 71.39 | 80.73 | 82.46 | 81.13 | 84.31 | 73.75 | 85.19 | 75.8 | -| Qwen-7B-Base | 75.76 | 72.52 | 78.9 | 81.14 | 83.96 | 81.18 | 70.37 | 81.94 | 75.36 | -| Baichuan2-13B-Chat | 62.12 | 69.95 | 76.61 | 84.21 | 83.49 | 79.61 | 71.98 | 80.56 | 74.12 | -| DevOpsPal-7B-Chat | 66.67 | 69.95 | 83.94 | 81.14 | 80.19 | 82.75 | 68.6 | 76.85 | 73.61 | -| DevOpsPal-7B-Base | 69.7 | 69.49 | 82.11 | 81.14 | 82.55 | 82.35 | 67.15 | 79.17 | 73.35 | -| Qwen-7B-Chat | 65.15 | 66.54 | 82.57 | 81.58 | 81.6 | 81.18 | 65.38 | 81.02 | 71.69 | -| Baichuan2-7B-Base | 60.61 | 67.22 | 76.61 | 75 | 77.83 | 78.43 | 67.31 | 79.63 | 70.8 | -| Internlm-7B-Chat | 60.61 | 63.06 | 79.82 | 80.26 | 67.92 | 75.69 | 60.06 | 77.31 | 69.21 | -| Baichuan2-7B-Chat | 60.61 | 64.95 | 81.19 | 75.88 | 71.23 | 75.69 | 64.9 | 79.17 | 69.05 | -| Internlm-7B-Base | 62.12 | 65.25 | 77.52 | 80.7 | 74.06 | 78.82 | 63.45 | 75.46 | 67.17 | - - -### 🔥 AIOps - -
    - -#### Zero Shot -| **模型** | 日志解析 | 根因分析 | 时序异常检测 | 时序分类 | 时序预测 | **平均分** | -|:-------------------:|:-----:|:----:|:------:|:----:|:-----:|:-------:| -| Qwen-14B-Base | 66.29 | 58.8 | 25.33 | 43.5 | 62.5 | 52.25 | -| DevOpsPal-14B—Base | 63.14 | 53.6 | 23.33 | 43.5 | 64.06 | 50.49 | -| Qwen-14B-Chat | 64.57 | 51.6 | 22.67 | 36 | 62.5 | 48.94 | -| DevOpsPal-14B—Chat | 60 | 56 | 24 | 43 | 57.81 | 48.8 | -| Qwen-7B-Base | 50 | 39.2 | 22.67 | 54 | 43.75 | 41.48 | -| DevOpsPal-7B—Chat | 56.57 | 30.4 | 25.33 | 45 | 44.06 | 40.92 | -| Baichuan2-13B-Chat | 64 | 18 | 21.33 | 37.5 | 46.88 | 39.3 | -| Qwen-7B-Chat | 57.43 | 38.8 | 22.33 | 39.5 | 25.31 | 36.97 | -| Internlm-7B—Chat | 58.86 | 8.8 | 22.33 | 28.5 | 51.25 | 36.34 | -| Baichuan2-7B-Chat | 60.86 | 10 | 28 | 34.5 | 39.06 | 36.34 | -| Baichuan2-7B-Base | 53.43 | 12.8 | 27.67 | 36.5 | 40.31 | 35.49 | -| Baichuan2-13B-Base | 54 | 12.4 | 23 | 34.5 | 42.81 | 34.86 | -| DevOpsPal-7B—Base | 46.57 | 20.8 | 25 | 34 | 38.75 | 33.94 | -| Internlm-7B—Base | 48.57 | 18.8 | 23.33 | 37.5 | 33.75 | 33.1 | - -#### One Shot -| **模型** | 日志解析 | 根因分析 | 时序异常检测 | 时序分类 | 时序预测 | **平均分** | -|:-------------------:|:-----:|:----:|:------:|:----:|:-----:|:-------:| -| DevOpsPal-14B—Chat | 66.29 | 80.8 | 23.33 | 44.5 | 56.25 | 54.44 | -| DevOpsPal-14B—Base | 60 | 74 | 25.33 | 43.5 | 52.5 | 51.13 | -| Qwen-14B-Base | 64.29 | 74.4 | 28 | 48.5 | 40.31 | 50.77 | -| Qwen-7B-Base | 56 | 60.8 | 27.67 | 44 | 57.19 | 49.44 | -| Qwen-14B-Chat | 49.71 | 65.6 | 28.67 | 48 | 42.19 | 46.13 | -| Baichuan2-13B-Base | 56 | 43.2 | 24.33 | 41 | 46.88 | 42.89 | -| Baichuan2-7B-Chat | 58.57 | 31.6 | 27 | 31.5 | 51.88 | 41.83 | -| DevOpsPal-7B—Base | 52.86 | 44.4 | 28 | 44.5 | 36.25 | 41.2 | -| Baichuan2-7B-Base | 48.29 | 40.4 | 27 | 42 | 40.94 | 39.86 | -| Qwen-7B-Chat | 54.57 | 52 | 29.67 | 26.5 | 27.19 | 38.73 | -| Baichuan2-13B-Chat | 57.43 | 44.4 | 25 | 25.5 | 30.63 | 37.75 | -| DevOpsPal-7B—Chat | 56.57 | 27.2 | 25.33 | 41.5 | 33.44 | 37.46 | -| 
Internlm-7B—Chat | 62.57 | 12.8 | 22.33 | 21 | 50.31 | 36.69 | -| Internlm-7B—Base | 48 | 33.2 | 29 | 35 | 31.56 | 35.85 | - -
    - -### 🔧 ToolLearning -
    - -| **FuncCall-Filler** | dataset_name | fccr | 1-fcffr | 1-fcfnr | 1-fcfpr | 1-fcfnir | aar | -|:-------------------:| :---: | :---: | :---: | :---: | :---: | :---: | :---: | -| Qwen-14b-chat | luban | 61 | 100 | 97.68 | 63.32 | 100 | 69.46 | -| Qwen-7b-chat | luban | 50.58 | 100 | 98.07 | 52.51 | 100 | 63.59 | -| Baichuan-7b-chat | luban | 60.23 | 100 | 97.3 | 62.93 | 99.61 | 61.12 | -| Internlm-chat-7b | luban | 47.88 | 100 | 96.14 | 51.74 | 99.61 | 61.85 | -| Qwen-14b-chat | fc_data | 98.37 | 99.73 | 99.86 | 98.78 | 100 | 81.58 | -| Qwen-7b-chat | fc_data | 99.46 | 99.86 | 100 | 99.59 | 100 | 79.25 | -| Baichuan-7b-chat | fc_data | 97.96 | 99.32 | 100 | 98.64 | 100 | 89.53 | -| Internlm-chat-7b | fc_data | 94.29 | 95.78 | 100 | 98.5 | 100 | 88.19 | -| CodeLLaMa-7b | fc_data | 98.78 | 99.73 | 100 | 99.05 | 100 | 94.7 | -| CodeLLaMa-7b-16 | fc_data | 98.1 | 99.87 | 99.73 | 98.5 | 100 | 93.14 | -| CodeFuse-7b-4k | fc_data | 98.91 | 99.87 | 99.87 | 99.18 | 100 | 89.5 | - -
    \ No newline at end of file diff --git a/content/zh/docs/overview/b4.MFTCoder.md b/content/zh/docs/overview/b4.MFTCoder.md deleted file mode 100644 index ac3e7a5..0000000 --- a/content/zh/docs/overview/b4.MFTCoder.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "MFTCoder: 高效准确的多任务大模型微调框架" -slug: MFTCoder-zh -description: 介绍主要功能 -aliases: -- "/docs/mftcoder-zh" ---- - - -
    - -

    - 🤗 HuggingFace - • 🤖 魔搭 -

    - -[**中文**] [[English]](/docs/mftcoder) - -
    - - - -## 目录 -- [新闻](#新闻) -- [文章](#文章) -- [项目简介](#项目简介) -- [环境](#环境) -- [训练](#训练) -- [模型](#模型) -- [数据集](#数据集) - - -## 新闻 -🔥🔥🔥 [2024/01/17] **MFTCoder-v0.3.0**发布。新增对Mixtral(MoE), DeepSeek等模型的支持;新增支持FSDP(Fully Sharded Data Parallel);新增Self-paced Loss, 支持多任务收敛均衡。 感兴趣详见微信公众号CodeFuse的文章[MFTCoder 重磅升级v0.3.0发布](https://mp.weixin.qq.com/s/xI3f0iUKq9TIIKZ_kMtcQg) - -🔥🔥🔥 [2024/01/17] 开源了[CodeFuse-DeepSeek-33B](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B)模型,在HumanEval pass@1(greedy decoding)上可以达到78.7%。该模型在Big Code榜单的结果近期发布,请关注公众号获取最新信息。 - -🔥🔥🔥 [2024/01/17] 开源了[CodeFuse-Mixtral-8x7B](https://huggingface.co/codefuse-ai/CodeFuse-Mixtral-8x7B)模型,在HumanEval pass@1(greedy decoding)上可以达到56.1%。感兴趣详见微信公众号CodeFuse的文章[MFTCoder提升Mixtral-8x7B混合专家模型的代码能力实践](https://mp.weixin.qq.com/s/xI3f0iUKq9TIIKZ_kMtcQg) - -🔥🔥 [2023/11/07] [MFTCoder论文](https://arxiv.org/abs/2311.02303)在Arxiv公布,介绍了多任务微调的技术细节。 - -🔥🔥 [2023/10/20] 开源了[CodeFuse-QWen-14B](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B)模型,在HumanEval pass@1(greedy decoding)上可以达到48.8%。相比较与基座模型Qwen-14b提升16%。感兴趣详见微信公众号CodeFuse[文章](https://mp.weixin.qq.com/s/PCQPkvbvfxSPzsqjOILCDw) - -🔥🔥 [2023/09/27] 开源了[CodeFuse-StarCoder-15B](https://huggingface.co/codefuse-ai/CodeFuse-StarCoder-15B)模型,在HumanEval pass@1(greedy decoding)上可以达到54.9%。 - -🔥🔥 [2023/09/26] [CodeFuse-CodeLlama-34B-4bits](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B-4bits)量化版本发布,量化后模型在HumanEval pass@1指标为73.8% (贪婪解码)。 - -🔥🔥 [2023/09/07]MFTCoder微调的模型**CodeFuse-CodeLlama-34B**在[HumanEval Benchmarks](https://github.com/openai/human-eval)的Python **Pass@1** 取得了**74.4%**(greedy decoding)的开源SOTA成绩。 - -🔥🔥 [2023/08/26]MFTCoder-v0.1.0 支持使用LoRA/QLoRA对Code Llama、Llama、Llama2、StarCoder、ChatGLM2、CodeGeeX2、Qwen和GPT-NeoX模型进行微调。 - -### HumanEval表现 -| 模型 | HumanEval(Pass@1) | 日期 | -|:---------------------------------|:-----------------:|:-------:| -| **CodeFuse-DeepSeek-33B** | **78.7%** | 2024/01 | -| **CodeFuse-CodeLlama-34B** | **74.4%** | 2023/09 | -| 
**CodeFuse-CodeLlama-34B-4bits** | **73.8%** | 2023/09 | -| WizardCoder-Python-34B-V1.0 | 73.2% | 2023/08 | -| GPT-4(zero-shot) | 67.0% | 2023/03 | -| PanGu-Coder2 15B | 61.6% | 2023/08 | -| **CodeFuse-Mixtral-8x7B** | **56.1%** | 2024/01 | -| **CodeFuse-StarCoder-15B** | **54.9%** | 2023/08 | -| CodeLlama-34b-Python | 53.7% | 2023/08 | -| **CodeFuse-QWen-14B** | **48.8%** | 2023/10 | -| CodeLlama-34b | 48.8% | 2023/08 | -| GPT-3.5(zero-shot) | 48.1% | 2022/11 | -| OctoCoder | 46.2% | 2023/08 | -| StarCoder-15B | 33.6% | 2023/05 | -| QWen-14B | 32.3% | 2023/10 | - - -## 文章 -🔥 [CodeFuse-MFTCoder提升CodeGeeX2-6B代码能力](https://mp.weixin.qq.com/s/kWMtHIoe3ytN8pRVi_CHZg) - -🔥 [CodeFuse-MFTCoder提升Qwen-14B代码能力](https://mp.weixin.qq.com/s/PCQPkvbvfxSPzsqjOILCDw) - - -## 项目简介 -**国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架;** - -**Codefuse-MFTCoder** 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。 - -### 项目框架 -![img_1.jpg](/images/mftcoder/img_1.jpg) - -### 项目优势 -:white_check_mark: **多任务**:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去; - -:white_check_mark: **多模型**:支持最新的多个开源模型,包括gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2等; - -:white_check_mark: **多框架**:既支持主流开源的Accelerate+DeepSpeed/FSDP,也支持新开源的[ATorch 框架](https://github.com/intelligent-machine-learning/dlrover); - -:white_check_mark: **高效微调**:支持LoRA和QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景; - - -本项目主要内容如下: -- 同时支持单任务SFT(Supervised FineTuning)和MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等 -- 支持QLoRA低成本高效指令微调、LoRA高效指令微调、全量参数高精度微调。 -- 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如DeepSeek-coder, Mistral, Mistral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA等。 -- 支持lora与base model进行权重合并,推理更便捷。 -- 整理并开源2个指令微调数据集:[Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k)和[CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k)。 -- 开源多个[Codefuse系列指令微调模型权重],具体参见我们的huggingface组织和modelscope组织下的模型:[codefuse-ai 
huggingface](https://huggingface.co/codefuse-ai) or [codefuse-ai 魔搭](https://modelscope.cn/organization/codefuse-ai)。 - -## 引用 -如果你觉得我们的工作对你有帮助,请引用我们的论文 -``` -@article{mftcoder2023, - title={MFTCoder: Boosting Code LLMs with Multitask Fine-Tuning}, - author={Bingchang Liu and Chaoyu Chen and Cong Liao and Zi Gong and Huan Wang and Zhichao Lei and Ming Liang and Dajun Chen and Min Shen and Hailian Zhou and Hang Yu and Jianguo Li}, - year={2023}, - journal={arXiv preprint arXiv}, - archivePrefix={arXiv}, - eprint={2311.02303} -} -``` \ No newline at end of file diff --git a/content/zh/docs/overview/b5.CodeFuseModelCache.md b/content/zh/docs/overview/b5.CodeFuseModelCache.md deleted file mode 100644 index e287de6..0000000 --- a/content/zh/docs/overview/b5.CodeFuseModelCache.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: CodeFuse-ModelCache -slug: CodeFuse-ModelCache-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-modelcache-zh" ---- - -

    -

    -

    -

    - 中文 | - English -

    -

    -
    - -## Contents -- [新闻](#新闻) -- [项目简介](#项目简介) -- [架构大图](#架构大图) -- [致谢](#致谢) -- [Contributing](#Contributing) - -## 新闻 -- 🔥🔥[2023.12.10] 增加llmEmb、onnx、paddlenlp、fasttext等LLM embedding框架,并增加timm 图片embedding框架,用于提供更丰富的embedding能力。 -- 🔥🔥[2023.11.20] codefuse-ModelCache增加本地存储能力, 适配了嵌入式数据库sqlite、faiss,方便用户快速启动测试。 -- [2023.10.31] codefuse-ModelCache... - -## 项目简介 -Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义Cache的相关技术。 - -## 架构大图 -![modelcache modules](/images/codefuse-modelcache/modelcache_modules_20231114.png) - -## 致谢 -本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。
    [GPTCache](https://github.com/zilliztech/GPTCache) - -## Contributing -ModelCache是一个非常有趣且有用的项目,我们相信这个项目有很大的潜力,无论你是经验丰富的开发者,还是刚刚入门的新手,都欢迎你为这个项目做出一些贡献,包括但不限于:提交问题和建议,参与代码编写,完善文档和示例。你的参与将会使这个项目变得更好,同时也会为开源社区做出贡献。 \ No newline at end of file diff --git a/content/zh/docs/overview/b6.FasterTransformer4CodeFuse.md b/content/zh/docs/overview/b6.FasterTransformer4CodeFuse.md deleted file mode 100644 index e108a66..0000000 --- a/content/zh/docs/overview/b6.FasterTransformer4CodeFuse.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: FasterTransformer4CodeFuse -slug: FasterTransformer4CodeFuse-zh -description: 介绍主要功能 -aliases: -- "/docs/fastertransformer4codefuse-zh" ---- - -## FasterTransformer4CodeFuse -FasterTransformer4CodeFuse \ No newline at end of file diff --git a/content/zh/docs/overview/b7.TestAgent.md b/content/zh/docs/overview/b7.TestAgent.md deleted file mode 100644 index 74a95af..0000000 --- a/content/zh/docs/overview/b7.TestAgent.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: "Test-Agent: 您的智能测试助理" -slug: Test-Agent-zh -description: 介绍主要功能 -aliases: -- "/docs/test-agent-zh" ---- - -### 本地Mac M1体验效果 -![图片](https://github.com/codefuse-ai/Test-Agent/assets/103973989/8dba860f-c1bb-49d5-b9dd-a58e541562a6) - -### 魔搭体验效果 -魔搭模型访问链接:[ModelScope TestGPT-7B](https://modelscope.cn/models/codefuse-ai/TestGPT-7B/summary) -![MS](https://github.com/codefuse-ai/Test-Agent/assets/103973989/0e50b258-44f9-4dc6-8e30-0a01cf62d02b) - - -## 什么是Test Agent?(Introduction) - -**Test Agent** 旨在构建测试领域的“智能体”,融合大模型和质量领域工程化技术,促进质量技术代系升级。我们期望和社区成员一起合作,打造创新的测试领域解决方案,构建24小时在线的测试助理服务,让测试如丝般顺滑。 -## 本期特性(Features) - -* **模型** 本期我们开源了测试领域模型TestGPT-7B。模型以CodeLlama-7B为基座,进行了相关下游任务的微调: - * **多语言测试用例生成(Java/Python/Javascript)** 一直以来都是学术界和工业界非常关注的领域,近年来不断有新产品或工具孵化出来,如EvoSuite、Randoop、SmartUnit等。然而传统的用例生成存在其难以解决的痛点问题,基于大模型的测试用例生成在测试用例可读性、测试场景完整度、多语言支持方面都优于传统用例生成工具。本次重点支持了多语言测试用例生成,在我们本次开源的版本中首先包含了Java、Python、Javascript的测试用例生成能力,下一版本中逐步开放Go、C++等语言。 - * **测试用例Assert补全** 
对当前测试用例现状的分析与探查时,我们发现代码仓库中存在一定比例的存量测试用例中未包含Assert。没有Assert的测试用例虽然能够在回归过程中执行通过,却无法发现问题。因此我们拓展了测试用例Assert自动补全这一场景。通过该模型能力,结合一定的工程化配套,可以实现对全库测试用例的批量自动补全,智能提升项目质量水位。 - -* **工程框架** 本地模型快速发布和体验工程化框架 - - ChatBot页面 - - 模型快速启动 - - 私有化部署,本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险,100%安全 - -**后续我们会持续迭代模型和工程化能力:** -- 不断加入更多令人激动的测试域应用场景,如领域知识问答、测试场景分析等 -- 支撑面向测试场景的copilot 工程框架开放,如测试领域知识智能embedding、测试通用工具API体系、智能测试Agent等,敬请期待! -- 以7B为基础,逐步扩展至13B、34B模型。欢迎关注! - -## 性能最强的7B测试领域大模型(Model) -目前在TestAgent中,我们默认使用了TestGPT-7B模型。与当前已有开源模型相比,**TestGPT-7B模型在用例执行通过率(pass@1)、用例场景覆盖(平均测试场景数)上都处于业界领先水平。** -TestGPT-7B模型核心能力的评测结果如下: -- 多语言测试用例生成 -针对模型支持的三种语言:Java、Python、Javascript,Pass@1评测结果如下: - -| Model | Java pass@1 | Java Average number of test scenarios | Python pass@1 | Python Average number of test scenarios | Javascript pass@1 | Javascript Average number of test scenarios | -| --- | --- | --- | --- | --- | --- | --- | -| TestGPT-7B | 48.6% | 4.37 | 35.67% | 3.56 | 36% | 2.76 | -| CodeLlama-13B-Instruct | 40.54% | 1.08 | 30.57% | 1.65 | 31.7% | 3.13 | -| Qwen-14B-Chat | 10.81% | 2.78 | 15.9% | 1.32 | 9.15% | 4.22 | -| Baichuan2-13B-Chat | 13.5% | 2.24 | 12.7% | 2.12 | 6.1% | 3.31 | - - -- 测试用例Assert补全 -目前模型支持Java用例的Assert补全,Pass@1评测结果如下: - -| Model | pass@1 | Percentage of strong validation | -| --- | --- | --- | -| Codefuse-TestGPT-7B | 71.1% | 100% | - - -## 工程架构(Engineering Architecture) -![JG](https://github.com/codefuse-ai/Test-Agent/assets/103973989/1b61beff-df59-4ab3-843c-266413c8dbc4) - -大模型的号角已经吹响,测试领域大模型也在不断进化中,通过预训练过程中积累的丰富世界知识,在复杂交互环境中展现出了非凡的推理与决策能力。 - -尽管在测试领域中基础模型取得了显著的成果,但仍然存在一些局限性,特定领域的测试任务通常需要专业化的工具或领域知识来解决。例如,基础模型可以通过预训练知识完成单次测试代码生成和测试文本生成等任务,但处理复杂的集成用例生成、特定领域用例生成和测试流程pipeline交互等问题时,需要更专业的工具和领域知识。因此将专用工具与基础模型整合在一起,可以充分发挥它们各自的优势。专用工具可以解决模型时效性不足、增强专业知识、提高可解释性和鲁棒性的问题。而基础模型则具备类人的推理规划能力,可以理解复杂的数据和场景,并与现实世界进行交互。 - -在本期开放模型工程化部署和ChatBot基础上,我们将继续在测试开源领域深耕投入。协同社区志趣相投开发者们,一起打造测试领域最领先的Tools工程体系、智能测试助理和测试开源工程! 
- diff --git a/content/zh/docs/overview/b9.mftvlm.md b/content/zh/docs/overview/b9.mftvlm.md deleted file mode 100644 index b59ea59..0000000 --- a/content/zh/docs/overview/b9.mftvlm.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: CodeFuse-MFT-VLM -slug: CodeFuse-MFT-VLM -description: 介绍主要功能 -aliases: -- "/docs/codefuse-mft-vlm-zh" ---- - -## CodeFuse-VLM -CodeFuse-VLM 是一个多模态大语言模型框架,该框架为用户提供多种视觉编码器,模态对齐模块和大语言模型的选择,以适配用户对不同任务的需求。 - -随着huggingface开源社区的不断更新,会有更多的vision encoder 和 LLM 底座发布,这些vision encoder 和 LLM底座都有各自的强项,例如 code-llama 适合生成代码类任务,但是不适合生成中文类的任务;因此我们搭建了CodeFuse-VLM 框架,支持多种视觉模型和语言大模型,使得CodeFuse-VLM可以适应不同种类的任务。 - -![img.jpg](/images/mft-vlm/CodeFuse-VLM-arch.png) - -我们在CodeFuse-VLM 框架下, 使用Qwen-VL的视觉编码器, cross attention模态对齐模块, 和 Qwen-14B 模型训练了 CodeFuse-VLM-14B - -CodeFuse-VLM-14B 在多个benchmarks 上的性能超过了Qwen-VL和LLAVA-1.5 -![img.jpg](/images/mft-vlm/CodeFuse-VLM-14B-performance.png) - -各个模型得分如下表所示: -模型 | MMBench | MMBench-CN | VqaV2 | GQA | TextVQA | Vizwiz -| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | -LLAVA-1.5 | 67.7 | 63.6 | 80.0 | 63.3 | 61.3 | 53.6 -Qwen-VL | 60.6 | 56.7 | 78.2 | 57.5 | 63.8 | 38.9 -CodeFuse-VLM-14B | 75.7 | 69.8 | 79.3 | 59.4 | 63.9 | 45.3 - -我们的模型在MMBenchmark 多模态大模型榜单上取得了很高的排名: https://mmbench.opencompass.org.cn/leaderboard - -这是我们模型的展示视频 - 
-https://private-user-images.githubusercontent.com/22836551/300386230-8e64f615-ac0e-447e-9695-c96b254d484f.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjExODksIm5iZiI6MTcwNjUyMDg4OSwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzODYyMzAtOGU2NGY2MTUtYWMwZS00NDdlLTk2OTUtYzk2YjI1NGQ0ODRmLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5MzQ0OVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQ5NzNjM2U1ZWU4NDU0Yzc5NmE4ZTM1NzY2ZjU4YjRjY2ZhNjMzODk0ZDgzMDg4N2FjYjZhYTllM2E3NTAyMWQmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.pr-ad7rKYBgk26DTItj2q2q9I5dRWnBNHbV9M7GSVCo - diff --git a/content/zh/docs/testagent/1_quickstart.md b/content/zh/docs/testagent/1_quickstart.md deleted file mode 100644 index 04efa4e..0000000 --- a/content/zh/docs/testagent/1_quickstart.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "快速使用" -slug: "快速使用" -description: 介绍主要功能 -url: "/docs/test-agent-quickstart-zh" -aliases: -- "/docs/test-agent-quickstart-zh" ---- - -## 快速使用(QuickStart) -### 前置准备 - -#### 模型下载 - -您可在[modelscope](https://modelscope.cn/models/codefuse-ai/TestGPT-7B)或[huggingface](https://huggingface.co/codefuse-ai/TestGPT-7B)上获取到模型的详细信息并下载模型文件。 -需要注意的是: -1)如果您通过modelscope下载模型,下载方式可参考:[下载说明](https://www.modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%B8%8B%E8%BD%BD#%E4%BD%BF%E7%94%A8Git%E4%B8%8B%E8%BD%BD%E6%A8%A1%E5%9E%8B); -2)如果您通过huggingface下载模型,请确保您可以正常访问huggingface。 - -#### 环境安装 - -- python>=3.8 -- transformers==4.33.2 - -```plain -git clone https://github.com/codefuse-ai/Test-Agent -cd Test-Agent -pip install -r requirements.txt -``` - -在开始运行TestGPT-7B模型之前,请确保你的执行环境拥有大约14GB的显存。 -### 启动服务 - -项目提供了网页端快速搭建UI的能力能够更直观的展示模型交互和效果,我们可以使用简单的几个命令把前端页面唤醒并实时调用模型能力。在项目目录下,依次启动以下服务: - -1.**启动controller** 
-![controller](https://github.com/codefuse-ai/Test-Agent/assets/103973989/e68ce187-c9f1-4ce8-9d59-ff9d8348d0ac) -python3 -m chat.server.controller - -2.**启动模型worker** -![work](https://github.com/codefuse-ai/Test-Agent/assets/103973989/073e4e79-4005-4c98-87f7-0eaa0b2b1e22) -python3 -m chat.server.model_worker --model-path models/TestGPT-7B --device mps - -(models/TestGPT-7B 为实际模型文件路径) - -对于启动方式,可以按需选择以下几种配置选项: -- --device mps 用于在Mac电脑上开启GPU加速的选项(Apple Silicon或AMD GPUs); -- --device xpu 用于在Intel XPU上开启加速的选项(Intel Data Center and Arc A-Series GPUs); - - 需安装[Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html) - - 设置OneAPI环境变量:source /opt/intel/oneapi/setvars.sh -- --device npu 用于在华为AI处理器上开启加速的选项; - - 需安装[Ascend PyTorch Adapter](https://github.com/Ascend/pytorch) - - 设置CANN环境变量:source /usr/local/Ascend/ascend-toolkit/set_env.sh -- --device cpu 单独使用CPU运行的选项,不需要GPU; -- --num-gpus 2 指定并发gpu运行的选项。 - -3. **启动web服务** -python3 -m chat.server.gradio_testgpt -![web](https://github.com/codefuse-ai/Test-Agent/assets/103973989/340dae35-573b-4046-a3e8-e87a91453601) -待服务准备就绪后,我们可以打开本地启动的web服务地址 http://0.0.0.0:7860 ,就能看到完整的前端页面了。在页面下方包含了【单测生成】和【Assert补全】的两个例子,点击按钮后会自动生成一段样例文本到输入框中,点击Send按钮就会触发模型运行,之后耐心等待一段时间后(运行时间视本机性能而定)即可看到完整的回答了。 -![demo](https://github.com/codefuse-ai/Test-Agent/assets/103973989/fd24274c-729b-4ce7-8763-a083b39300fb) - diff --git a/content/zh/muagent/overview/agent-flow.md b/content/zh/muagent/overview/agent-flow.md deleted file mode 100644 index 46d1795..0000000 --- a/content/zh/muagent/overview/agent-flow.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Agent 编排 -slug: Agent 编排 -url: "muagent/agent-编排" -aliases: -- "/muagent/agent-编排" -- "/muagent/agent-flow-zh" ---- - - - -## 核心Connector介绍 -为了便于大家理解整个 muagent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建 - -
    - 图片 -
    - - -
    下面,我们先介绍相关的核心组件。
    - -### Agent -在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用 -1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出 - -2. ReactAgent:提供标准React的功能,根据问题实现当前任务 - -3. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 - -4. SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. - - -输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理 - -### Chain -基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理 - -### Phase -基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理 - -### Prompt Manager -Mutli-Agent链路中每一个agent的prompt创建 -- 通过对promtp_input_keys和promtp_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置 -- 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt - -### Memory Manager -主要用于 chat history 的管理 -- 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval -- 对 chat history 进行关键信息总结 summary context,作为 prompt context -- 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 diff --git a/content/zh/muagent/overview/multi-agent.md b/content/zh/muagent/overview/multi-agent.md deleted file mode 100644 index da907d0..0000000 --- a/content/zh/muagent/overview/multi-agent.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: MuAgent 概览 -slug: MuAgent 概览 -url: "muagent/muagent-概览" -aliases: -- "/muagent/muagent-概览" -- "/muagent/multi-agent-zh" -- "/muagent/muagent-zh" ---- - - -# 简介 - -为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。 - -但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。 - -经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。 - -因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。 - -本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。 - -
    - 图片 -
    - - -# MuAgent框架 -在MuAgent中,我们除了定义Agent交互链路和AgentBase基础执行流以外,还额外设计了 Prompt Manager 和 Memory Manager 两个基础组件,分别用于自动化构建Prompt和chat history管理。最终构建出一个可扩展、易于使用的Multi-Agent框架,包括以下内容 -- Agent Base:构建了四种基本的Agent类型BaseAgent、ReactAgent、ExecutorAgent、SelectorAgent,支撑各种场景的基础活动 -- Communication:通过Message和Parse Message 实体完成Agent间的信息传递,并与Memory Manager交互再Memory Pool完成记忆管理 -- Prompt Manager:通过Role Handler、Doc/Tool Handler、Session Handler、Customized Handler,来自动化组装Customized 的Agent Prompt -- Memory Manager: 用于支撑 chat history 的存储管理、信息压缩、记忆检索等管理,最后通过Memory Pool在数据库、本地、向量数据库中完成存储 -- Component:用于构建Agent的辅助生态组件,包括Retrieval、Tool、Action、Sandbox等 -- Customized Model:支持私有化的LLM和Embedding的接入 - - - -## Agent Base -在Agent层面,提供四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用。所有的Action都由Agent执行。 - -1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出 - -
    - 图片 -
    - -2. ReactAgent:提供标准React的功能,根据问题实现当前任务 -
    - 图片 -
    - -3. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 -Agent接受到任务清单([List[task]),对这个任务清单Task进行循环执行(中间也可添加 Feedback Agent来进行任务重新优化),直到任务完成 -
    - 图片 -
    - -4. SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. -
    - 图片 -
    - - -## Communication -为了让Agent之间进行更好的交互,以及能够让每一个Agent接受到足够的信息完成它们特定任务,我们将Message信息体分成了多个部分,System Content、Info Content、LLM Content和LLM Parsed Content等 -- System Content:用于存储管理当前LLM输出的时间,Role信息等 -- Info Content:LLM辅助信息,比如像知识库查询信息、代码库检索信息、工具信息、Agent信息等 -- LLM Content:直接存储和传递LLM 产生的信息 -- LLM Parsed Content:对LLM进行解析转成更易操作的key-value数据结构,方便对LLM内容进行过滤 -- Customized Content:用于管理自定义action产生的key-value数据内容,用于后续自定义Prompt模板的组装构建 - -通过对以上消息格式的定义,我们便可以完成通用消息的传递和管理。具体组装见Prompt Manager模块 - -## Context Manager -### Memory Manager -主要用于 chat history 的管理 -- 存储管理:在数据库或本地实现对chat history进行save和load管理,包括user input、 llm output、observation ouput -- 信息压缩:对 chat history 进行关键信息压缩总结 summary context,比如说单文本概况、侧重不同角度进行文本概况、关键信息提取、多文本概况,作为 Prompt context -- 记忆检索:提供基础检索功能,检索 chat history 或者 Summary Context 中与问题相关信息,辅助问答 -- LLM自动触发:后续定义策略或通过LLM来 触发 压缩总结和检索的功能 - -### Prompt Manager -提问LLM已经成为一种常见的实践,但如何让多个大模型分工并协调好LLM间的规划、调用工具、代码编写能力,来引导它们产生期望的输出,成为了一个关键的问题,其本质就是将业务问题抽象并拆解到可执行的Prompt,那与其说我们是在设计Agents,不如说是对当前需求的深入理解后进行框架设计。 -在LLM介入到实际业务场景(不涉及SFT过程),我们能通过设计Agent Prompt的内容来指定LLM完成相应任务得到相应输出。在MuAgent这个过程中,将这个Prompt分成了三个部分,System Prompt、Context Prompt、Customized Prompt - -- System Prompt 包括 Role Name、Role Description、Task等 -- Context Prompt 包括 Doc Context、Code Context、Tool Context、Agent Context、Session Context等 -- Customized Prompt 则是 自定义的一些 Input 和 Ouput,比如说 ... -我们还可以要求模型输出结构化的文本,比如说tool的json串、*code\ncode_content*等来完成特定工作流。 - -**Automatic Prompt Assemble** -在按照上述结构定义后,我们便可以通过以下方式来完成Prompt的自动化组装,不需要每次去做大量的prompt调整工作 -1. 定义Agent时直接配置 Role Name、Role Description、Task等来决定Agent需要做的事情 -2. 预封装一些可复用的Context Prompt 通用策略,比如说可筛选 Role 的 SessionContext、可配置的Tool、Code Retrieval、Doc Retrieval、Search Retrieval、Agent来完成对应的组装 -3. 由于Agent的Prompt是相对个性化的操作,所以也支持在Prompt Manager 模块内新增新的 key-context 设计,实现个性化的 Agent Prompt。 - - -**Automatic Prompt Design** -能根据role description、task、query等来自动化设计出最优的prompt;待定义... 
- -**Multi Prompt Design** -根据前面Prompt的定义,我们可以了解到Prompt 由 System Prompt、Context Prompt、Customized Prompt 三个部分组成,三个部分的任一变化都有可能会引起LLM最终输出结果的变化。 -对于同种任务而言,即它们的System Prompt是相同的。那么在不考虑Customiezd Prompt 变化时,就可实现不同上下文的组装差异,比如说Prompt A获取10轮的chat history,而Pormpt B采用5轮的chat history,又或者是对chat history进行信息过滤、信息压缩等。 -待实现... - -## Component -### Retrieval -在所有Prompt的Context中,除了Chat History的会话信息外,还需要依赖于从外界文档知识库、代码库、互联网搜索得来的相关信息,这些模型参数知识外的知识体系能够极大提升Agent完成复杂任务的能力。 -于是在MuAgent中我们集成了Doc、Internet Search、Code Retrieval三种检索信息的方式,并定义了一个抽象IMRetrieval类,可支持开发者自定义个性化的知识库,来完成Agent的知识库注册。 - -**Doc Retrieval** -文档向量数据库是当前最主流的知识库构建方法,使用Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式,来挖掘数据中多种复杂关系。 - -**Code Retrieval** -LLM在代码生成、修复以及组件理解的任务上,会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中,对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来,以及为LLM提供知识体系外的代码。 - -**Search Retrieval** -除了现成的文档和代码知识库以及之外,在日常中实践中会去浏览大量网页内容获取更多的知识,帮助我们理解新兴的场景、业务、技术等,于是我们接入了duckduckgosearch这款开源的搜索工具,能够为LLM提供知识储备以外的内容。 - -### Tool -随着OpenAI推出了Function Call功能,通过LLM生成指定工具的参数并执行调用,使机器能更好地理解和回应人类的需求,从而解决实际问题和重复性的工作。现如今工具学习能力越来越作为开源模型的标配。那在MuAgent中也支持Agent完成Tool的注册,通过Python注册模板`BaseToolModel`类,编写Tool_name、Tool_description、ToolInputArgs、ToolOutputArgs、run等相关属性和方法即可实现工具的快速接入,同时支持langchain Tool接口的直接使用。 -例如像上述 XXRetrieval 的功能也可以注册为Tool,最终由LLM执行调用。 - -### Action -在MuAgent的定义里,Action是作为LLM具体要执行的动作或动作流,会包括LLM信息处理、知识检索、工具调用以及代码执行等一个综合性的复杂过程,是一个动态过程。比如在React过程中,我们通过LLM获取到了一个Tool参数,接下来"将工具参数放入到Tool并执行调用"这个过程就是Action,它去实践性的调用了Tool。又或者说我们定义了一个Agent,它编排在一个固定Agent的Action步骤之中,这个Agent的输入参数由Action特殊指定。也就是说无论是由LLM产生参数还是工程设定参数,只有涉及具体的执行过程,就是一个Action。 - - -## 模块分类 -- [connector](/muagent/connector-agent-zh) 主要介绍这块Agent框架的工作 -- llm_models -- retrieval -- tools -- sandbox -- utils diff --git a/content/zh/muagent/overview/quick-start.md b/content/zh/muagent/overview/quick-start.md deleted file mode 100644 index 4a40899..0000000 --- a/content/zh/muagent/overview/quick-start.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -title: 快速开始 
-slug: 快速开始 -url: "muagent/快速开始" -aliases: -- "/muagent/快速开始" -- "/muagent/quick-start-zh" ---- - - - -## Quick Start -完整示例见,[examples/muagent_examples](htpps://) -### 首先,准备相关配置信息 -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -### 然后,设置LLM配置和Embedding模型配置 -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.connector.phase import BasePhase -from muagent.connector.schema import Message - - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### 最后选择一个已有场景进行执行 -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# 选择一个场景 -phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -# round-1 需要通过代码解释器来完成 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", tools=[], input_query=query_content, -) - -# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 需要执行工具 -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) - 
-query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下" -query = Message( - role_name="human", role_type="user", tools=tools, input_query=query_content, -) - -# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -``` -## 场景自定义 -见[如何自定义场景](/muagent/customed-examples-zh) - -## 场景介绍和使用 - -下面是一些具体的场景介绍和使用。 - -也欢迎大家开脑洞构造一些有趣的case。 - -### baseTaskPhase -xAgents的任务拆分及多步骤执行场景 - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -# -phase_name = "baseTaskPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) -# round-1 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", input_query=query_content, -) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### codeReactPhase -基于 React 的代码解释器场景 - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# then, create a data analyze phase -phase_name = "codeReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -# round-1 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", input_query=query_content, - ) - -output_message, output_memory = 
phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### codeToolReactPhase -基于 React 模板的工具调用和代码解释器场景 - -``` -TOOL_SETS = [ - "StockName", "StockInfo", - ] -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "codeToolReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -query_content = "查询贵州茅台的股票代码,并查询截止到当前日期(2023年12月24日)的最近10天的每日时序数据,然后用代码画出折线图并分析" - -query = Message(role_name="human", role_type="user", input_query=query_content, tools=tools) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### docChatPhase -知识库检索问答链路 -- example 1 -``` -# create your knowledge base -from muagent.service.kb_api import create_kb, upload_files2kb -from muagent.utils.server_utils import run_async -from muagent.orm import create_tables - - -# use to test, don't create some directory -create_tables() -# create a knowledge base -kb_name = "example_test" -run_async(create_kb(knowledge_base_name=kb_name, vector_store_type="faiss", embed_config=embed_config, kb_root_path=KB_ROOT_PATH)) -# add doc to knowledge base -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") -files = [file] -upload_files2kb(files, kb_name, embed_config, kb_root_path=KB_ROOT_PATH) - - - -## start to chat with knowledge base -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "0" - -## exmaple 1 -# set chat phase -phase_name = "docChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, -) - -# round-1 -query_content = "langchain有哪些模块" -query = Message( - role_name="human", role_type="user", input_query=query_content, - doc_engine_name=kb_name, 
score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content = "提示(prompts)有什么用?" -query = Message( - role_name="human", role_type="user", input_query=query_content, - doc_engine_name=kb_name, score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -``` - -- exmaple 2 -``` -ustomized register demo -from muagent.tools import DocRetrieval -class BaseDocRetrieval(IMRertrieval): - - def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): - self.knowledge_base_name = knowledge_base_name - self.search_top = search_top - self.score_threshold = score_threshold - self.embed_config = embed_config - self.kb_root_path = kb_root_path - - def run(self, query: str, search_top=None, score_threshold=None, ): - docs = DocRetrieval.run( - query=query, knowledge_base_name=self.knowledge_base_name, - search_top=search_top or self.search_top, - score_threshold=score_threshold or self.score_threshold, - embed_config=self.embed_config, - kb_root_path=self.kb_root_path - ) - return docs - - -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config) - -# set chat phase -phase_name = "docChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, - doc_retrieval=doc_retrieval -) - -# round-1 -query_content = "langchain有哪些模块" -query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content = "提示(prompts)有什么用?" 
-query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### metagpt_code_devlop -metagpt的代码构造链路 - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "metagpt_code_devlop" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -query_content = "create a snake game" -query = Message(role_name="human", role_type="user", input_query=query_content) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### searchChatPhase -固定场景链路,先搜索后基于LLM直接回答 - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -# 当duckduckgo连接不通的时候可以配置这个 -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5h://127.0.0.1:13659" - -phase_name = "searchChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -# round-1 -query_content1 = "美国当前总统是谁?" -query = Message( - role_name="human", role_type="user", input_query=query_content1, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content2 = "美国上一任总统是谁,两个人有什么关系没?" 
-query = Message( - role_name="human", role_type="user", input_query=query_content2, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### toolReactPhase -基于 React 模板的工具调用场景 - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "toolReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -# round-1 -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) -query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下" -query = Message( - role_name="human", role_type="user", tools=tools, input_query=query_content, - ) - -# phase.pre_print(query) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` \ No newline at end of file diff --git a/data/en/coagent/sidebar.yml b/data/en/coagent/sidebar.yml deleted file mode 100644 index d7954f4..0000000 --- a/data/en/coagent/sidebar.yml +++ /dev/null @@ -1,16 +0,0 @@ -- title: ❤️ CoAgent - pages: - - title: CoAgent Overview - - title: Agent Flow - - title: Prompt Manager - - title: Quick Start - - -- title: Modules - pages: - - title: Connector-Agent - - title: Connector-Chain - - title: Connector-Phase - - title: Connector-Prompt - - title: Connector-Memory - - title: Customed Examples \ No newline at end of file diff --git a/data/en/contribution/sidebar.yml b/data/en/contribution/sidebar.yml deleted file mode 100644 index 3f2f3b5..0000000 --- a/data/en/contribution/sidebar.yml +++ /dev/null @@ -1,11 +0,0 @@ - - -- title: ❤️ Contributing Guide - pages: - - title: Contribution Guide - - title: Issue Report - - title: Pull Request - -- title: ❤️ Acknowledgments - pages: - - title: Acknowledgements \ No newline at end of file diff --git a/data/en/docs/sidebar.yml 
b/data/en/docs/sidebar.yml deleted file mode 100644 index d91c288..0000000 --- a/data/en/docs/sidebar.yml +++ /dev/null @@ -1,87 +0,0 @@ -- title: 📖 CodeFuse-AI Overview - pages: - - title: overview - -- title: 📖 CodeFuse-AI Module - pages: - - title: CodeFuse-Query - - title: MFTCoder - - title: CodeFuse-MFT-VLM - - title: Test-Agent - - title: CodeFuse-ModelCache - - title: CodeFuse-ChatBot - # rename: quickstart # 用于title重命名,title作为跳转地址 docs/title - - title: CodeFuse-DevOps-Eval - - title: CodeFuse-DevOps-Model - - title: CodeFuse-Evalution - -- title: CodeFuse-Query - pages: - - title: codefuse-query-introduction - rename: Introduction - - title: codefuse-query-quickstart - rename: QuickStart - - title: codefuse-query-GodelLanguage - rename: GodelLanguage - - title: codefuse-query-toolchain - rename: Toolchain - - title: codefuse-query-usercase - rename: UserCase - -- title: MFTCoder - pages: - - title: mftcoder-introduction - rename: Introduction - - title: mftcoder-quickstart - rename: QuickStart - - title: mftcoder-accelerate - rename: Accelerate + DeepSpeed/FSDP Framework - - title: mftcoder-atorch - rename: Atorch Framework - -- title: CodeFuse-MFT-VLM - pages: - - title: codefuse-mft-vlm-quickstart - rename: QuickStart - -- title: 🌱 Test Agent - pages: - - title: test-agent-quickstart - rename: QuickStart - -- title: 🌱 CodeFuse-ModelCache - pages: - - title: CodeFuse-ModelCache-quickstart - rename: QuickStart - - title: CodeFuse-ModelCache-feature - rename: Feature - - title: CodeFuse-ModelCache-config - rename: Config - - title: CodeFuse-ModelCache-release - rename: Release Note - -- title: 🌱 CodeFuse-ChatBot - pages: - - title: CodeFuse-ChatBot-QuickStart - rename: QuickStart - - title: Start-Detail - - title: LLM-Configuration - - title: ChatBot-RoadMap - -- title: 🌱 CodeFuse-DevOps-Model - pages: - - title: CodeFuse-DevOps-Model-Train - rename: TrainDetail - - title: CodeFuse-DevOps-Model-QuickStart - rename: QuickStart - -- title: 🌱 
CodeFuse-DevOps-Eval - pages: - - title: Data - - title: CodeFuse-DevOps-Eval-QuickStart - rename: QuickStart - -- title: 🌱 CodeFuse-evalution - pages: - - title: CodeFuse-evalution-quickstart - rename: QuickStart diff --git a/data/en/muagent/sidebar.yml b/data/en/muagent/sidebar.yml deleted file mode 100644 index 4d9819c..0000000 --- a/data/en/muagent/sidebar.yml +++ /dev/null @@ -1,33 +0,0 @@ -- title: ❤️ MuAgent - pages: - - title: MuAgent Overview - - title: Agent Flow - - title: Quick Start - - -- title: Modules - pages: - - title: Connector Agent - rename: Agent Builder - - title: Connector Chain - rename: Chain Builder - - title: Connector Phase - rename: Phase Builder - - title: Connector Prompt - rename: Create Prompt - - title: Connector Memory - rename: Memory Builder - - title: Custom Examples - -- title: llm_models - pages: - - title: LLM Model Config - - title: Embedding Model Config - -- title: Tools - pages: - - title: Custom tool - -- title: Retrieval - pages: - - title: Custom retrieval \ No newline at end of file diff --git a/data/zh/coagent/sidebar.yml b/data/zh/coagent/sidebar.yml deleted file mode 100644 index 53440c1..0000000 --- a/data/zh/coagent/sidebar.yml +++ /dev/null @@ -1,16 +0,0 @@ - -- title: ❤️ CoAgent - pages: - - title: CoAgent 概览 - - title: Agent 编排 - - title: Prompt 管理器 - - title: 快速开始 - -- title: Connector - pages: - - title: Connector Agent ZH - - title: Connector Chain ZH - - title: Connector Phase ZH - - title: Connector Prompt ZH - - title: Connector Memory ZH - - title: Customed Examples ZH \ No newline at end of file diff --git a/data/zh/contribution/sidebar.yml b/data/zh/contribution/sidebar.yml deleted file mode 100644 index a4fdf0e..0000000 --- a/data/zh/contribution/sidebar.yml +++ /dev/null @@ -1,11 +0,0 @@ - -- title: ❤️ 贡献指南 - pages: - - title: 贡献指南 - - title: 如何提交Issue - - title: 如何提交PR - - -- title: ❤️ 致谢 - pages: - - title: 致谢 \ No newline at end of file diff --git a/data/zh/docs/sidebar.yml 
b/data/zh/docs/sidebar.yml deleted file mode 100644 index 6882b52..0000000 --- a/data/zh/docs/sidebar.yml +++ /dev/null @@ -1,95 +0,0 @@ -- title: 📖 CodeFuse-AI 整体介绍 - pages: - - title: 概览 - -- title: 📖 CodeFuse-AI 模块 - pages: - - title: CodeFuse-Query-zh - rename: CodeFuse-Query - - title: MFTCoder-zh - rename: MFTCoder - - title: CodeFuse-MFT-VLM-zh - rename: CodeFuse-MFT-VLM - - title: Test-Agent-zh - rename: Test-Agent - - title: CodeFuse-ModelCache-zh - rename: CodeFuse-ModelCache - - title: CodeFuse-ChatBot-zh - rename: CodeFuse-ChatBot - - title: CodeFuse-DevOps-Eval-zh - rename: CodeFuse-DevOps-Eval - - title: CodeFuse-DevOps-Model-zh - rename: CodeFuse-DevOps-Model - - title: CodeFuse-evalution-zh - rename: CodeFuse-Evalution - -- title: CodeFuse-Query - pages: - - title: CodeFuse-Query-introduction-zh - rename: 基本介绍 - - title: CodeFuse-Query-quickstart-zh - rename: 快速开始 - - title: codefuse-query-GodelLanguage-zh - rename: 查询语言介绍 - - title: codefuse-query-toolchain-zh - rename: VSCode插件 - - title: codefuse-query-usercase-zh - rename: 用户案例 - -- title: MFTCoder - pages: - - title: mftcoder-introduction-zh - rename: 基本介绍 - - title: mftcoder-quickstart-zh - rename: 快速使用 - - title: mftcoder-accelerate-zh - rename: Accelerate + DeepSpeed/FSDP 框架篇 - - title: mftcoder-atorch-zh - rename: Atorch框架篇 - -- title: CodeFuse-MFT-VLM - pages: - - title: codefuse-mft-vlm-quickstart-zh - rename: 快速使用 - -- title: 🌱 Test Agent - pages: - - title: test-agent-quickstart-zh - rename: 快速开始 - -- title: 🌱 CodeFuse-ModelCache - pages: - - title: CodeFuse-ModelCache-quickstart-zh - rename: 快速开始 - - title: CodeFuse-ModelCache-feature-zh - rename: 功能特性 - - title: CodeFuse-ModelCache-config-zh - rename: 最佳配置 - - title: CodeFuse-ModelCache-release-zh - rename: 版本记录 - -- title: 🌱 CodeFuse-ChatBot - pages: - - title: CodeFuse-ChatBot-quickstart-zh - rename: 快速开始 - - title: 启动明细 - - title: 本地私有化&大模型接口接入 - - title: ChatBot 技术路线 - -- title: 🌱 CodeFuse-DevOps-Model - pages: - - title: 
CodeFuse-DevOps-Model-Train-zh - rename: 训练解析 - - title: CodeFuse-DevOps-Model-QuickStart-zh - rename: 快速使用 - -- title: 🌱 CodeFuse-DevOps-Eval - pages: - - title: 数据介绍 - - title: CodeFuse-DevOps-Eval-quickstart-zh - rename: 快速开始 - -- title: 🌱 CodeFuse-evalution - pages: - - title: CodeFuse-evalution-quickstart-zh - rename: 快速开始 \ No newline at end of file diff --git a/data/zh/muagent/sidebar.yml b/data/zh/muagent/sidebar.yml deleted file mode 100644 index 25a49c4..0000000 --- a/data/zh/muagent/sidebar.yml +++ /dev/null @@ -1,40 +0,0 @@ - -- title: ❤️ MuAgent - pages: - - title: MuAgent 概览 - rename: MuAgent - - title: Agent 编排 - rename: Agents Flow - - title: 快速开始 - -- title: Connector - pages: - - title: Connector Agent ZH - rename: Agent构建 - - title: Connector Chain ZH - rename: Chain构建 - - title: Connector Phase ZH - rename: Phase构建 - - title: Connector Prompt ZH - rename: Prompt编写 - - title: Connector Memory ZH - rename: Memory构建 - - title: Custom Examples ZH - rename: 自定义示例 - -- title: llm_models - pages: - - title: LLM Model Config ZH - rename: LLM 配置 - - title: Embedding Model Config ZH - rename: Embedding 配置 - -- title: Tools - pages: - - title: Custom tool zh - rename: 自定义 Tool - -- title: Retrieval - pages: - - title: Custom retrieval zh - rename: 自定义 Retrieval \ No newline at end of file diff --git a/docs/aboutDocs/aboutdocs.en-US.md b/docs/aboutDocs/aboutdocs.en-US.md new file mode 100644 index 0000000..931fa78 --- /dev/null +++ b/docs/aboutDocs/aboutdocs.en-US.md @@ -0,0 +1,17 @@ +--- +title: about +nav: + title: About + order: 1 +bannerTitle: https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*i0atTYYpUEMAAAAAAAAAAAAADlHYAQ/original +contentTitle: Qwen Team +# group: +# title: ❤️ Contribution Guide +# index: true +# order: -1 +toc: content +--- + +## Qwen Team + +The Qwen team aims at chasing artificial general intelligence and now focuses on building generalist models, including large language models and large multimodal models. 
We embrace open source and previously we have released the Qwen model series, including the language models, e.g., Qwen-7B, Qwen-14B, and Qwen-72B, as well as their chat models, and multimodal models, such as Qwen-VL and Qwen-Audio. Additionally, we have built web service and APP for users to benefit from the assistance of Qwen for your daily work and life. We are a group of people with diverse talents and interests. Feel free to chat with us and welcome to join us! diff --git a/docs/aboutDocs/aboutdocs.zh-CN.md b/docs/aboutDocs/aboutdocs.zh-CN.md new file mode 100644 index 0000000..949c0a6 --- /dev/null +++ b/docs/aboutDocs/aboutdocs.zh-CN.md @@ -0,0 +1,18 @@ +--- +title: about +nav: + title: 关于 + order: 1 +bannerTitle: https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*10l4RpTepNIAAAAAAAAAAAAADlHYAQ/original +contentTitle: Qwen 团队 + +# group: +# title: ❤️ Contribution Guide +# index: true +# order: -1 +toc: content +--- + +## Qwen 团队 + +Qwen 团队的目标是追逐通用人工智能,目前专注于构建通用模型,包括大型语言模型和大型多模态模型。我们拥抱开源,之前我们已经发布了 Qwen 模型系列,包括语言模型,例如 Qwen-7B、Qwen-14B 和 Qwen-72B,以及它们的聊天模型,以及多模态模型,例如 Qwen-VL 和 Qwen-Audio。此外,我们还构建了 Web 服务和 APP,供用户在日常工作和生活中受益于 Qwen 的帮助。我们是一群拥有不同才能和兴趣的人。欢迎与我们聊天,欢迎加入我们! 
diff --git a/docs/categories/index.xml b/docs/categories/index.xml deleted file mode 100644 index 0cc8c74..0000000 --- a/docs/categories/index.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - Categories on CodeFuse-AI - /categories/ - Recent content in Categories on CodeFuse-AI - Hugo -- gohugo.io - en-US - - - diff --git a/docs/coagent/agent-flow-zh/index.html b/docs/coagent/agent-flow-zh/index.html deleted file mode 100644 index 43e0daf..0000000 --- a/docs/coagent/agent-flow-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/agent-%E7%BC%96%E6%8E%92/ - - - - - - diff --git a/docs/coagent/agent-flow/index.html b/docs/coagent/agent-flow/index.html deleted file mode 100644 index 3ea8987..0000000 --- a/docs/coagent/agent-flow/index.html +++ /dev/null @@ -1,401 +0,0 @@ - - - - - - - - -Agent Flow · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Agent Flow

    -
    -
    - - -

    Introduction to Core Connectors

    -

    To facilitate everyone’s understanding of the entire CoAgent link, we use a Flow format to detail how to build through configuration settings.

    -
    - 图片 -
    -


    Below, we will first introduce the related core components

    -

    Agent

    -

    At the design level of the Agent, we provide four basic types of Agents, which allows for the basic role settings of these Agents to meet the interaction and usage of a variety of common scenarios.

    -
      -
    1. BaseAgent: Provides basic question and answer, tool usage, and code execution functions. It implements Input => Output according to the Prompt format.
    2. -
    -
    - 图片 -
    -
      -
    1. ExecutorAgent: Executes tasks in sequence from a task list based on the plan arranged by the User or the previous Agent, completing the related tasks.
    2. -
    3. ReactAgent: Provides standard React functionality, based on the issue to perform the current task.
    4. -
    5. electorAgent: Provides the functionality of choosing an Agent.
    6. -
    -

    It selects the appropriate Agent to respond based on the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which is subsequently managed by the Memory Manager.

    -

    Chain

    -

    Basic Chain: BaseChain, which connects the interaction of agents, completing the management of related messages and memory.

    -

    Phase

    -

    Basic Phase: BasePhase, which connects the interaction of chains, completing the management of related messages and memory.

    -

    Prompt Manager

    -

    Creation of prompts for each agent in a Multi-Agent link:

    -
      -
    • By simply setting prompt_input_keys and prompt_output_keys, one can reuse the preset Prompt Context creation logic, thus achieving rapid configuration of the agent prompt.
    • -
    • The prompt manager module can also be redesigned with new key-context designs to implement a personalized Agent Prompt.
    • -
    -

    Memory Manager

    -

    Mainly used for the management of chat history, which is not yet completed:

    -
      -
    • Manages the reading and writing of chat history in the database, including user input, llm output, doc retrieval, code retrieval, search retrieval.
    • -
    • Summarizes key information from the chat history to form a summary context, which serves as prompt context.
    • -
    • Provides a search function to retrieve information related to the question from the chat history or the summary context, aiding in question and answer sessions.
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/coagent/agent-\347\274\226\346\216\222/index.html" "b/docs/coagent/agent-\347\274\226\346\216\222/index.html" deleted file mode 100644 index 43e0daf..0000000 --- "a/docs/coagent/agent-\347\274\226\346\216\222/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/agent-%E7%BC%96%E6%8E%92/ - - - - - - diff --git a/docs/coagent/coagent-overview/index.html b/docs/coagent/coagent-overview/index.html deleted file mode 100644 index ffcfd22..0000000 --- a/docs/coagent/coagent-overview/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /coagent/coagent/ - - - - - - diff --git a/docs/coagent/coagent-zh/index.html b/docs/coagent/coagent-zh/index.html deleted file mode 100644 index d9de663..0000000 --- a/docs/coagent/coagent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - - - - - - diff --git "a/docs/coagent/coagent-\346\246\202\350\247\210/index.html" "b/docs/coagent/coagent-\346\246\202\350\247\210/index.html" deleted file mode 100644 index d9de663..0000000 --- "a/docs/coagent/coagent-\346\246\202\350\247\210/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - - - - - - diff --git a/docs/coagent/coagent/index.html b/docs/coagent/coagent/index.html deleted file mode 100644 index 9f84004..0000000 --- a/docs/coagent/coagent/index.html +++ /dev/null @@ -1,401 +0,0 @@ - - - - - - - - -CoAgent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CoAgent

    -
    -
    - - -

    简介

    -

    To enhance the performance of large language models (LLMs) in terms of inference accuracy, the industry has seen various innovative approaches to utilizing LLMs. From the earliest Chain of Thought (CoT), Text of Thought (ToT), to Graph of Thought (GoT), these methods have continually expanded the capability boundaries of LLMs. In dealing with complex problems, we can use the ReAct process to select, invoke, and execute tool feedback, achieving multi-round tool usage and multi-step execution.

    -

    However, for more complex scenarios, such as the development of intricate code, single-function LLM Agents are clearly insufficient. Thus, the community has begun to develop combinations of multiple Agents, such as projects focused on metaGPT, GPT-Engineer, chatDev in the development domain, and AutoGen projects focused on automating the construction of Agents and Agent dialogue.

    -

    After in-depth analysis of these frameworks, it has been found that most Agent frameworks are highly coupled, with poor usability and extensibility. They achieve specific scenarios in preset environments, but expanding these scenarios is fraught with difficulty.

    -

    Therefore, we aim to build an extensible, user-friendly Multi-Agent framework to support ChatBots in retrieving knowledge base information while assisting with various common tasks such as daily office work, data analysis, and development operations.

    -

    This Multi-Agent framework project incorporates excellent design elements from multiple frameworks, such as the message pool from metaGPT and the agent selector from autogen.

    -
    - 图片 -
    -

    The following modules will introduce the necessary components of the Multi Agent framework from five aspects:

    -
      -
    • -

      Agent Communication: In the Multi-Agent framework, ensuring effective information exchange among Agents is crucial for managing context and improving Q&A efficiency.

      -
        -
      • Follow a straightforward and intuitive chain-based dialogue principle, arranging Agents in a linear fashion to form an execution chain.
      • -
      • Drawing from the Message Pool framework in metaGPT, Agents are allowed to push and subscribe to the Message Pool, making the chain more flexible. This is beneficial for fine-tuning the scenario of Prompt engineering but challenging to manage complex chain relationship analysis.
      • -
      -
    • -
    • -

      Standard Operation Process (SOP): Standardizing the parsing and handling of LLM’s generated results.

      -
        -
      • Define the input and output scope of an Agent, assembling and parsing relevant Actions and Statuses to ensure the stability of the framework.
      • -
      • Encapsulate a variety of fundamental Action execution modules, such as Tool Using, Planning, Coding, Direct Answering, final answer, etc., to meet the basic work requirements of an Agent.
      • -
      -
    • -
    • -

      Plan and Executor: Enhance LLM’s tool usage, Agent scheduling, and code generation. Several basic chains have been set up, for example:

      -
        -
      • a. Single-round Q&A, which can also be expanded to forms like CoT, ToT, GoT, etc.
      • -
      • b. ReAct, a basic response decision-making process where the model sets SOP status to terminate the loop.
      • -
      • c. Task Planning - Executor, where the task is completed and can end.
      • -
      -
    • -
    • -

      Long-short term memory Management: The key difference between Multi-Agent and single Agent is that Multi-Agent needs to handle a large amount of communication information, similar to the process of human teamwork collaboration. Add an Agent specifically responsible for content summarization (similar to a meeting assistant) to summarize long-term memories and provide more effective information to the next Agent, rather than passing all content to the next one.

      -
    • -
    • -

      Human-agent interaction: In the face of complex scenarios, human intervention is required in the Agent interaction process to provide feedback. Through the aforementioned Long-short term memory Management and Agent Communication processes, enable the LLM to accurately understand human intentions, thereby completing tasks more effectively.

      -
    • -
    -

    In summary, these five elements together construct a Multi-Agent framework, ensuring closer and more efficient cooperation between Agents while also adapting to more complex task requirements and a variety of interaction scenarios. By combining multiple Agent chains to implement a complete and complex project launch scenario (Dev Phase), such as Demand Chain (CEO), Product Argument Chain (CPO, CFO, CTO), Engineer Group Chain (Selector, Developer1~N), QA Engineer Chain (Developer, Tester), Deploy Chain (Developer, Deployer).

    -

    模块分类

    -
      -
    • connector
    • -
    • document_loaders
    • -
    • embeddings
    • -
    • llm_models
    • -
    • orm
    • -
    • sandbox
    • -
    • service
    • -
    • text_splitter
    • -
    • tools
    • -
    • utils
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/coagent/connector-agent-zh/index.html b/docs/coagent/connector-agent-zh/index.html deleted file mode 100644 index 8f7ff81..0000000 --- a/docs/coagent/connector-agent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/connector-agent-zh/ - - - - - - diff --git a/docs/coagent/connector-agent/index.html b/docs/coagent/connector-agent/index.html deleted file mode 100644 index b18c859..0000000 --- a/docs/coagent/connector-agent/index.html +++ /dev/null @@ -1,601 +0,0 @@ - - - - - - - - -Connector Agent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Agent

    -
    -
    - - -

    快速构建一个Agent

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.configs import AGETN_CONFIGS
    -from coagent.connector.agents import BaseAgent
    -from coagent.connector.schema import Message, load_role_configs
    -
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    # LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
      -
    • 这里从已有的agent配置选一个role来做示例
    • -
    -
    # 从已有的配置中选择一个config,具体参数细节见下面
    -role_configs = load_role_configs(AGETN_CONFIGS)
    -agent_config = role_configs["general_planner"]
    -# 生成agent实例
    -base_agent = BaseAgent(
    -    role=agent_config.role, 
    -    prompt_config = agent_config.prompt_config,
    -    prompt_manager_type=agent_config.prompt_manager_type,
    -    chat_turn=agent_config.chat_turn,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config,
    -    embed_config=embed_config,
    -    jupyter_work_path=JUPYTER_WORK_PATH,
    -    kb_root_path=KB_ROOT_PATH,
    -    ) 
    -# round-1
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message = base_agent.step(query)
    -print(output_message.to_str_content(content_key="parsed_output_list"))
    -

    Agent 参数配置

    -
    # 配置结构在这个目录
    -from coagent.connector.schema import Role, PromptField
    -

    Agent Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    roleRole角色描述
    prompt_configList[PromptField]Enum:PromptManager 也可以继承以上几种Agent然后去构造相关的Agent
    prompt_manager_typeStringEnum:PromptManager 也可以继承以上几种Agent然后去构造自定义的Enum:PromptManager
    focus_agentsList[String]metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name
    focus_message_keysList[String]额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys
    chat_turnint只针对ReactAgent有效
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    -

    Role

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    role_typestr角色类型, Enum: system、user、assistant、function、observation、summary
    role_namestr角色名称
    role_descstr角色描述
    agent_typestr代理类型
    role_promptstr角色提示
    template_promptstr模板提示
    -

    PromptField

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    field_namestr
    function_namestr
    titlestr
    descriptionstr
    is_contextbool
    omit_if_emptybool
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/coagent/connector-chain-zh/index.html b/docs/coagent/connector-chain-zh/index.html deleted file mode 100644 index bc1d327..0000000 --- a/docs/coagent/connector-chain-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/connector-chain-zh/ - - - - - - diff --git a/docs/coagent/connector-chain/index.html b/docs/coagent/connector-chain/index.html deleted file mode 100644 index 34dd736..0000000 --- a/docs/coagent/connector-chain/index.html +++ /dev/null @@ -1,523 +0,0 @@ - - - - - - - - -Connector Chain · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Chain

    -
    -
    - - -

    快速构建一个 agent chain

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    # 设置openai的api-key
    -import os, sys
    -import openai
    -import importlib
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xxxx"
    -openai.api_key = "sk-xxxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    # LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
      -
    • 这里从已有的agent配置选多个role组合成 agent chain
    • -
    -
    from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.configs import AGETN_CONFIGS
    -from coagent.connector.chains import BaseChain
    -from coagent.connector.schema import Message, load_role_configs
    -
    -# 构建 agent chain 链路
    -role_configs = load_role_configs(AGETN_CONFIGS)
    -agent_config = role_configs["general_planner"]
    -role1 = role_configs["general_planner"]
    -role2 = role_configs["executor"]
    -agent_module = importlib.import_module("examples.connector.agents")
    -agents = [
    -    getattr(agent_module, role1.role.agent_type)(
    -            role=role1.role, 
    -            prompt_config = role1.prompt_config,
    -            prompt_manager_type=role1.prompt_manager_type,
    -            chat_turn=role1.chat_turn,
    -            focus_agents=role1.focus_agents,
    -            focus_message_keys=role1.focus_message_keys,
    -            llm_config=llm_config,
    -            embed_config=embed_config,
    -            jupyter_work_path=JUPYTER_WORK_PATH,
    -            kb_root_path=KB_ROOT_PATH,
    -        ),
    -    getattr(agent_module, role2.role.agent_type)(
    -            role=role2.role, 
    -            prompt_config = role2.prompt_config,
    -            prompt_manager_type=role2.prompt_manager_type,
    -            chat_turn=role2.chat_turn,
    -            focus_agents=role2.focus_agents,
    -            focus_message_keys=role2.focus_message_keys,
    -            llm_config=llm_config,
    -            embed_config=embed_config,
    -            jupyter_work_path=JUPYTER_WORK_PATH,
    -            kb_root_path=KB_ROOT_PATH,
    -        ),
    -    ]
    -
    -chain = BaseChain(
    -    agents, 
    -    chat_turn=1, 
    -    jupyter_work_path=JUPYTER_WORK_PATH,
    -    kb_root_path=KB_ROOT_PATH,
    -    llm_config=llm_config,
    -    embed_config=embed_config,
    -    )
    -
      -
    • 开始执行
    • -
    -
    # round-1
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = chain.step(query)
    -print(output_memory.to_str_messages(content_key="parsed_output_list"))
    -

    Chain 参数配置

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    agentsList[BaseAgent]
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/coagent/connector-memory-zh/index.html b/docs/coagent/connector-memory-zh/index.html deleted file mode 100644 index 2c6f23e..0000000 --- a/docs/coagent/connector-memory-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/connector-memory-zh/ - - - - - - diff --git a/docs/coagent/connector-memory/index.html b/docs/coagent/connector-memory/index.html deleted file mode 100644 index b3e96e5..0000000 --- a/docs/coagent/connector-memory/index.html +++ /dev/null @@ -1,488 +0,0 @@ - - - - - - - - -Connector Memory · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Memory

    -
    -
    - - -

    Memory Manager

    -

    主要用于 chat history 的管理,暂未完成

    -
      -
    • 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval
    • -
    • 对 chat history 进行关键信息总结 summary context,作为 prompt context
    • -
    • 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答
    • -
    -

    使用示例

    -

    创建 memory manager 实例

    -
    import os
    -import openai
    -
    -from coagent.base_configs.env_config import KB_ROOT_PATH
    -from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.schema import Message
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
    -# LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -# 
    -phase_name = "test"
    -memory_manager = LocalMemoryManager(
    -            unique_name=phase_name, 
    -            do_init=True, 
    -            kb_root_path = KB_ROOT_PATH, 
    -            embed_config=embed_config, 
    -            llm_config=llm_config
    -        )
    -

    支持Message管理

    -
    message1 = Message(
    -    role_name="test1", role_type="user", input_query="hello", origin_query="hello",
    -    parsed_output_list=[{"input": "hello"}]
    -)
    -
    -text = "hi! how can I help you?"
    -message2 = Message(
    -    role_name="test2", role_type="assistant", input_query=text, origin_query=text,
    -    role_content=text, step_content=text, parsed_output_list=[{"answer": text}]
    -)
    -
    -text = "they say hello and hi to each other"
    -message3 = Message(
    -    role_name="test3", role_type="summary",
    -    role_content=text, step_content=text,
    -    parsed_output_list=[{"summary": text}]
    -    )
    -

    支持 memory 检索

    -
    # embedding retrieval test
    -text = "say hi, i want some help"
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "datetime"))
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "embedding"))
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "text"))
    -

    支持 memory 总结

    -
    # recursive_summary test
    -print(memory_manager.recursive_summary(local_memory_manager.recall_memory.messages, split_n=1))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/coagent/connector-phase-zh/index.html b/docs/coagent/connector-phase-zh/index.html deleted file mode 100644 index c61656b..0000000 --- a/docs/coagent/connector-phase-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/connector-phase-zh/ - - - - - - diff --git a/docs/coagent/connector-phase/index.html b/docs/coagent/connector-phase/index.html deleted file mode 100644 index 9904dd6..0000000 --- a/docs/coagent/connector-phase/index.html +++ /dev/null @@ -1,523 +0,0 @@ - - - - - - - - -Connector Phase · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Phase

    -
    -
    - - -

    快速构建一个 agent phase

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.configs import AGETN_CONFIGS
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.schema import Message, load_role_configs
    -
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    # LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
      -
    • 这里从已有的 phase 配置中选一个 phase 来做示例
    • -
    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "searchChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1
    -query_content1 = "美国当前总统是谁?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content1, input_query=query_content1, origin_query=query_content1,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content2 = "美国上一任总统是谁,两个人有什么关系没?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content2, input_query=query_content2, origin_query=query_content2,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Phase 参数配置

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    phase_nameString场景名称
    phase_configCompletePhaseConfig默认为None,可直接指定完整的phaseconfig, 暂未实现
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    base_phase_configUnion[dict, str]默认配置:PHASE_CONFIGS,可通过实现对这个变量新增来实现自定义配置
    base_chain_configUnion[dict, str]默认配置:CHAIN_CONFIGS,可通过实现对这个变量新增来实现自定义配置
    base_role_configUnion[dict, str]默认配置:AGETN_CONFIGS,可通过实现对这个变量新增来实现自定义配置
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/coagent/connector-prompt-zh/index.html b/docs/coagent/connector-prompt-zh/index.html deleted file mode 100644 index f812868..0000000 --- a/docs/coagent/connector-prompt-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/connector-prompt-zh/ - - - - - - diff --git a/docs/coagent/connector-prompt/index.html b/docs/coagent/connector-prompt/index.html deleted file mode 100644 index ef70305..0000000 --- a/docs/coagent/connector-prompt/index.html +++ /dev/null @@ -1,630 +0,0 @@ - - - - - - - - -Connector Prompt · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Prompt

    -
    -
    - - -

    Prompt 的标准结构

    -

    在整个Prompt的整个结构中,我们需要去定义三个部分

    -
      -
    • Agent Profile
    • -
    • Input Format
    • -
    • Response Output Format
    • -
    -
    #### Agent Profile
    -
    -Agent Description ...
    -
    -#### Input Format
    -
    -**Origin Query:** the initial question or objective that the user wanted to achieve
    -
    -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved.
    -
    -#### Response Output Format
    -**Action Status:** finished or continued
    -If it's 'finished', the context can answer the origin query.
    -If it's 'continued', the context can't answer the origin query.
    -
    -**REASON:** Justify the decision of choosing 'finished' and 'continued' by evaluating the progress step by step.
    -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion.
    -

    其中,我们整合了部分 Input Format 的通用操作,内置了一部分字段和操作流程,形成通用的配置化操作。如下所示,只需要定义如下字段和执行函数:

    -
    AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
    -    {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False}
    -]
    -

    未来我们会也会进一步将 Agent Profile和Response Output Format的部分,实现可配置化操作,降低Prompt编写难度

    -

    自定义 Input Format

    -

    同时,我们也支持 用户自定义 Input Format 的操作

    -
    from coagent.connector.prompt_manager import PromptManager
    -
    -# 增加了两个新处理函数,用于prompt组装
    -class CodeRetrievalPM(PromptManager):
    -    def handle_code_packages(self, **kwargs) -> str:
    -        if 'previous_agent_message' not in kwargs:
    -            return ""
    -        previous_agent_message: Message = kwargs['previous_agent_message']
    -        # 由于两个agent共用了同一个manager,所以临时性处理
    -        vertices = previous_agent_message.customed_kargs.get("RelatedVerticesRetrivalRes", {}).get("vertices", [])
    -        return ", ".join([str(v) for v in vertices])
    -
    -    def handle_retrieval_codes(self, **kwargs) -> str:
    -        if 'previous_agent_message' not in kwargs:
    -            return ""
    -        previous_agent_message: Message = kwargs['previous_agent_message']
    -        return '\n'.join(previous_agent_message.customed_kargs["Retrieval_Codes"])
    -
    -
    -# Design your personal PROMPT INPUT FORMAT 
    -CODE_RETRIEVAL_PROMPT_CONFIGS = [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'reference_documents', "function_name": 'handle_doc_info'},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'retrieval_codes', "function_name": 'handle_retrieval_codes'},
    -    {"field_name": 'code_packages', "function_name": 'handle_code_packages'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
    -    {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False}
    -    ]
    -
    -# 进行注册
    -import importlib
    -prompt_manager_module = importlib.import_module("coagent.connector.prompt_manager")
    -setattr(prompt_manager_module, 'CodeRetrievalPM', CodeRetrievalPM)
    -
    -# 更新配置
    -from coagent.connector.configs import AGETN_CONFIGS
    -AGETN_CONFIGS.update({
    -    "codeRetrievalJudger": {
    -        "role": {
    -            "role_prompt": codeRetrievalJudger_PROMPT,
    -            "role_type": "assistant",
    -            "role_name": "codeRetrievalJudger",
    -            "role_desc": "",
    -            "agent_type": "CodeRetrievalJudger"
    -            # "agent_type": "BaseAgent"
    -        },
    -        "prompt_config": CODE_RETRIEVAL_PROMPT_CONFIGS,
    -        "prompt_manager_type": "CodeRetrievalPM",
    -        "chat_turn": 1,
    -        "focus_agents": [],
    -        "focus_message_keys": [],
    -    },
    -    })
    -

    在我们构建phase、chain或者agent之后,可以通过函数的预打印功能,实现agents链路确认,避免在执行后才发现问题,可提前进行debug

    -
    llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -phase.pre_print(query)
    -
    -## 完整信息确认 coagent.connector.configs中进行确认
    -##########################
    -<<<<baseGroup's prompt>>>>
    -##########################
    -
    -### Agent Profile
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -ATTENTION: response carefully referenced "Response Output Format" in format.
    -
    -### Tool Information
    -
    -### Agent Infomation
    -        Please ensure your selection is one of the listed roles. Available roles for selection:
    -        "role name: tool_react
    -role description:  Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.,"
    -"role name: code_react
    -role description:  Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.,"
    -        Please ensure select the Role from agent names, such as tool_react, code_react
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Current Plan
    -
    -### Response Output Format
    -**Thoughts:** think the reason step by step about why you selecte one role
    -**Role:** Select the role from agent names.
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Role:**
    -
    -
    -###########################
    -<<<<tool_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.
    -Please note that all the tools you can use are listed below. You can only choose from these tools for use.
    -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.
    -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.
    -
    -### Tool Information
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Task Records
    -
    -### Response Output Format
    -**Thoughts:** According the previous observations, plan the approach for using the tool effectively.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -###########################
    -<<<<code_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When users need help with coding, your role is to provide precise and effective guidance.
    -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -### Response Output Format
    -
    -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem,
    -outline the plan for executing this step.
    -
    -**Action Status:** Set to 'stopped' or 'code_executing'.
    -If it's 'stopped', the action is to provide the final answer to the session records and executed steps.
    -If it's 'code_executing', the action is to write the code.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/coagent/customed-examples-zh/index.html b/docs/coagent/customed-examples-zh/index.html deleted file mode 100644 index c691d6d..0000000 --- a/docs/coagent/customed-examples-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/customed-examples-zh/ - - - - - - diff --git a/docs/coagent/customed-examples/index.html b/docs/coagent/customed-examples/index.html deleted file mode 100644 index b50391a..0000000 --- a/docs/coagent/customed-examples/index.html +++ /dev/null @@ -1,566 +0,0 @@ - - - - - - - - -Customed Examples · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Customed Examples

    -
    -
    - - -

    如何创建你个性化的 agent phase 场景

    -

    下面以 autogen 的 auto_feedback_from_code_execution 为例,详细演示如何自定义构建一个 agent phase

    -

    设计你的prompt结构

    -
    import os, sys, requests
    -
    -# from configs.model_config import *
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.chains import BaseChain
    -from coagent.connector.schema import Message
    -from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS
    -import importlib
    -
    -
    -# update new agent configs
    -auto_feedback_from_code_execution_PROMPT = """#### Agent Profile
    -
    -You are a helpful AI assistant. Solve tasks using your coding and language skills.
    -In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
    -    1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
    -    2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
    -Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
    -When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
    -If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
    -When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
    -Reply "stopped" in the end when everything is done.
    -
    -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.
    -
    -#### Response Output Format
    -
    -**Thoughts:** Based on the question and observations above, provide the plan for executing this step.
    -
    -**Action Status:** Set to 'stopped' or 'code_executing'. If it's 'stopped', the action is to provide the final answer to the original question. If it's 'code_executing', the action is to write the code.
    -
    -**Action:** 
    -# Write your code here
    -import os
    -...
    -
    -
    -**Observation:** Check the results and effects of the executed code.
    -
    -... (Repeat this Thoughts/Action/Observation cycle as needed)
    -
    -**Thoughts:** I now know the final answer
    -
    -**Action Status:** stopped
    -
    -**Action:** The final answer to the original input question
    -"""
    -

    开始配置 Prompt Configs

    -
    AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
    -    {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False}
    -]
    -

    更新完整的agent、chain、phase配置,以便后续读取执行

    -
    from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS
    -import os
    -
    -## set a new agent config
    -AGETN_CONFIGS.update({
    -    "auto_feedback_from_code_execution": {
    -        "role": {
    -            "role_prompt": auto_feedback_from_code_execution_PROMPT,
    -            "role_type": "assistant",
    -            "role_name": "auto_feedback_from_code_execution",
    -            "role_desc": "",
    -            "agent_type": "ReactAgent"
    -        },
    -        "prompt_config": AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS,
    -        "chat_turn": 5,
    -        "stop": "\n**Observation:**",
    -        "focus_agents": [],
    -        "focus_message_keys": [],
    -    },
    -})
    -# update new chain configs
    -CHAIN_CONFIGS.update({
    -    "auto_feedback_from_code_executionChain": {
    -        "chain_name": "auto_feedback_from_code_executionChain",
    -        "chain_type": "BaseChain",
    -        "agents": ["auto_feedback_from_code_execution"],
    -        "chat_turn": 1,
    -        "do_checker": False,
    -        "chain_prompt": ""
    -    }
    -})
    -
    -# update phase configs
    -PHASE_CONFIGS.update({
    -    "auto_feedback_from_code_executionPhase": {
    -        "phase_name": "auto_feedback_from_code_executionPhase",
    -        "phase_type": "BasePhase",
    -        "chains": ["auto_feedback_from_code_executionChain"],
    -        "do_summary": False,
    -        "do_search": False,
    -        "do_doc_retrieval": False,
    -        "do_code_retrieval": False,
    -        "do_tool_retrieval": False,
    -        "do_using_tool": False
    -    },
    -})
    -

    接下来就构建 phase 实例,开始执行

    -
    from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.schema import Message
    -import base64, openai
    -
    -#
    -os.environ["API_BASE_URL"] = "http://openai.com/v1/chat/completions"
    -os.environ["OPENAI_API_KEY"] = "sk-xxxx"
    -openai.api_key = "sk-xxxx"
    -
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -# 
    -phase_name = "auto_feedback_from_code_executionPhase"
    -phase = BasePhase(
    -    phase_name,
    -    embed_config=embed_config, llm_config=llm_config, 
    -    base_phase_config = PHASE_CONFIGS,
    -    base_chain_config = CHAIN_CONFIGS,
    -    base_role_config = AGETN_CONFIGS,
    -)
    -
    -
    -# round-1
    -query_content = """Plot a chart of META and TESLA's stock prices for the past year and save it as stock_price_ytd.png."""
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/coagent/index.html b/docs/coagent/index.html deleted file mode 100644 index ffcfd22..0000000 --- a/docs/coagent/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /coagent/coagent/ - - - - - - diff --git a/docs/coagent/index.xml b/docs/coagent/index.xml deleted file mode 100644 index b3262f9..0000000 --- a/docs/coagent/index.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - Coagents on CodeFuse-AI - /coagent/ - Recent content in Coagents on CodeFuse-AI - Hugo -- gohugo.io - en-US - - - Agent Flow - /coagent/agent-flow/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/agent-flow/ - Introduction to Core Connectors To facilitate everyone&rsquo;s understanding of the entire CoAgent link, we use a Flow format to detail how to build through configuration settings. Below, we will first introduce the related core components Agent At the design level of the Agent, we provide four basic types of Agents, which allows for the basic role settings of these Agents to meet the interaction and usage of a variety of common scenarios. - - - CoAgent - /coagent/coagent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/coagent/ - 简介 To enhance the performance of large language models (LLMs) in terms of inference accuracy, the industry has seen various innovative approaches to utilizing LLMs. From the earliest Chain of Thought (CoT), Text of Thought (ToT), to Graph of Thought (GoT), these methods have continually expanded the capability boundaries of LLMs. In dealing with complex problems, we can use the ReAct process to select, invoke, and execute tool feedback, achieving multi-round tool usage and multi-step execution. 
- - - Connector Agent - /coagent/connector-agent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-agent/ - 快速构建一个Agent 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.agents import BaseAgent from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选一个role来做示例 # 从已有的配置中选择一个config,具体参数细节见下面 role_configs = load_role_configs(AGETN_CONFIGS) agent_config = role_configs[&#34;general_planner&#34;] # 生成agent实例 base_agent = BaseAgent( role=agent_config. 
- - - Connector Chain - /coagent/connector-chain/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-chain/ - 快速构建一个 agent chain 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) # 设置openai的api-key import os, sys import openai import importlib os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxxx&#34; openai.api_key = &#34;sk-xxxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选多个role组合成 agent chain from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent. 
- - - Connector Memory - /coagent/connector-memory/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-memory/ - Memory Manager 主要用于 chat history 的管理,暂未完成 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 使用示例 创建 memory manager 实例 import os import openai from coagent.base_configs.env_config import KB_ROOT_PATH from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.schema import Message os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os. - - - Connector Phase - /coagent/connector-phase/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-phase/ - 快速构建一个 agent phase 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.phase import BasePhase from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], 
temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的 phase 配置中选一个 phase 来做示例 # log-level,print prompt和llm predict os. - - - Connector Prompt - /coagent/connector-prompt/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-prompt/ - Prompt 的标准结构 在整个Prompt的整个结构中,我们需要去定义三个部分 Agent Profil Input Format Response Output Format #### Agent Profile Agent Description ... #### Input Format **Origin Query:** the initial question or objective that the user wanted to achieve **Context:** the current status and history of the tasks to determine if Origin Query has been achieved. #### Response Output Format **Action Status:** finished or continued If it&#39;s &#39;finished&#39;, the context can answer the origin query. If it&#39;s &#39;continued&#39;, the context cant answer the origin query. - - - Customed Examples - /coagent/customed-examples/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/customed-examples/ - 如何创建你个性化的 agent phase 场景 下面通过 autogen 的 auto_feedback_from_code_execution 构建过来,来详细演示如何自定义一个 agent phase 的构建 设计你的prompt结构 import os, sys, requests # from configs.model_config import * from coagent.connector.phase import BasePhase from coagent.connector.chains import BaseChain from coagent.connector.schema import Message from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS import importlib # update new agent configs auto_feedback_from_code_execution_PROMPT = &#34;&#34;&#34;#### Agent Profile You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. 
- - - Prompt Manager - /coagent/prompt-manager/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/prompt-manager/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 Prompt自定义配置 Prompt模块参数 field_name:唯一的字段名称标识,必须提供。 function:指定如何处理输入数据的函数,必须提供。 title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 Prompt配置示例 Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 [ {&#34;field_name&#34;: &#39;agent_profile&#39;, &#34;function_name&#34;: &#39;handle_agent_profile&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;context_placeholder&#39;, &#34;function_name&#34;: &#39;&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;tool_information&#39;,&#34;function_name&#34;: &#39;handle_tool_data&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;reference_documents&#39;, &#34;function_name&#34;: &#39;handle_doc_info&#39;}, {&#34;field_name&#34;: &#39;session_records&#39;, &#34;function_name&#34;: &#39;handle_session_records&#39;}, 
{&#34;field_name&#34;: &#39;task_records&#39;, &#34;function_name&#34;: &#39;handle_task_records&#39;}, {&#34;field_name&#34;: &#39;output_format&#39;, &#34;function_name&#34;: &#39;handle_output_format&#39;, &#39;title&#39;: &#39;Response Output Format&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;response&#39;, &#34;function_name&#34;: &#39;handle_response&#39;, &#34;title&#34;=&#34;begin! - - - Quick Start - /coagent/quick-start/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/quick-start/ - Quick Start First, set up the LLM configuration import os, sys import openai # llm config os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; Next, configure the LLM settings and vector model from coagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) Finally, choose a pre-existing scenario to execute from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from coagent. 
- - - diff --git a/docs/coagent/multi-agent-zh/index.html b/docs/coagent/multi-agent-zh/index.html deleted file mode 100644 index d9de663..0000000 --- a/docs/coagent/multi-agent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - - - - - - diff --git a/docs/coagent/multi-agent/index.html b/docs/coagent/multi-agent/index.html deleted file mode 100644 index ffcfd22..0000000 --- a/docs/coagent/multi-agent/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /coagent/coagent/ - - - - - - diff --git a/docs/coagent/prompt-manager-zh/index.html b/docs/coagent/prompt-manager-zh/index.html deleted file mode 100644 index 7291cea..0000000 --- a/docs/coagent/prompt-manager-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/prompt-%E7%AE%A1%E7%90%86%E5%99%A8/ - - - - - - diff --git a/docs/coagent/prompt-manager/index.html b/docs/coagent/prompt-manager/index.html deleted file mode 100644 index 5274682..0000000 --- a/docs/coagent/prompt-manager/index.html +++ /dev/null @@ -1,518 +0,0 @@ - - - - - - - - -Prompt Manager · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Prompt Manager

    -
    -
    - - -

    提示管理器(Prompt Manager)

    -

    管理多智能体链路中的prompt创建

    -
      -
    • 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。
    • -
    • 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。
    • -
    -

    Prompt预设模板结构

    -
      -
    • Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。
    • -
    • Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 -
        -
      • Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。
      • -
      • Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。
      • -
      • Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。
      • -
      -
    • -
    • Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。
    • -
    • Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。
    • -
    -

    Prompt自定义配置

    -

    Prompt模块参数

    -
      -
    • field_name:唯一的字段名称标识,必须提供。
    • -
    • function:指定如何处理输入数据的函数,必须提供。
    • -
    • title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。
    • -
    • description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。
    • -
    • is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。
    • -
    • omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。
    • -
    -

    Prompt配置示例

    -

    Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。

    -

    在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。

    -

    context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。

    -
    [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": True},
    -    {"field_name": 'reference_documents', "function_name": 'handle_doc_info'},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'task_records', "function_name": 'handle_task_records'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
-    {"field_name": 'response', "function_name": 'handle_response', "title": "begin!!!", "is_context": False, "omit_if_empty": False}
    -]
    -

    未来规划

    -

    Prompt配置简化

    -

    未来的Prompt配置简化旨在降低用户面对复杂配置的难度。通过引入更直观的配置方法,我们计划使得Prompt配置不仅对高级用户友好,还能让初学者轻松上手。简化计划可能包括:

    -
      -
    • 预设配置短语:将复杂的配置字典转换为简洁的短语,每个短语都预定义了一个Prompt模块。用户将能够使用简单的字符串指令来快速配置Prompt,而无需深入了解所有参数。
    • -
    • 配置校验和建议:增加配置的即时校验,如果检测到配置错误或不一致性,自动提供修改建议,帮助用户优化Prompt结构。
    • -
    -

    动作(Action)注册的改进计划

    -

    在现行系统中,智能体必须在其角色提示(role prompt)内定义所有的动作(actions)。这意味着智能体需要同时处理动作的意图识别和生成动作所需的输入数据,这一过程对语言模型的理解和推理能力提出了更高要求。

    -

    为了优化这一流程,我们打算在后续版本中对动作的输入生成和执行进行模块化。这将使智能体的工作重点转移至判断当前情境下应执行哪些动作,而不必负责具体的操作指令。在这种新的架构下,当需要执行某个动作时,将有专门的机制负责生成相应动作的具体输入指令。

    -

    这种分离将显著降低单个模块的复杂性,使得整个系统更加灵活、易于扩展,同时也提升了动作执行的效率和准确性。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/coagent/prompt-\347\256\241\347\220\206\345\231\250/index.html" "b/docs/coagent/prompt-\347\256\241\347\220\206\345\231\250/index.html" deleted file mode 100644 index 7291cea..0000000 --- "a/docs/coagent/prompt-\347\256\241\347\220\206\345\231\250/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/prompt-%E7%AE%A1%E7%90%86%E5%99%A8/ - - - - - - diff --git a/docs/coagent/quick-start-zh/index.html b/docs/coagent/quick-start-zh/index.html deleted file mode 100644 index 2dd5b1c..0000000 --- a/docs/coagent/quick-start-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - - - - - - diff --git a/docs/coagent/quick-start/index.html b/docs/coagent/quick-start/index.html deleted file mode 100644 index 4f34122..0000000 --- a/docs/coagent/quick-start/index.html +++ /dev/null @@ -1,711 +0,0 @@ - - - - - - - - -Quick Start · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Quick Start

    -
    -
    - - -

    Quick Start

    -

    First, set up the LLM configuration

    -
    import os, sys
    -import openai
    -
    -# llm config
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -

    Next, configure the LLM settings and vector model

    -
    from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -

    Finally, choose a pre-existing scenario to execute

    -
    from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.schema import Message
    -
    -# Copy the data to a working directory; specify the directory if needed (default can also be used)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# Choose a scenario to execute
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1: Use a code interpreter to complete tasks
    -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart"
    -query = Message(
    -    role_name="human", role_type="user", tools=[],
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -# phase.pre_print(query)  # This function is used to preview the Prompt of the Agents' execution chain
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2: Execute tools
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -
    -query_content = "Please check if there were any issues with the server at 127.0.0.1 at 10 o'clock; help me make a judgment"
    -query = Message(
    -    role_name="human", role_type="user", tools=tools,
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -# phase.pre_print(query)  # This function is used to preview the Prompt of the Agents' execution chain
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Phase Introduction and Usage

    -

    Below, some specific Phases are introduced along with how to use them.

    -

    Feel free to brainstorm and create some interesting cases.

    -

    baseGroupPhase

    -

    The group usage Phase in autogen

    -
    # Copy the data to a working directory; specify the directory if needed (default can also be used)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# Set the log level to control the printing of the prompt, LLM output, or other information
    -os.environ["log_verbose"] = "0"
    -
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1
    -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart"
    -
    -query = Message(
    -    role_name="human", role_type="user", tools=[],
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -# phase.pre_print(query) # This function is used to preview the Prompt of the Agents' execution chain
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    baseTaskPhase

    -

    The task splitting and multi-step execution scenario in xAgents

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "baseTaskPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -# round-1
    -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeReactPhase

    -

    The code interpreter scenario based on ReAct

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# then, create a data analyze phase
    -phase_name = "codeReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -    jupyter_work_path=JUPYTER_WORK_PATH,
    -)
    -
    -# round-1
    -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeToolReactPhase

    -

    The tool invocation and code interpreter scenario based on the ReAct template

    -
    TOOL_SETS = [
    -     "StockName", "StockInfo", 
    -    ]
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "codeToolReactPhase"
    -
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -query_content = "查询贵州茅台的股票代码,并查询截止到当前日期(2023年12月24日)的最近10天的每日时序数据,然后用代码画出折线图并分析"
    -
    -query = Message(
    -  role_name="human", role_type="user", 
    -  input_query=query_content, role_content=query_content, 
    -  origin_query=query_content, tools=tools
    -  )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    docChatPhase

    -

    The knowledge base retrieval Q&A Phase

    -
    # create your knowledge base
    -from io import BytesIO
    -from pathlib import Path
    -
    -from coagent.service.kb_api import create_kb, upload_doc
    -from coagent.service.service_factory import get_kb_details
    -from coagent.utils.server_utils import run_async
    -kb_list = {x["kb_name"]: x for x in get_kb_details(KB_ROOT_PATH)}
    -
    -
    -# create a knowledge base
    -kb_name = "example_test"
    -data = {
    -    "knowledge_base_name": kb_name,
    -    "vector_store_type": "faiss", # default
    -    "kb_root_path": KB_ROOT_PATH, 
    -    "embed_model": embed_config.embed_model,
    -    "embed_engine": embed_config.embed_engine, 
    -    "embed_model_path": embed_config.embed_model_path,
    -    "model_device": embed_config.model_device,
    -}
    -run_async(create_kb(**data))
    -
    -# add doc to knowledge base
    -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl")
    -files = [file]
    -# if embedding init failed, you can use override = True
    -data = [{"override": True, "file": f, 
    -         "knowledge_base_name": kb_name, "not_refresh_vs_cache": False,
    -         "kb_root_path": KB_ROOT_PATH, "embed_model": embed_config.embed_model,
    -         "embed_engine": embed_config.embed_engine, "embed_model_path": embed_config.embed_model_path,
    -         "model_device": embed_config.model_device,
    -         } 
    -         for f in files]
    -
    -for k in data:
    -    file = Path(file).absolute().open("rb")
    -    filename = file.name
    -
    -    from fastapi import UploadFile
    -    from tempfile import SpooledTemporaryFile
    -
    -    temp_file = SpooledTemporaryFile(max_size=10 * 1024 * 1024)
    -    temp_file.write(file.read())
    -    temp_file.seek(0)
    -    
    -    k.update({"file": UploadFile(file=temp_file, filename=filename),})
    -    run_async(upload_doc(**k))
    -
    -
    -# start to chat with knowledge base
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -# round-1
    -query_content = "what modules does langchain have?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    origin_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content = "What is the purpose of prompts?"
    -query = Message(
    -    role_name="human", role_type="user",
    -    origin_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    metagpt_code_devlop

    -

    The code construction Phase in metagpt

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "metagpt_code_devlop"
    -llm_config = LLMConfig(
    -    model_name="gpt-4", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -
    -query_content = "create a snake game by pygame"
    -query = Message(role_name="human", role_type="user", input_query=query_content, role_content=query_content, origin_query=query_content)
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    searchChatPhase

    -

    The fixed Phase: search first, then answer directly with LLM

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "searchChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1
    -query_content1 = "who is the president of the United States?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content1, input_query=query_content1, origin_query=query_content1,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content2 = "Who was the previous president of the United States, and is there any relationship between the two individuals?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content2, input_query=query_content2, origin_query=query_content2,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    toolReactPhase

    -

    The tool invocation scenario based on the ReAct template

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "toolReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -
    -# round-1
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -query_content = "Please check if there were any issues with the server at 127.0.0.1 at 10 o'clock; help me make a judgment"
    -query = Message(
    -    role_name="human", role_type="user", tools=tools,
    -    role_content=query_content, input_query=query_content, origin_query=query_content
    -    )
    -
    -# phase.pre_print(query)  # This function is used to preview the Prompt of the Agents' execution chain
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/coagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" "b/docs/coagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" deleted file mode 100644 index 2dd5b1c..0000000 --- "a/docs/coagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/coagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - - - - - - diff --git a/docs/contribution/acknowledgements/index.html b/docs/contribution/acknowledgements/index.html deleted file mode 100644 index 327e43c..0000000 --- a/docs/contribution/acknowledgements/index.html +++ /dev/null @@ -1,313 +0,0 @@ - - - - - - - - -Acknowledgements · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Acknowledgements

    -
    -
    - - -

    The documentation homepage of CodeFuse-ai is built on docura

    -

    The ChatBot project is based on langchain-chatchat and codebox-api.

    -

    ……

    -

    Deep gratitude is extended for their open-source contributions!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/contribution/contribution-guide-zh/index.html b/docs/contribution/contribution-guide-zh/index.html deleted file mode 100644 index a85338a..0000000 --- a/docs/contribution/contribution-guide-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97/ - - - - - - diff --git a/docs/contribution/contribution-guide/index.html b/docs/contribution/contribution-guide/index.html deleted file mode 100644 index a3ba9dc..0000000 --- a/docs/contribution/contribution-guide/index.html +++ /dev/null @@ -1,338 +0,0 @@ - - - - - - - - -Contribution Guide · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Contribution Guide

    -
    -
    - - -

    - 中文  |  English  -

    -

    Thank you for your interest in the Codefuse project. We warmly welcome any suggestions, opinions (including criticisms), comments, and contributions to the Codefuse project.

    -

    Your suggestions, opinions, and comments on Codefuse can be directly submitted through GitHub Issues.

    -

    There are many ways to participate in the Codefuse project and contribute to it: code implementation, test writing, process tool improvement, documentation enhancement, and more. We welcome any contributions and will add you to our list of contributors.

    -

    Furthermore, with enough contributions, you may have the opportunity to become a Committer for Codefuse.

    -

    For any questions, you can contact us for timely answers through various means including WeChat, Gitter (an instant messaging tool provided by GitHub), email, and more.

    -

    Getting Started

    -

    If you are new to the Codefuse community, you can:

    -
      -
    • Follow the Codefuse GitHub repository.
    • -
    • Join related WeChat groups for Codefuse to ask questions at any time;
    • -
    -

    Through the above methods, you can stay up-to-date with the development dynamics of the Codefuse project and express your opinions on topics of interest.

    -

    Contribution Ways

    -

    This contribution guide is not just about writing code. We value and appreciate help in all areas. Here are some ways you can contribute:

    -
      -
    • Documentation
    • -
    • Issues
    • -
    • Pull Requests (PR)
    • -
    -

    Improve Documentation

    -

    Documentation is the main way for you to understand Codefuse and is also where we need the most help!

    -

    By browsing the documentation, you can deepen your understanding of Codefuse and also help you grasp the features and technical details of Codefuse. If you find any issues with the documentation, please contact us in time;

    -

    If you are interested in improving the quality of the documentation, whether it is revising an address of a page, correcting a link, or writing a better introductory document, we are very welcoming!

    -

    Most of our documentation is written in markdown format. You can directly modify and submit documentation changes in the docs/ directory on GitHub. For submitting code changes, please refer to Pull Requests.

    -

    If You Discover a Bug or Issue

    -

    If you discover a bug or issue, you can directly submit a new Issue through GitHub Issues, and someone will handle it regularly. For more details, see the Issue Template.

    -

    You can also choose to read and analyze the code to fix it yourself (it is best to communicate with us before doing so, as someone might already be working on the same issue), and then submit a Pull Request.

    -

    Modify Code and Submit a PR (Pull Request)

    -

    You can download the code, compile, install, and deploy to try it out (you can refer to the compilation documentation to see if it works as you expected). If there are any issues, you can directly contact us, submit an Issue, or fix it yourself by reading and analyzing the source code. For more details, see How to Submit a PR.

    -

    Whether it’s fixing a bug or adding a feature, we warmly welcome it. If you wish to submit code to Codefuse, you need to fork the code repository to your project space on GitHub, create a new branch for your submitted code, add the original project as an upstream, and submit a PR. The method for submitting a PR can be referenced in the Pull Request documentation.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/contribution/contribution.en-US.md b/docs/contribution/contribution.en-US.md new file mode 100644 index 0000000..c07ac2f --- /dev/null +++ b/docs/contribution/contribution.en-US.md @@ -0,0 +1,33 @@ +--- +title: Publication +nav: + title: Publication + order: 2 +bannerTitle: https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*r31CSbR3uFUAAAAAAAAAAAAADlHYAQ/original +contentTitle: Selected Publications +list: + - Qwen Team. (2023). Qwen Technical Report. arXiv. + - Bai, J., Bai S., Yang, S., Wang, S., Tan, S., Wang, P., Lin, J., Zhou, C. & Zhou., J. (2023). Qwen-VL:A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond. arXiv. + - Wang, P., Yang, A., Men, R., Lin, J., Bai, S., Li, Z., Ma, J., Zhou, C., Zhou, J., & Yang, H. (2022). Unifying Architectures, Tasks, and Modalities Through a Simple Sequence-to-Sequence Learning Framework. ICML. + - Bai, J., Men, R., Yang, H., Ren, X., Dang, K.E., Zhang, Y., Zhou, X., Wang, P., Tan, S., Yang, A., Cui, Z., Han, Y., Bai, S., Ge, W., Ma, J., Lin, J., Zhou, J., & Zhou, C. (2022). OFASys:A Multi-Modal Multi-Task Learning System for Building Generalist Models. arXiv, abs/2212.04408. + - Lin, J., Men, R., Yang, A., Zhou, C., Zhang, Y., Wang, P., Zhou, J., Tang, J., & Yang, H. (2021). M6:Multi-Modality-to-Multi-Modality Multitask Mega-transformer for Unified Pretraining. KDD. + - Yang, A., Pan, J., Lin, J., Men, R., Zhang, Y., Zhou, J., & Zhou, C. (2022). Chinese CLIP:Contrastive Vision-Language Pretraining in Chinese. arXiv, abs/2211.01335. + - Ma, J., Bai, S., & Zhou, C. (2022). Pretrained Diffusion Models for Unified Human Motion Synthesis. arXiv, abs/2212.02837. + - Yang, H., Lin, J., Yang, A., Wang, P., Zhou, C., & Yang, H. (2022). Prompt Tuning for Generative Multimodal Pretrained Models. arXiv, abs/2208.02532. + - Zhou, X., Wang, J., Cui, Z., Zhang, S., Yan, Z., Zhou, J., & Zhou, C. (2022). 
MMSpeech:Multi-modal Multi-task Encoder-Decoder Pre-training for Speech Recognition. arXiv, abs/2212.00500. + - Huang, Y., Lin, J., Zhou, C., Yang, H., & Huang, L. (2022). Modality Competition:What Makes Joint Training of Multi-modal Network Fail in Deep Learning? (Provably). ICML. + - Bai, S., Zhou, H., Li, Z., Zhou, C., & Yang, H. (2022). Single Stage Virtual Try-on via Deformable Attention Flows. ECCV. + - Cui, Z., Ma, J., Zhou, C., Zhou, J., Yang, H. (2022). M6-Rec:Generative Pretrained Language Models are Open-Ended Recommender Systems. arXiv, abs/2205.08084. + - Zhang, Z., Ma, J., Zhou, C., Men, R., Li, Z., Ding, M., Tang, J., Zhou, J., & Yang, H. (2021). UFC-BERT:Unifying Multi-Modal Controls for Conditional Image Synthesis. NeurIPS. + - Lin, J., Yang, A., Bai, J., Zhou, C., Jiang, L., Jia, X., Wang, A., Zhang, J., Li, Y., Lin, W., Zhou, J., & Yang, H. (2021). M6-10T:A Sharing-Delinking Paradigm for Efficient Multi-Trillion Parameter Pretraining. arXiv, abs/2110.03888. + - Yang, A., Lin, J., Men, R., Zhou, C., Jiang, L., Jia, X., Wang, A., Zhang, J., Wang, J., Li, Y., Zhang, D., Lin, W., Lin, Q., Zhou, J., & Yang, H. (2021). M6-T:Exploring sparse expert models and beyond. arXiv, abs/2105.15082. + - Ding, M., Yang, Z., Hong, W., Zheng, W., Zhou, C., Yin, D., Lin, J., Zou, X., Shao, Z., Yang, H., & Tang, J. (2021). CogView:Mastering Text-to-Image Generation via Transformers. NeurIPS. + - Ren, S., Lin, J., Zhao, G., Men, R., Yang, A., Zhou, J., Sun, X., & Yang, H. (2021). Learning Relation Alignment for Calibrated Cross-modal Retrieval. ACL-IJCNLP. + - Wang, P., Lin, J., Yang, A., Zhou, C., Zhang, Y., Zhou, J., & Yang, H. (2021). Sketch and Refine:Towards Faithful and Informative Table-to-Text Generation. Findings of ACL-IJCNLP. + - Lin, J., Yang, A., Zhang, Y., Liu, J., Zhou, J., & Yang, H. (2020). InterBERT:Vision-and-Language Interaction for Multi-modal Pretraining. arXiv, abs/2003.13198. 
+ - Zhang, Z., Zhou, C., Ma, J., Lin, Z., Zhou, J., Yang, H., & Zhao, Z. (2021). Learning to Rehearse in Long Sequence Memorization. ICML. + - Zhou, C., Ma, J., Zhang, J., Zhou, J., & Yang, H. (2021). Contrastive learning for debiased candidate generation in large-scale recommender systems. KDD. + - Ma, J., Zhou, C., Cui, P., Yang, H., & Zhu, W. (2019). Learning Disentangled Representations for Recommendation. NeurIPS. + - Chen, Q., Lin, J., Zhang, Y., Ding, M., Cen, Y., Yang, H., & Tang, J. (2019). Towards Knowledge-Based Recommender Dialog System. EMNLP-IJCNLP. + - Chen, Q., Lin, J., Zhang, Y., Yang, H., Zhou, J., & Tang, J. (2019). Towards Knowledge-Based Personalized Product Description Generation in E-commerce. KDD. +--- diff --git a/docs/contribution/contribution.zh-CN.md b/docs/contribution/contribution.zh-CN.md new file mode 100644 index 0000000..21eba29 --- /dev/null +++ b/docs/contribution/contribution.zh-CN.md @@ -0,0 +1,33 @@ +--- +title: 出版物 +nav: + title: 出版物 + order: 2 +bannerTitle: https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*ApP-TL00Dw8AAAAAAAAAAAAADlHYAQ/original +contentTitle: 精选出版物 +list: + - 奎文队。 (2023)。Qwen 技术报告。 arXiv。 + - 白静、白静、杨静、王静、谭静、王平、林静、周成和周静 (2023)。Qwen-VL:用于理解、本地化、文本阅读等的多功能视觉语言模型。 arXiv。 + - 王平、杨 A.、门 R.、林 J.、白 S.、李 Z.、马 J.、周 C.、周 J.、杨 H. (2022)。通过简单的序列到序列学习框架统一架构、任务和模式。 ICML。 + - 白建、门荣、杨红、任新、党柯、张勇、周新、王平、谭胜、杨安、崔,Z.,韩 Y.,白 S.,葛 W.,马 J.,林 J.,周 J.,&周 C.(2022)。OFASys:用于构建通才模型的多模式多任务学习系统。 arXiv,abs/2212.04408。 + - 林杰、门 R.、杨 A.、周 C.、张 Y.、王 P.、周 J.、唐 J. 和杨 H. (2021)。M6:用于统一预训练的多模态到多模态多任务巨型变压器。 克德。 + - 杨 A.、潘 J.、林 J.、门 R.、张 Y.、周 J. 和周 C. (2022)。中文 CLIP:中文视觉对比语言预训练。 arXiv,abs/2211.01335。 + - 马建、白松、周成 (2022)。用于统一人体运动合成的预训练扩散模型。 arXiv,abs/2212.02837。 + - 杨 H.、林 J.、杨 A.、王 P.、周 C. 和杨 H. (2022)。快速调整生成多模态预训练模型。 arXiv,abs/2208.02532。 + - 周 X.、王 J.、崔 Z.、张 S.、严 Z.、周 J.和周 C. (2022)。MMSpeech:语音识别的多模态多任务编码器-解码器预训练。 arXiv,abs/2212.00500。 + - 黄 Y.、林 J.、周 C.、杨 H. 和黄 L. (2022)。模态竞赛:深度学习多模态网络联合训练失败的原因是什么? 
(可以证明)。 ICML。 + - 白顺、周红、李志、周成、杨红 (2022)。通过可变形注意力流进行单阶段虚拟试穿。埃克 CV。 + - 崔正,马建,周成,周建,杨红(2022)。M6-Rec:生成预训练语言模型是开放式推荐系统。 arXiv,abs/2205.08084。 + - 张 Z.、马 J.、周 C.、门 R.、李 Z.、丁明、唐 J.、周 J.和杨 H. (2021)。UFC-BERT:统一条件图像合成的多模态控制。 神经信息处理系统。 + - 林杰、杨 A、白 J.、周成、姜立、贾 X、王 A、张 J、李 Y、林 W、周 J.和杨 H.(2021)。M6-10T:用于高效数万亿参数预训练的共享解链范式。 arXiv,abs/2110.03888。 + - 杨 A.,林 J.,门 R.,周成.,江 L.,贾 X.,王 A.,张 J.,王 J.,李 Y.,张德、林文、林清、周静、杨慧 (2021)。M6-T:探索稀疏专家模型及其他模型。 arXiv,abs/2105.15082。 + - 丁明、杨志、洪文、郑文、周成、尹大、林俊、邹新、邵志、杨红、 &唐,J.(2021)。CogView:通过 Transformers 掌握文本到图像的生成。神经信息处理系统。 + - 任 S.、林 J.、赵 G.、门 R.、杨 A.、周 J.、孙 X.和杨 H. (2021)。学习校准跨模态检索的关系对齐。 ACL-IJCNLP。 + - 王平、林静、杨安、周成、张勇、周静、杨慧 (2021)。草图和细化:实现忠实且信息丰富的表格到文本的生成。 ACL-IJCNLP 的研究结果。 + - 林静、杨安、张云、刘静、周静、杨慧 (2020)。InterBERT:多模式预训练的视觉和语言交互。 arXiv,abs/2003.13198。 + - 张 Z.、周 C.、马 J.、林 Z.、周 J.、杨 H.和赵 Z. (2021)。学习排练长序列记忆。 ICML。 + - 周成、马建、张建、周建和杨红 (2021)。大规模推荐系统中无偏候选生成的对比学习。 克德。 + - 马建、周成、崔平、杨红、朱文 (2019)。学习用于推荐的解缠结表示。 神经信息处理系统。 + - 陈 Q.、林 J.、张 Y.、丁明、岑 Y.、杨 H.、唐 J. (2019)。走向基于知识的推荐对话系统。 EMNLP-IJCNLP。 + - 陈 Q.、林 J.、张 Y.、杨 H.、周 J.和唐 J. (2019)。电子商务中基于知识的个性化产品描述生成。 克德。 +--- diff --git a/docs/contribution/index.html b/docs/contribution/index.html deleted file mode 100644 index 8a287e3..0000000 --- a/docs/contribution/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /contribution/contribution-guide/ - - - - - - diff --git a/docs/contribution/index.xml b/docs/contribution/index.xml deleted file mode 100644 index 4580e99..0000000 --- a/docs/contribution/index.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - Contributions on CodeFuse-AI - /contribution/ - Recent content in Contributions on CodeFuse-AI - Hugo -- gohugo.io - en-US - - - Acknowledgements - /contribution/acknowledgements/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/acknowledgements/ - The documentation homepage of CodeFuse-ai is built on docura The ChatBot project is based on langchain-chatchat and codebox-api. &hellip;&hellip; Deep gratitude is extended for their open-source contributions! 
- - - Contribution Guide - /contribution/contribution-guide/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/contribution-guide/ - 中文&nbsp | &nbspEnglish&nbsp Thank you for your interest in the Codefuse project. We warmly welcome any suggestions, opinions (including criticisms), comments, and contributions to the Codefuse project. Your suggestions, opinions, and comments on Codefuse can be directly submitted through GitHub Issues. There are many ways to participate in the Codefuse project and contribute to it: code implementation, test writing, process tool improvement, documentation enhancement, and more. We welcome any contributions and will add you to our list of contributors. - - - Issue Report - /contribution/issue-report/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/issue-report/ - 中文&nbsp | &nbspEnglish&nbsp Issue Type Issues can be categorized into three types: Bug: Issues where code or execution examples contain bugs or lack dependencies, resulting in incorrect execution. Documentation: Discrepancies in documentation, inconsistencies between documentation content and code, etc. Feature: New functionalities that evolve from the current codebase. Issue Template Issue: Bug Template Checklist before submitting an issue Please confirm that you have checked the document, issues, discussions (GitHub feature), and other publicly available documentation. - - - Pull Request - /contribution/pull-request/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/pull-request/ - 中文&nbsp | &nbspEnglish&nbsp Contribution Pre-Checklist First, confirm whether you have checked the document, issue, discussion (GitHub features), or other publicly available documentation. Find the GitHub issue you want to address. 
If none exists, create an issue or draft PR and ask a Maintainer for a check Check for related, similar, or duplicate pull requests Create a draft pull request Complete the PR template for the description Link any GitHub issue(s) that are resolved by your PR Description A description of the PR should be articulated in concise language, highlighting the work completed by the PR. - - - diff --git a/docs/contribution/issue-report-zh/index.html b/docs/contribution/issue-report-zh/index.html deleted file mode 100644 index 11e9e78..0000000 --- a/docs/contribution/issue-report-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4issue/ - - - - - - diff --git a/docs/contribution/issue-report/index.html b/docs/contribution/issue-report/index.html deleted file mode 100644 index be818a4..0000000 --- a/docs/contribution/issue-report/index.html +++ /dev/null @@ -1,373 +0,0 @@ - - - - - - - - -Issue Report · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Issue Report

    -
    -
    - - -

    - 中文  |  English  -

    -

    Issue Type

    -

    Issues can be categorized into three types:

    -
      -
    • Bug: Issues where code or execution examples contain bugs or lack dependencies, resulting in incorrect execution.
    • -
    • Documentation: Discrepancies in documentation, inconsistencies between documentation content and code, etc.
    • -
    • Feature: New functionalities that evolve from the current codebase.
    • -
    -

    Issue Template

    -

    Issue: Bug Template

    -

    Checklist before submitting an issue -
    Please confirm that you have checked the document, issues, discussions (GitHub feature), and other publicly available documentation.

    -
      -
    • I have searched through all documentation related to Codefuse.
    • -
    • I used GitHub search to find a similar issue, but did not find one.
    • -
    • I have added a very descriptive title for this issue.
    • -
    -

    System Information -
    Please confirm your operating system, such as mac-xx, windows-xx, linux-xx.

    -

    Code Version -
    Please confirm the code version or branch, such as master, release, etc.

    -

    Problem Description -
    Describe the problem you encountered, what you want to achieve, or the bug encountered during code execution.

    -

    Code Example -
    Attach your execution code and relevant configuration to facilitate rapid intervention and reproduction.

    -

    Error Information, Logs -
    The error logs and related information after executing the above code example.

    -

    Related Dependencies -
    Taking the chatbot project as an example:

    -
      -
    • connector
    • -
    • codechat
    • -
    • sandbox
    • -
    • -
    -

    Issue: Documentation Template

    -

    Issue with current documentation: -
    Please point out any problems, typos, or confusing points in the current documentation.

    -

    Idea or request for content -
    What do you think would be a reasonable way to express the documentation?

    -

    Issue: Feature Template

    -

    Checklist before submitting an issue -
    Please confirm that you have checked the document, issues, discussions (GitHub feature), and other publicly available documentation.

    -
      -
    • I have searched through all documentation related to Codefuse.
    • -
    • I used GitHub Issue search to find a similar issue, but did not find one.
    • -
    • I have added a very descriptive title for this issue.
    • -
    -

    Feature Description -
    Describe the purpose of this feature.

    -

    Related Examples -
    Provide references to documents, repositories, etc., Please provide links to any relevant GitHub repos, papers, or other resources if relevant.

    -

    Motivation -
    Describe the motivation for this feature. Why is it needed? Provide enough context information to help understand the demand for this feature.

    -

    Contribution -
    How you can contribute to the building of this feature (if you are participating).

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/contribution/pull-request-zh/index.html b/docs/contribution/pull-request-zh/index.html deleted file mode 100644 index 0efa961..0000000 --- a/docs/contribution/pull-request-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4pr/ - - - - - - diff --git a/docs/contribution/pull-request/index.html b/docs/contribution/pull-request/index.html deleted file mode 100644 index 591906a..0000000 --- a/docs/contribution/pull-request/index.html +++ /dev/null @@ -1,391 +0,0 @@ - - - - - - - - -Pull Request · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Pull Request

    -
    -
    - - -

    - 中文  |  English  -

    -

    Contribution

    -

    Pre-Checklist

    -
      -
    • First, confirm whether you have checked the document, issue, discussion (GitHub features), or other publicly available documentation.
    • -
    • Find the GitHub issue you want to address. If none exists, create an issue or draft PR and ask a Maintainer for a check
    • -
    • Check for related, similar, or duplicate pull requests
    • -
    • Create a draft pull request
    • -
    • Complete the PR template for the description
    • -
    • Link any GitHub issue(s) that are resolved by your PR
    • -
    -

    Description

    -

    A description of the PR should be articulated in concise language, highlighting the work completed by the PR. See specific standards at Commit Format Specification

    - -

    #xx if has

    -

    Test Code with Result

    -

    Please provide relevant test code when necessary.

    -

    Commit Format Specification

    -

    A commit consists of a “title” and a “body.” The title should generally be in lowercase, while the first letter of the body should be uppercase.

    -

    Title

    -

    The title of the commit message: [<type>](<scope>) <subject> (#pr)

    -

    Type - Available Options

    -

    The type of this commit, limited to the following types (all lowercase):

    -
      -
    • fix: Bug fixes
    • -
    • feature: New features
    • -
    • feature-wip: Features that are currently in development, such as partial code for a function.
    • -
    • improvement: Optimizations and improvements to existing features
    • -
    • style: Adjustments to code style
    • -
    • typo: Typographical errors in code or documentation
    • -
    • refactor: Code refactoring (without changing functionality)
    • -
    • performance/optimize: Performance optimization
    • -
    • test: Addition or fix of unit tests
    • -
    • deps: Modifications to third-party dependencies
    • -
    • community: Community-related changes, such as modifying Github Issue templates, etc.
    • -
    -

    Please note:

    -

    If multiple types occur in one commit, add multiple types.

    -

    If code refactoring leads to performance improvement, both [refactor][optimize] can be added.

    -

    Other types not listed above should not appear. If necessary, new types must be added to this document.

    -

    Scope - Available Options

    -

    The scope of the modules involved in the current submission. Due to the multitude of functional modules, only a few are listed here, and this list will be updated continuously based on needs.

    -

    For example, using a chatbot framework: -connector -codechat -sandbox -…

    -

    Please note:

    -

    Try to use options that are already listed. If you need to add new ones, please update this document promptly.

    -

    Subject Content

    -

    The title should clearly indicate the main content of the current submission.

    -

    For Example -[feature](coagent)<增加antflow兼容和增加coagent demo>

    -

    Example

    -

    coming soon

    -

    Reference

    -

    doris-commit-format

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/contribution/zh-acknowledgements/index.html b/docs/contribution/zh-acknowledgements/index.html deleted file mode 100644 index e24329b..0000000 --- a/docs/contribution/zh-acknowledgements/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E8%87%B4%E8%B0%A2/ - - - - - - diff --git "a/docs/contribution/\345\246\202\344\275\225\346\217\220\344\272\244issue/index.html" "b/docs/contribution/\345\246\202\344\275\225\346\217\220\344\272\244issue/index.html" deleted file mode 100644 index 11e9e78..0000000 --- "a/docs/contribution/\345\246\202\344\275\225\346\217\220\344\272\244issue/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4issue/ - - - - - - diff --git "a/docs/contribution/\345\246\202\344\275\225\346\217\220\344\272\244pr/index.html" "b/docs/contribution/\345\246\202\344\275\225\346\217\220\344\272\244pr/index.html" deleted file mode 100644 index 0efa961..0000000 --- "a/docs/contribution/\345\246\202\344\275\225\346\217\220\344\272\244pr/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4pr/ - - - - - - diff --git "a/docs/contribution/\350\207\264\350\260\242/index.html" "b/docs/contribution/\350\207\264\350\260\242/index.html" deleted file mode 100644 index e24329b..0000000 --- "a/docs/contribution/\350\207\264\350\260\242/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E8%87%B4%E8%B0%A2/ - - - - - - diff --git "a/docs/contribution/\350\264\241\347\214\256\346\214\207\345\215\227/index.html" "b/docs/contribution/\350\264\241\347\214\256\346\214\207\345\215\227/index.html" deleted file mode 100644 index a85338a..0000000 --- "a/docs/contribution/\350\264\241\347\214\256\346\214\207\345\215\227/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/contribution/%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97/ - - - - - - diff --git 
a/content/en/docs/a1.overview.md b/docs/docs/about/overview.en-US.md similarity index 51% rename from content/en/docs/a1.overview.md rename to docs/docs/about/overview.en-US.md index edf9c49..000f89b 100644 --- a/content/en/docs/a1.overview.md +++ b/docs/docs/about/overview.en-US.md @@ -1,29 +1,34 @@ ---- -title: overview -description: Learn more about the team maintaining Docura, how and why the project started, and how to get involved. -url: "docs/en_overview" -aliases: -- "/docs" -- "/docs/en_overview" -- "/docs/overview" ---- - -

    - -

    - - -
    - -[**HuggingFace** ](https://huggingface.co/codefuse-ai)|[ **ModelScope** ](https://modelscope.cn/organization/codefuse-ai) -
    - -Hello World! This is CodeFuse! -
    - -**CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native sotware developing, covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis.** - -

    - -

    -We are passionating about creating innovative open-source solutions that empower developers throughout the software development process as shown above. We also encourage engineers and researchers within this community to join us in co-constructing/improving CodeFuse. \ No newline at end of file +--- +nav: + title: Docs + order: -1 + second: + title: About CodeFuse + order: 0 +group: + title: 📖 CodeFuse-AI Introduce + index: true + order: 0 +title: Overview +toc: content +description: Learn more about the team maintaining Docura, how and why the project started, and how to get involved. +--- + +

    + +

    + + + +Hello World! This is CodeFuse! +
    + +**CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native software developing, covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis.** - -

    + +

    +We are passionating about creating innovative open-source solutions that empower developers throughout the software development process as shown above. We also encourage engineers and researchers within this community to join us in co-constructing/improving CodeFuse. diff --git a/docs/docs/about/overview.zh-CN.md b/docs/docs/about/overview.zh-CN.md new file mode 100644 index 0000000..59adc27 --- /dev/null +++ b/docs/docs/about/overview.zh-CN.md @@ -0,0 +1,34 @@ +--- +nav: + title: 文档 + order: -1 + second: + title: 关于 CodeFuse + order: 1 +group: + title: 📖 CodeFuse-AI 整体介绍 + index: true + order: 0 +title: 概览 +toc: content +description: Learn more about the team maintaining Docura, how and why the project started, and how to get involved. +--- + +

    + +

    + + +Hello World! This is CodeFuse! + +**CodeFuse 的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs),涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。** + +

    + +

    + +我们非常有激情去构建创新的解决方案来支持全生命周期 AI 驱动的软件开发,如上图所示。同时,我们也诚邀志同道合的工程师和研究人员加入这个社区,共同构建和增强 CodeFuse。 diff --git a/docs/docs/abstract/index.html b/docs/docs/abstract/index.html deleted file mode 100644 index 3700ca0..0000000 --- a/docs/docs/abstract/index.html +++ /dev/null @@ -1,807 +0,0 @@ - - - - - - - - -Abstract · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Abstract

    -
    -
    - - -

    Abstract

    -

    With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. For example, a security team might need sophisticated algorithms like context-sensitive taint analysis to review smaller codebases, while project managers might need a lighter algorithm, such as one that calculates cyclomatic complexity, to measure developer productivity on larger codebases.

    -

    These diversified needs, coupled with the common computational resource constraints in large organizations, pose a significant challenge. Traditional tools, with their problem-specific computation methods, often fail to scale in such environments. This is why we introduced CodeQuery, a centralized data platform specifically designed for large-scale static analysis.
    -In implementing CodeQuery, we treat source code and analysis results as data, and the execution process as big data processing, a significant departure from traditional tool-centric approaches. We leverage common systems in large organizations, such as data warehouses, data computation facilities like MaxCompute and Hive, OSS object storage, and flexible computing resources like Kubernetes, allowing CodeQuery to integrate seamlessly into these systems. This approach makes CodeQuery highly maintainable and scalable, capable of supporting diverse needs and effectively addressing changing demands. Furthermore, CodeQuery’s open architecture encourages interoperability between various internal systems, facilitating seamless interaction and data exchange. This level of integration and interaction not only increases the degree of automation within the organization but also improves efficiency and reduces the likelihood of manual errors. By breaking down information silos and fostering a more interconnected, automated environment, CodeQuery significantly enhances the overall productivity and efficiency of the software development process.
    -Moreover, CodeQuery’s data-centric approach offers unique advantages when addressing domain-specific challenges in static source code analysis. For instance, source code is typically a highly structured and interconnected dataset, with strong informational and relational ties to other code and configuration files. By treating code as data, CodeQuery can adeptly capture and exploit this structure, making it especially suitable for use in large organizations where codebases evolve continuously but incrementally, with most code undergoing minor changes daily while remaining stable. CodeQuery also supports use cases like code-data based Business Intelligence (BI), generating reports and dashboards to aid in monitoring and decision-making processes. Additionally, CodeQuery plays an important role in analyzing training data for large language models (LLMs), providing deep insights to enhance the overall effectiveness of these models.

    -

    In the current field of static analysis, CodeQuery introduces a new paradigm. It not only meets the needs of analyzing large, complex codebases but is also adaptable to the ever-changing and diversified scenarios of static analysis. CodeQuery’s data-centric approach gives it a unique advantage in dealing with code analysis issues in big data environments. Designed to address static analysis problems in large-scale software development settings, it views both source code and analysis results as data, allowing it to integrate flexibly into various systems within large organizations. This approach not only enables efficient handling of large codebases but can also accommodate various complex analysis needs, thereby making static analysis work more effective and accurate.

    -

    The characteristics and advantages of CodeQuery can be summarized as follows:

    -
      -
    • Highly Scalable: CodeQuery can handle large codebases and adapt to different analysis needs. This high level of scalability makes CodeQuery particularly valuable in large organizations.
    • -
    • Data-Centric: By treating source code and analysis results as data, CodeQuery’s data-centric approach gives it a distinct edge in addressing code analysis problems in big data environments.
    • -
    • Highly Integrated: CodeQuery can integrate seamlessly into various systems within large organizations, including data warehouses, data computation facilities, object storage, and flexible computing resources. This high level of integration makes the use of CodeQuery in large organizations more convenient and efficient.
    • -
    • Supports Diverse Needs: CodeQuery can process large codebases and accommodate various complex analysis needs, including QoS analysis, cross-language analysis, specialized algorithmic demands, and performance requirements.
    • -
    -

    CodeQuery is a powerful static code analysis platform, suitable for large-scale, complex codebase analysis scenarios. Its data-centric approach and high scalability give it a unique advantage in the modern software development environment. As static code analysis technology continues to evolve, CodeQuery is expected to play an increasingly important role in this field.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/acknowledgements/index.html b/docs/docs/acknowledgements/index.html deleted file mode 100644 index 8eeeab8..0000000 --- a/docs/docs/acknowledgements/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /docs/acnowledgements/acknowledgements/ - - - - - - diff --git a/docs/docs/acnowledgements/acknowledgements/index.html b/docs/docs/acnowledgements/acknowledgements/index.html deleted file mode 100644 index 6422e86..0000000 --- a/docs/docs/acnowledgements/acknowledgements/index.html +++ /dev/null @@ -1,460 +0,0 @@ - - - - - - - - -Acknowledgements · CodeFuse - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Acknowledgements

    -
    -
    - - -

    CodeFuse-ai 主页基于docura构建,在此深深感谢他们的开源贡献!

    -

    ChatBot 项目基于langchain-chatchatcodebox-api,在此深深感谢他们的开源贡献!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/content/en/muagent/connector/connector_agent.md b/docs/docs/api-docs/MuAgent/connector/connector_agent.en-US.md similarity index 57% rename from content/en/muagent/connector/connector_agent.md rename to docs/docs/api-docs/MuAgent/connector/connector_agent.en-US.md index c0e511b..e695e1b 100644 --- a/content/en/muagent/connector/connector_agent.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_agent.en-US.md @@ -1,150 +1,158 @@ ---- -title: Connector Agent -slug: Connector Agent -url: "muagent/connector-agent" -aliases: -- "/muagent/connector-agent" ---- - - -## Quickly Build an Agent -- First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) - - -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" - -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -- Then Set LLM Configuration and Vector Model Configuration -Configure related LLM and Embedding Model -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### Agent Configuration -- Define two react agents for actual task execution - -``` -# Here, predefined prompts are 
used, but you can also refer to the above prompts to complete the writing -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT - -# A tool agent based on react is defined -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) -tool_react_agent = ReactAgent( - role=tool_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - - -# A code agent based on react is defined -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) -code_react_agent = ReactAgent( - role=code_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) -``` - -- Define a groupAgent for agent selection -``` -prompt = """#### Agent Profile -Your goal is to respond according to the information in the Context Data with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list. -ATTENTION: respond carefully following the "Response Output Format". -#### Response Output Format -**Thoughts:** think step by step about why you selected one role -**Role:** Select the role from the agent names. 
-""" - -# A groupAgent is defined -role = Role(role_type="assistant", role_name="qaer", prompt=prompt) -base_agent = SelectorAgent( - role=role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, - group_agents=[tool_react_agent, code_react_agent] -) -``` - -### Start Actual Q&A -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) -question = "Confirm if employee_data.csv exists locally, and check its columns and data types; then draw a bar chart" - -query = Message( - user_name="test", role_type="user", role_name="user", input_query=question, - tools=tools, -) -# base_agent.pre_print(query) -output_message = base_agent.step(query) -print(output_message.input_query) -print(output_message.role_content) -``` - - -## Agent Configs -``` -# Configuration structure is in this directory -from muagent.connector.schema import Role -``` - -### Agent Config -|Config Key Name| Type| Description| -| ------------------ | ---------- | ---------- | -|role| Role |Role description| -|focus_agents |List[String] |Logic of MetaGPT, focusing on the messages generated by which agents, optional values are: role_name| -|focus_message_keys |List[String]| Additional logic, focusing on specific key information in the message, optional values are: agent's output_keys| -|chat_turn |int |Valid only for ReactAgent| -|llm_config |LLMConfig |Large language model configuration| -|embed_config |EmbedConfig |Vector model configuration| -|sandbox_server |Dict |Sandbox environment, i.e., notebook startup configuration| -|jupyter_work_path |str |Working directory of the sandbox environment| -|kb_root_path |str |Storage path for memory| -|log_verbose |str |Log printing level of agent prompt & predict| - -### 
Role - -| Config Key Name | Type | Description | -|------------------|------|--------------------| -| role_type | str | Role type, Enum: system, user, assistant, function, observation, summary | -| role_name | str | Role name | -| role_desc | str | Role description | -| agent_type | str | Agent type | -| role_prompt | str | Role instruction | -| prompt | str | Complete prompt structure | \ No newline at end of file +--- +group: + title: Connector + order: 0 +title: Agent +order: -1 +toc: content +--- + +## Quickly Build an Agent + +- First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" + +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +- Then Set LLM Configuration and Vector Model Configuration + Configure related LLM and Embedding Model + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Agent Configuration + +- Define two react agents for actual task execution + +``` +# Here, predefined prompts are used, but you can also refer to the above prompts to complete the writing +from muagent.connector.configs.prompts import REACT_CODE_PROMPT, 
REACT_TOOL_PROMPT + +# A tool agent based on react is defined +tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) +tool_react_agent = ReactAgent( + role=tool_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + + +# A code agent based on react is defined +code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) +code_react_agent = ReactAgent( + role=code_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) +``` + +- Define a groupAgent for agent selection + +``` +prompt = """#### Agent Profile +Your goal is to respond according to the information in the Context Data with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list. +ATTENTION: respond carefully following the "Response Output Format". +#### Response Output Format +**Thoughts:** think step by step about why you selected one role +**Role:** Select the role from the agent names. 
+""" + +# A groupAgent is defined +role = Role(role_type="assistant", role_name="qaer", prompt=prompt) +base_agent = SelectorAgent( + role=role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, + group_agents=[tool_react_agent, code_react_agent] +) +``` + +### Start Actual Q&A + +``` +# prepare your tools +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) +question = "Confirm if employee_data.csv exists locally, and check its columns and data types; then draw a bar chart" + +query = Message( + user_name="test", role_type="user", role_name="user", input_query=question, + tools=tools, +) +# base_agent.pre_print(query) +output_message = base_agent.step(query) +print(output_message.input_query) +print(output_message.role_content) +``` + +## Agent Configs + +``` +# Configuration structure is in this directory +from muagent.connector.schema import Role +``` + +### Agent Config + +| Config Key Name | Type | Description | +| ------------------ | ------------ | --------------------------------------------------------------------------------------------------------------- | +| role | Role | Role description | +| focus_agents | List[String] | Logic of MetaGPT, focusing on the messages generated by which agents, optional values are: role_name | +| focus_message_keys | List[String] | Additional logic, focusing on specific key information in the message, optional values are: agent's output_keys | +| chat_turn | int | Valid only for ReactAgent | +| llm_config | LLMConfig | Large language model configuration | +| embed_config | EmbedConfig | Vector model configuration | +| sandbox_server | Dict | Sandbox 
environment, i.e., notebook startup configuration | +| jupyter_work_path | str | Working directory of the sandbox environment | +| kb_root_path | str | Storage path for memory | +| log_verbose | str | Log printing level of agent prompt & predict | + +### Role + +| Config Key Name | Type | Description | +| --------------- | ---- | ------------------------------------------------------------------------ | +| role_type | str | Role type, Enum: system, user, assistant, function, observation, summary | +| role_name | str | Role name | +| role_desc | str | Role description | +| agent_type | str | Agent type | +| role_prompt | str | Role instruction | +| prompt | str | Complete prompt structure | diff --git a/content/zh/muagent/connector/connector_agent.md b/docs/docs/api-docs/MuAgent/connector/connector_agent.zh-CN.md similarity index 55% rename from content/zh/muagent/connector/connector_agent.md rename to docs/docs/api-docs/MuAgent/connector/connector_agent.zh-CN.md index 5e93234..8aa2af0 100644 --- a/content/zh/muagent/connector/connector_agent.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_agent.zh-CN.md @@ -1,153 +1,164 @@ ---- -title: Connector Agent -slug: Connector Agent ZH -url: "muagent/connector-agent-zh" -aliases: -- "/muagent/connector-agent-zh" ---- - - -## 快速构建一个Agent -### 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -### 然后设置LLM配置和向量模型配置 - -- 配置相关 LLM 和 Embedding Model -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from 
muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS - - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### Agent 配置 -- 定义两个react agent,进行实际任务执行 -``` -# 这里采用了预定义的prompt,也可以参考上述prompt完成编写 -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT -# 定义了基于react的tool agent -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) -tool_react_agent = ReactAgent( - role=tool_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - -# 定义了基于react的code agent -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) -code_react_agent = ReactAgent( - role=code_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - -``` - -- 定义groupAgent,用于agent选择 -``` -prompt = """#### Agent Profile - -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. - -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. - -ATTENTION: response carefully referenced "Response Output Format" in format. - -#### Response Output Format - -**Thoughts:** think the reason step by step about why you selecte one role - -**Role:** Select the role from agent names. 
-""" - -# 定义了一个groupAgent -role = Role(role_type="assistant", role_name="qaer", prompt=prompt) -base_agent = SelectorAgent( - role=role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, - group_agents=[tool_react_agent, code_react_agent] -) -``` - -### 开始实际问答 -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - user_name="test", role_type="user", role_name="user", input_query=question, - tools=tools, -) -# base_agent.pre_print(query) -output_message = base_agent.step(query) -print(output_message.input_query) -print(output_message.role_content) -``` - -## Agent 参数配置 -``` -# 配置结构在这个目录 -from muagent.connector.schema import Role -``` - - -### Agent Config -|Config Key Name| Type| Description| -| ------------------ | ---------- | ---------- | -|role| Role |角色描述| -|focus_agents |List[String] |metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name -|focus_message_keys |List[String]| 额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys| -|chat_turn |int |只针对ReactAgent有效| -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| - -### Role - -| Config Key Name | Type | Description | -|------------------|------|--------------------| -| role_type | str | 角色类型, Enum: system、user、assistant、function、observation、summary | -| role_name | str | 角色名称 | -| role_desc | str | 角色描述 | -| agent_type | str | 代理类型 | -| role_prompt | str | 角色instruction | -| prompt | str | 完整prompt结构 | +--- +group: + title: Connector + order: 0 +title: Agent 
+order: -1 +toc: content +--- + +## 快速构建一个 Agent + +### 首先增加 openai 配置,也可以是其它类似于 openai 接口的模型(通过 fastchat 启动) + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### 然后设置 LLM 配置和向量模型配置 + +- 配置相关 LLM 和 Embedding Model + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS + + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Agent 配置 + +- 定义两个 react agent,进行实际任务执行 + +``` +# 这里采用了预定义的prompt,也可以参考上述prompt完成编写 +from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT +# 定义了基于react的tool agent +tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) +tool_react_agent = ReactAgent( + role=tool_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + +# 定义了基于react的code agent +code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) +code_react_agent = ReactAgent( + role=code_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + +``` + +- 定义 groupAgent,用于 agent 选择 + +``` +prompt = """#### Agent Profile + +Your goal 
is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. + +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. + +ATTENTION: response carefully referenced "Response Output Format" in format. + +#### Response Output Format + +**Thoughts:** think the reason step by step about why you selecte one role + +**Role:** Select the role from agent names. +""" + +# 定义了一个groupAgent +role = Role(role_type="assistant", role_name="qaer", prompt=prompt) +base_agent = SelectorAgent( + role=role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, + group_agents=[tool_react_agent, code_react_agent] +) +``` + +### 开始实际问答 + +``` +# prepare your tools +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" +query = Message( + user_name="test", role_type="user", role_name="user", input_query=question, + tools=tools, +) +# base_agent.pre_print(query) +output_message = base_agent.step(query) +print(output_message.input_query) +print(output_message.role_content) +``` + +## Agent 参数配置 + +``` +# 配置结构在这个目录 +from muagent.connector.schema import Role +``` + +### Agent Config + +| Config Key Name | Type | Description | +| ------------------ | ------------ | ---------------------------------------------------------------------------------- | +| role | Role | 角色描述 | +| focus_agents | List[String] | metagpt 的逻辑,关注哪些 agent 生成的 message,可选值范围为:role_name | +| 
focus_message_keys | List[String] | 额外增加的逻辑,关注 message 里面具体的 key 信息可选值范围为:agent 的 output_keys | +| chat_turn | int | 只针对 ReactAgent 有效 | +| llm_config | LLMConfig | 大语言模型配置 | +| embed_config | EmbedConfig | 向量模型配置 | +| sandbox_server | Dict | 沙盒环境即 notebook 启动配置 | +| jupyter_work_path | str | 沙盒环境的工作目录 | +| kb_root_path | str | memory 的存储路径 | +| log_verbose | str | agent prompt&predict 的日志打印级别 | + +### Role + +| Config Key Name | Type | Description | +| --------------- | ---- | ----------------------------------------------------------------------- | +| role_type | str | 角色类型, Enum: system、user、assistant、function、observation、summary | +| role_name | str | 角色名称 | +| role_desc | str | 角色描述 | +| agent_type | str | 代理类型 | +| role_prompt | str | 角色 instruction | +| prompt | str | 完整 prompt 结构 | diff --git a/content/en/muagent/connector/connector_chain.md b/docs/docs/api-docs/MuAgent/connector/connector_chain.en-US.md similarity index 79% rename from content/en/muagent/connector/connector_chain.md rename to docs/docs/api-docs/MuAgent/connector/connector_chain.en-US.md index be0ab8e..3259243 100644 --- a/content/en/muagent/connector/connector_chain.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_chain.en-US.md @@ -1,137 +1,146 @@ ---- -title: Connector Chain -slug: Connector Chain -url: "muagent/connector-chain" -aliases: -- "/muagent/connector-chain" ---- - - -## Quickly Build an Agent -### First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) - - -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -### Then Set LLM Configuration and Vector Model Configuration -Configure related LLM and Embedding Model -``` -from muagent.base_configs.env_config import 
JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### Agent Configuration -- Define two react agents for actual task execution - -``` -# Here, predefined prompts are used, but you can also refer to the above prompts to complete the writing -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT - -# A tool agent based on react is defined -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) -tool_react_agent = ReactAgent( - role=tool_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - -# A code agent based on react is defined -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) -code_react_agent = ReactAgent( - role=code_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) -``` - -- Define a groupAgent for agent selection -``` -prompt = """#### Agent Profile -Your goal is to respond according to the information in the Context Data with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list. 
-ATTENTION: respond carefully following the "Response Output Format". -#### Response Output Format -**Thoughts:** think step by step about why you selected one role -**Role:** Select the role from the agent names. -""" -# A groupAgent is defined -role = Role(role_type="assistant", role_name="qaer", prompt=prompt) -base_agent = SelectorAgent( - role=role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, - group_agents=[tool_react_agent, code_react_agent] -) -``` - -### Chain Config -``` -chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) -base_chain = BaseChain( - chainConfig=chain_config, agents=[base_agent], - llm_config=llm_config, embed_config=embed_config, -) -``` - -### Start Actual Q&A -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) -question = "Confirm if employee_data.csv exists locally, and check its columns and data types; then draw a bar chart" -query = Message( - user_name="test", role_type="user", role_name="user", input_query=question, - tools=tools, -) - -# base_chain.pre_print(query) -output_message, output_memory = base_chain.step(query) -print(output_message.input_query) -print(output_message.role_content) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -## Chain Parameter Configuration -|Config Key Name| Type |Description| -| ------------------ | ---------- | ---------- | -|agents| List[BaseAgent] | -|llm_config |LLMConfig |Large Language Model Configuration| -|embed_config |EmbedConfig |Vector Model Configuration| -|sandbox_server |Dict |Sandbox environment or notebook startup configuration| -|jupyter_work_path |str |Working directory for the 
sandbox environment| -|kb_root_path |str |Storage path for memory| -|log_verbose |str |Log printing level for agent prompts & predictions| \ No newline at end of file +--- +group: + title: Connector + order: 0 +title: Chain +order: 0 +toc: content +--- + +## Quickly Build an Agent + +### First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### Then Set LLM Configuration and Vector Model Configuration + +Configure related LLM and Embedding Model + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Agent Configuration + +- Define two react agents for actual task execution + +``` +# Here, predefined prompts are used, but you can also refer to the above prompts to complete the writing +from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT + +# A tool agent based on react is defined +tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) +tool_react_agent = ReactAgent( + role=tool_role, + task="", + chat_turn=3, + focus_agents=[], + 
focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + +# A code agent based on react is defined +code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) +code_react_agent = ReactAgent( + role=code_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) +``` + +- Define a groupAgent for agent selection + +``` +prompt = """#### Agent Profile +Your goal is to respond according to the information in the Context Data with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list. +ATTENTION: respond carefully following the "Response Output Format". +#### Response Output Format +**Thoughts:** think step by step about why you selected one role +**Role:** Select the role from the agent names. 
+""" +# A groupAgent is defined +role = Role(role_type="assistant", role_name="qaer", prompt=prompt) +base_agent = SelectorAgent( + role=role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, + group_agents=[tool_react_agent, code_react_agent] +) +``` + +### Chain Config + +``` +chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) +base_chain = BaseChain( + chainConfig=chain_config, agents=[base_agent], + llm_config=llm_config, embed_config=embed_config, +) +``` + +### Start Actual Q&A + +``` +# prepare your tools +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) +question = "Confirm if employee_data.csv exists locally, and check its columns and data types; then draw a bar chart" +query = Message( + user_name="test", role_type="user", role_name="user", input_query=question, + tools=tools, +) + +# base_chain.pre_print(query) +output_message, output_memory = base_chain.step(query) +print(output_message.input_query) +print(output_message.role_content) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +## Chain Parameter Configuration + +| Config Key Name | Type | Description | +| ----------------- | --------------- | ----------------------------------------------------- | +| agents | List[BaseAgent] | +| llm_config | LLMConfig | Large Language Model Configuration | +| embed_config | EmbedConfig | Vector Model Configuration | +| sandbox_server | Dict | Sandbox environment or notebook startup configuration | +| jupyter_work_path | str | Working directory for the sandbox environment | +| kb_root_path | 
str | Storage path for memory | +| log_verbose | str | Log printing level for agent prompts & predictions | diff --git a/content/zh/muagent/connector/connector_chain.md b/docs/docs/api-docs/MuAgent/connector/connector_chain.zh-CN.md similarity index 75% rename from content/zh/muagent/connector/connector_chain.md rename to docs/docs/api-docs/MuAgent/connector/connector_chain.zh-CN.md index cca5ee0..ca51de1 100644 --- a/content/zh/muagent/connector/connector_chain.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_chain.zh-CN.md @@ -1,145 +1,158 @@ ---- -title: Connector Chain -slug: Connector Chain ZH -url: "muagent/connector-chain-zh" -aliases: -- "/muagent/connector-chain-zh" ---- - - -## 快速构建一个Agent Chain -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -### 然后设置LLM配置和向量模型配置 -- 配置相关 LLM 和 Embedding Model -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS - - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### Agent 配置 -- 定义两个react agent,进行实际任务执行 -``` -# 这里采用了预定义的prompt,也可以参考上述prompt完成编写 -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT -# 定义了基于react的tool agent 
-tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) -tool_react_agent = ReactAgent( - role=tool_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - -# 定义了基于react的code agent -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) -code_react_agent = ReactAgent( - role=code_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - -``` - -- 定义groupAgent,用于agent选择 -``` -prompt = """#### Agent Profile - -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. - -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. - -ATTENTION: response carefully referenced "Response Output Format" in format. - -#### Response Output Format - -**Thoughts:** think the reason step by step about why you selecte one role - -**Role:** Select the role from agent names. 
-""" - -# 定义了一个groupAgent -role = Role(role_type="assistant", role_name="qaer", prompt=prompt) -base_agent = SelectorAgent( - role=role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, - group_agents=[tool_react_agent, code_react_agent] -) -``` -### Chain 配置 -``` -chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) -base_chain = BaseChain( - chainConfig=chain_config, agents=[base_agent], - llm_config=llm_config, embed_config=embed_config, -) - -``` - - -### 开始实际问答 -- 开始执行 -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - user_name="test", role_type="user", role_name="user", input_query=question, - tools=tools, -) - -# base_chain.pre_print(query) -output_message, output_memory = base_chain.step(query) -print(output_message.input_query) -print(output_message.role_content) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -## Chain 参数配置 -|Config Key Name| Type |Description| -| ------------------ | ---------- | ---------- | -|agents| List[BaseAgent] | -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| -|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| +--- +group: + title: Connector + order: 0 +title: Chain +order: 0 +toc: content +--- + +## 快速构建一个 Agent Chain + +- 首先增加 openai 配置,也可以是其它类似于 openai 接口的模型(通过 fastchat 启动) + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = 
"{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### 然后设置 LLM 配置和向量模型配置 + +- 配置相关 LLM 和 Embedding Model + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS + + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Agent 配置 + +- 定义两个 react agent,进行实际任务执行 + +``` +# 这里采用了预定义的prompt,也可以参考上述prompt完成编写 +from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT +# 定义了基于react的tool agent +tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) +tool_react_agent = ReactAgent( + role=tool_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + +# 定义了基于react的code agent +code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) +code_react_agent = ReactAgent( + role=code_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + +``` + +- 定义 groupAgent,用于 agent 选择 + +``` +prompt = """#### Agent Profile + +Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. 
+ +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. + +ATTENTION: respond carefully following the "Response Output Format". + +#### Response Output Format + +**Thoughts:** think step by step about why you selected one role + +**Role:** Select the role from the agent names. +""" + +# 定义了一个groupAgent +role = Role(role_type="assistant", role_name="qaer", prompt=prompt) +base_agent = SelectorAgent( +    role=role, +    task="", +    chat_turn=3, +    focus_agents=[], +    focus_message_keys=[], +    llm_config=llm_config, embed_config=embed_config, +    group_agents=[tool_react_agent, code_react_agent] +) +``` + +### Chain 配置 + +``` +chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) +base_chain = BaseChain( +    chainConfig=chain_config, agents=[base_agent], +    llm_config=llm_config, embed_config=embed_config, +) + +``` + +### 开始实际问答 + +- 开始执行 + +``` +# prepare your tools +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" +query = Message( +    user_name="test", role_type="user", role_name="user", input_query=question, +    tools=tools, +) + +# base_chain.pre_print(query) +output_message, output_memory = base_chain.step(query) +print(output_message.input_query) +print(output_message.role_content) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +## Chain 参数配置 + +| Config Key Name | Type | Description | +| ----------------- | --------------- | ----------------------------------- | +| agents | List[BaseAgent] | +| llm_config | 
LLMConfig | 大语言模型配置 | +| embed_config | EmbedConfig | 向量模型配置 | +| sandbox_server | Dict | 沙盒环境即 notebook 启动配置 | +| jupyter_work_path | str | 沙盒环境的工作目录 | +| kb_root_path | str | memory 的存储路径 | +| log_verbose | str | agent prompt&predict 的日志打印级别 | diff --git a/docs/docs/api-docs/MuAgent/connector/connector_localmemory.en-US.md b/docs/docs/api-docs/MuAgent/connector/connector_localmemory.en-US.md new file mode 100644 index 0000000..806d6d7 --- /dev/null +++ b/docs/docs/api-docs/MuAgent/connector/connector_localmemory.en-US.md @@ -0,0 +1,102 @@ +--- +group: + title: Connector + order: 0 +subGroup: + title: Memory +title: Tbase Memory Builder +order: 3 +toc: content +--- + +## Usage Example + +### Create memory manager instance + +``` +import os +import openai +from coagent.base_configs.env_config import KB_ROOT_PATH +from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager +from coagent.llm_models.llm_config import EmbedConfig, LLMConfig +from coagent.connector.schema import Message + + +os.environ["API_BASE_URL"] = OPENAI_API_BASE +os.environ["OPENAI_API_KEY"] = "sk-xx" +openai.api_key = "sk-xxx" +# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" + + +# LLM and Embedding Model configurations +llm_config = LLMConfig( + model_name=os.environ["model_name"], api_key=os.environ["OPENAI_API_KEY"], + api_base_url=os.environ["API_BASE_URL"], temperature=0.3 +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=os.environ["embed_model"], + embed_model_path=os.environ["embed_model_path"] +) +``` + +### support memory manage + +``` +# prepare your message +message1 = Message( + chat_index="default", role_name="test1", role_type="user", role_content="hello", + parsed_output_list=[{"input": "hello"}], user_name="default" +) + +text = "hi! how can I help you?" 
+message2 = Message( +    chat_index="shuimo", role_name="test2", role_type="assistant", role_content=text, parsed_output_list=[{"answer": text}], +    user_name="shuimo" +) + +text = "they say hello and hi to each other" +message3 = Message( +    chat_index="shanshi", role_name="test3", role_type="summary", role_content=text, +    parsed_output_list=[{"summary": text}], +    user_name="shanshi" +    ) + +# append or extend test +local_memory_manager = LocalMemoryManager(embed_config=embed_config, llm_config=llm_config, do_init=True) +# append can ignore user_name +local_memory_manager.append(message=message1) +local_memory_manager.append(message=message2) +local_memory_manager.append(message=message3) +``` + +### Reload memory + +``` +local_memory_manager = LocalMemoryManager(embed_config=embed_config, llm_config=llm_config, do_init=False) +local_memory_manager.load() +print(local_memory_manager.get_memory_pool("default").messages) +print(local_memory_manager.get_memory_pool("shuimo").messages) +print(local_memory_manager.get_memory_pool("shanshi").messages) +``` + +### Support for memory retrieval + +``` +# embedding retrieval test +text = "say hi to each other, i want some help" +# retrieval_type=datetime => retrieval from datetime and jieba +print(local_memory_manager.router_retrieval(chat_index="shanshi", text=text, datetime="2024-03-12 17:48:00", n=4, top_k=5, retrieval_type= "datetime")) +# retrieval_type=embedding => retrieval from embedding +print(local_memory_manager.router_retrieval(chat_index="shanshi", text=text, top_k=5, retrieval_type= "embedding")) +# retrieval_type=text => retrieval from jieba +print(local_memory_manager.router_retrieval(chat_index="shanshi", text=text, top_k=5, retrieval_type= "text")) +``` + +### Support for memory summarization + +``` +# recursive_summary test +print(local_memory_manager.recursive_summary(local_memory_manager.get_memory_pool("shanshi").messages, split_n=1, chat_index="shanshi")) +``` diff --git 
a/content/zh/muagent/connector/connector_memory.md b/docs/docs/api-docs/MuAgent/connector/connector_localmemory.zh-CN.md similarity index 55% rename from content/zh/muagent/connector/connector_memory.md rename to docs/docs/api-docs/MuAgent/connector/connector_localmemory.zh-CN.md index 29b27fd..00798b0 100644 --- a/content/zh/muagent/connector/connector_memory.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_localmemory.zh-CN.md @@ -1,116 +1,112 @@ ---- -title: Connector Memory -slug: Connector Memory ZH -url: "muagent/connector-memory-zh" -aliases: -- "/muagent/connector-memory-zh" ---- - - -## Memory Manager -- 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval -- 对 chat history 进行关键信息总结 summary context,作为 prompt context -- 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 - - - -## 使用示例 -完整示例见 ~/tests/connector/memory_manager_test.py -### 创建 memory manager 实例 -``` -import os -import openai - -from muagent.base_configs.env_config import KB_ROOT_PATH -from muagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.connector.schema import Message - -# -OPENAI_API_BASE = "https://api.openai.com/v1" -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xxx" -openai.api_key = "sk-xxx" -os.environ["model_name"] = "gpt-3.5-turbo" - -# -os.environ["embed_model"] = "{{embed_model_name}}" -os.environ["embed_model_path"] = "{{embed_model_path}}" - -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" - - -# LLM 和 Embedding Model 配置 -llm_config = LLMConfig( - model_name=os.environ["model_name"], api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 -) - -embed_config = EmbedConfig( - embed_engine="model", embed_model=os.environ["embed_model"], - embed_model_path=os.environ["embed_model_path"] -) -# 
-phase_name = "test" -memory_manager = LocalMemoryManager( - unique_name=phase_name, - do_init=True, - kb_root_path = KB_ROOT_PATH, - embed_config=embed_config, - llm_config=llm_config - ) -``` - -### 支持Message管理 - -``` -message1 = Message( - role_name="test1", role_type="user", role_content="hello", - parsed_output_list=[{"input": "hello"}], user_name="default" -) - -text = "hi! how can I help you?" -message2 = Message( - role_name="test2", role_type="assistant", role_content=text, parsed_output_list=[{"answer": text}], - user_name="shuimo" -) - -text = "they say hello and hi to each other" -message3 = Message( - role_name="test3", role_type="summary", role_content=text, - parsed_output_list=[{"summary": text}], - user_name="shanshi" - ) - -local_memory_manager.append(message=message1) -local_memory_manager.append(message=message2) -local_memory_manager.append(message=message3) -``` - -### 重新加载 -``` -local_memory_manager = LocalMemoryManager(user_name="shanshi", embed_config=embed_config, llm_config=llm_config, do_init=False) -local_memory_manager.load() -print(local_memory_manager.get_memory_pool("default").messages) -print(local_memory_manager.get_memory_pool("shanshi").messages) -print(local_memory_manager.get_memory_pool("shuimo").messages) -``` - -### 支持 memory 检索 -``` -# embedding retrieval test -text = "say hi to each other, i want some help" -# retrieval_type=datetime => retrieval from datetime and jieba -print(local_memory_manager.router_retrieval(user_name="shanshi", text=text, datetime="2024-03-12 17:48:00", n=4, top_k=5, retrieval_type= "datetime")) -# retrieval_type=eembedding => retrieval from embedding -print(local_memory_manager.router_retrieval(user_name="shanshi", text=text, top_k=5, retrieval_type= "embedding")) -# retrieval_type=text => retrieval from jieba -print(local_memory_manager.router_retrieval(user_name="shanshi", text=text, top_k=5, retrieval_type= "text")) - -``` -### 支持 memory 总结 -``` -# recursive_summary test 
-print(local_memory_manager.recursive_summary(local_memory_manager.get_memory_pool("shanshi").messages, split_n=1)) -``` \ No newline at end of file +--- +group: + title: Connector + order: 0 +subGroup: + title: Memory +title: Local Memory Builder +order: 3 +toc: content +--- + +## 使用示例 + +### 创建 memory manager 实例 + +``` +import os +import openai + +from muagent.base_configs.env_config import KB_ROOT_PATH +from muagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.connector.schema import Message + +# +OPENAI_API_BASE = "https://api.openai.com/v1" +os.environ["API_BASE_URL"] = OPENAI_API_BASE +os.environ["OPENAI_API_KEY"] = "sk-xxx" +openai.api_key = "sk-xxx" +os.environ["model_name"] = "gpt-3.5-turbo" + +# +os.environ["embed_model"] = "{{embed_model_name}}" +os.environ["embed_model_path"] = "{{embed_model_path}}" + +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" + + +# LLM 和 Embedding Model 配置 +llm_config = LLMConfig( + model_name=os.environ["model_name"], api_key=os.environ["OPENAI_API_KEY"], + api_base_url=os.environ["API_BASE_URL"], temperature=0.3 +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=os.environ["embed_model"], + embed_model_path=os.environ["embed_model_path"] +) +``` + +### 支持 Message 管理 + +``` + +# prepare your message +message1 = Message( + chat_index="default", role_name="test1", role_type="user", role_content="hello", + parsed_output_list=[{"input": "hello"}], user_name="default" +) + +text = "hi! how can I help you?" 
+message2 = Message( +    chat_index="shuimo", role_name="test2", role_type="assistant", role_content=text, parsed_output_list=[{"answer": text}], +    user_name="shuimo" +) + +text = "they say hello and hi to each other" +message3 = Message( +    chat_index="shanshi", role_name="test3", role_type="summary", role_content=text, +    parsed_output_list=[{"summary": text}], +    user_name="shanshi" +    ) + +# append or extend test +local_memory_manager = LocalMemoryManager(embed_config=embed_config, llm_config=llm_config, do_init=True) +# append can ignore user_name +local_memory_manager.append(message=message1) +local_memory_manager.append(message=message2) +local_memory_manager.append(message=message3) +``` + +### 重新加载 + +``` +local_memory_manager = LocalMemoryManager(embed_config=embed_config, llm_config=llm_config, do_init=False) +local_memory_manager.load() +print(local_memory_manager.get_memory_pool("default").messages) +print(local_memory_manager.get_memory_pool("shuimo").messages) +print(local_memory_manager.get_memory_pool("shanshi").messages) +``` + +### 支持 memory 检索 + +``` +# embedding retrieval test +text = "say hi to each other, i want some help" +# retrieval_type=datetime => retrieval from datetime and jieba +print(local_memory_manager.router_retrieval(chat_index="shanshi", text=text, datetime="2024-03-12 17:48:00", n=4, top_k=5, retrieval_type= "datetime")) +# retrieval_type=embedding => retrieval from embedding +print(local_memory_manager.router_retrieval(chat_index="shanshi", text=text, top_k=5, retrieval_type= "embedding")) +# retrieval_type=text => retrieval from jieba +print(local_memory_manager.router_retrieval(chat_index="shanshi", text=text, top_k=5, retrieval_type= "text")) + +``` + +### 支持 memory 总结 + +``` +# recursive_summary test +print(local_memory_manager.recursive_summary(local_memory_manager.get_memory_pool("shanshi").messages, split_n=1, chat_index="shanshi")) +``` diff --git a/docs/docs/api-docs/MuAgent/connector/connector_memory.en-US.md 
b/docs/docs/api-docs/MuAgent/connector/connector_memory.en-US.md new file mode 100644 index 0000000..4333c6d --- /dev/null +++ b/docs/docs/api-docs/MuAgent/connector/connector_memory.en-US.md @@ -0,0 +1,20 @@ +--- +group: + title: Connector + order: 0 +title: Memory +order: 3 +toc: content +--- + +## Memory Manager + +Primarily used for managing chat history, not yet completed + +- Read and write chat history in the database, including user input, llm output, doc retrieval, code retrieval, search retrieval. +- Summarize key information from the chat history into a summary context, serving as a prompt context. +- Provide a search function to retrieve information related to the question from chat history or summary context, aiding in Q&A. + +## Usage Example + +Examples see ~/tests/connector/memory_manager_test.py diff --git a/docs/docs/api-docs/MuAgent/connector/connector_memory.zh-CN.md b/docs/docs/api-docs/MuAgent/connector/connector_memory.zh-CN.md new file mode 100644 index 0000000..6c9a6f3 --- /dev/null +++ b/docs/docs/api-docs/MuAgent/connector/connector_memory.zh-CN.md @@ -0,0 +1,18 @@ +--- +group: + title: Connector + order: 0 +title: Memory +order: 3 +toc: content +--- + +## Memory Manager + +- 将 chat history 在数据库进行读写管理,包括 user input、 llm output、doc retrieval、code retrieval、search retrieval +- 对 chat history 进行关键信息总结 summary context,作为 prompt context +- 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 + +## 使用示例 + +完整示例见 ~/tests/connector/memory_manager_test.py diff --git a/content/en/muagent/connector/connector_phase.md b/docs/docs/api-docs/MuAgent/connector/connector_phase.en-US.md similarity index 77% rename from content/en/muagent/connector/connector_phase.md rename to docs/docs/api-docs/MuAgent/connector/connector_phase.en-US.md index 8530d9d..83aaeb6 100644 --- a/content/en/muagent/connector/connector_phase.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_phase.en-US.md @@ -1,132 +1,155 @@ ---- -title: Connector Phase -slug: Connector 
Phase -url: "muagent/connector-phase" -aliases: -- "/muagent/connector-phase" ---- - -## Quickly Build an Agent Phase -- First, add OpenAI configuration, which can be models with similar interfaces to OpenAI (triggered via fastchat). -``` -import os, sys -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` -### Then Set LLM Configuration and Vector Model Configuration -- Configure related LLM and Embedding Model. -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` -### Agent Configuration -- Define two react agents for actual task execution. -``` -# Predefined prompts are used here; you can also refer to the above-mentioned prompts to write your own. 
-from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT -# Defined a tool agent based on react -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) -tool_react_agent = ReactAgent( - role=tool_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) -# Defined a code agent based on react -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) -code_react_agent = ReactAgent( - role=code_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) -``` -- Define a GroupAgent for agent selection. -``` -prompt = """#### Agent Profile -Your goal is to respond according to the information provided by the Context Data's with the role that will best facilitate a solution, taking into account all relevant context data (Context). -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list. -ATTENTION: respond carefully, referenced to the "Response Output Format" standard. -#### Response Output Format -**Thoughts:** think the reason step by step about why you select one role -**Role:** Select the role from the agent names. 
-""" -# Defined a GroupAgent -role = Role(role_type="assistant", role_name="qaer", prompt=prompt) -base_agent = SelectorAgent( - role=role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, - group_agents=[tool_react_agent, code_react_agent] -) -``` -### Chain Configuration -``` -chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) -base_chain = BaseChain( - chainConfig=chain_config, agents=[base_agent], - llm_config=llm_config, embed_config=embed_config, -) -``` -### Phase Configuration -``` -base_phase = BasePhase( - phase_name="group_phase", chains=[base_chain], - embed_config=embed_config, llm_config=llm_config -) -``` -### Start Real Q&A -- Start execution. -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) -question = "Confirm whether employee_data.csv exists locally, and review its columns and data types; then plot a bar chart." 
-query = Message( - user_name="test", role_type="user", role_name="user", input_query=question, - tools=tools, -) - - -# base_phase.pre_print(query) -output_message, output_memory = base_phase.step(query) -print(output_message.input_query) -print(output_message.role_content) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -## Phase Parameter Configuration -| Config Key Name | Type | Description | -| ------------------ | ---------- | ---------- | -| phase_name | String | Scenario name | -| chains | List[Chain] | List of chains to be executed in order | -| llm_config | LLMConfig | Large Language Model configuration | -| embed_config | EmbedConfig | Vector model configuration | -| sandbox_server | Dict | Sandbox environment, i.e., notebook startup configuration | -| jupyter_work_path | str | Working directory in the sandbox environment | -| kb_root_path | str | Storage path for memory | -| log_verbose | str | Log print level for agent prompts & predictions | +--- +group: + title: Connector + order: 0 +title: Phase +order: 1 +toc: content +--- + +## Quickly Build an Agent Phase + +- First, add OpenAI configuration, which can be models with similar interfaces to OpenAI (triggered via fastchat). + +``` +import os, sys +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### Then Set LLM Configuration and Vector Model Configuration + +- Configure related LLM and Embedding Model. 
+ +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.phase import BasePhase +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Agent Configuration + +- Define two react agents for actual task execution. + +``` +# Predefined prompts are used here; you can also refer to the above-mentioned prompts to write your own. +from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT +# Defined a tool agent based on react +tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) +tool_react_agent = ReactAgent( + role=tool_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) +# Defined a code agent based on react +code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) +code_react_agent = ReactAgent( + role=code_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) +``` + +- Define a GroupAgent for agent selection. + +``` +prompt = """#### Agent Profile +Your goal is to respond according to the information provided by the Context Data's with the role that will best facilitate a solution, taking into account all relevant context data (Context). 
+When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list. +ATTENTION: respond carefully, referenced to the "Response Output Format" standard. +#### Response Output Format +**Thoughts:** think the reason step by step about why you select one role +**Role:** Select the role from the agent names. +""" +# Defined a GroupAgent +role = Role(role_type="assistant", role_name="qaer", prompt=prompt) +base_agent = SelectorAgent( + role=role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, + group_agents=[tool_react_agent, code_react_agent] +) +``` + +### Chain Configuration + +``` +chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) +base_chain = BaseChain( + chainConfig=chain_config, agents=[base_agent], + llm_config=llm_config, embed_config=embed_config, +) +``` + +### Phase Configuration + +``` +base_phase = BasePhase( + phase_name="group_phase", chains=[base_chain], + embed_config=embed_config, llm_config=llm_config +) +``` + +### Start Real Q&A + +- Start execution. + +``` +# prepare your tools +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) +question = "Confirm whether employee_data.csv exists locally, and review its columns and data types; then plot a bar chart." 
+query = Message( + user_name="test", role_type="user", role_name="user", input_query=question, + tools=tools, +) + + +# base_phase.pre_print(query) +output_message, output_memory = base_phase.step(query) +print(output_message.input_query) +print(output_message.role_content) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +## Phase Parameter Configuration + +| Config Key Name | Type | Description | +| ----------------- | ----------- | --------------------------------------------------------- | +| phase_name | String | Scenario name | +| chains | List[Chain] | List of chains to be executed in order | +| llm_config | LLMConfig | Large Language Model configuration | +| embed_config | EmbedConfig | Vector model configuration | +| sandbox_server | Dict | Sandbox environment, i.e., notebook startup configuration | +| jupyter_work_path | str | Working directory in the sandbox environment | +| kb_root_path | str | Storage path for memory | +| log_verbose | str | Log print level for agent prompts & predictions | diff --git a/content/zh/muagent/connector/connector_phase.md b/docs/docs/api-docs/MuAgent/connector/connector_phase.zh-CN.md similarity index 74% rename from content/zh/muagent/connector/connector_phase.md rename to docs/docs/api-docs/MuAgent/connector/connector_phase.zh-CN.md index 636e07b..cc49068 100644 --- a/content/zh/muagent/connector/connector_phase.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_phase.zh-CN.md @@ -1,155 +1,169 @@ ---- -title: Connector Phase -slug: Connector Phase ZH -url: "muagent/connector-phase-zh" -aliases: -- "/muagent/connector-phase-zh" ---- - - - -## 快速构建一个Agent Phase -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = 
os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -### 然后设置LLM配置和向量模型配置 - -- 配置相关 LLM 和 Embedding Model -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS - - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### Agent 配置 -- 定义两个react agent,进行实际任务执行 -``` -# 这里采用了预定义的prompt,也可以参考上述prompt完成编写 -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT -# 定义了基于react的tool agent -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) -tool_react_agent = ReactAgent( - role=tool_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - -# 定义了基于react的code agent -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) -code_react_agent = ReactAgent( - role=code_role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, -) - -``` - -- 定义groupAgent,用于agent选择 -``` -prompt = """#### Agent Profile - -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. - -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. 
- -ATTENTION: response carefully referenced "Response Output Format" in format. - -#### Response Output Format - -**Thoughts:** think the reason step by step about why you selecte one role - -**Role:** Select the role from agent names. -""" - -# 定义了一个groupAgent -role = Role(role_type="assistant", role_name="qaer", prompt=prompt) -base_agent = SelectorAgent( - role=role, - task="", - chat_turn=3, - focus_agents=[], - focus_message_keys=[], - llm_config=llm_config, embed_config=embed_config, - group_agents=[tool_react_agent, code_react_agent] -) -``` -### Chain 配置 -``` -chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) -base_chain = BaseChain( - chainConfig=chain_config, agents=[base_agent], - llm_config=llm_config, embed_config=embed_config, -) - -``` -### Phase 配置 -``` -base_phase = BasePhase( - phase_name="group_phase", chains=[base_chain], - embed_config=embed_config, llm_config=llm_config -) -``` - - -### 开始实际问答 -- 开始执行 -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - user_name="test", role_type="user", role_name="user", input_query=question, - tools=tools, -) - -# base_phase.pre_print(query) -output_message, output_memory = base_phase.step(query) -print(output_message.input_query) -print(output_message.role_content) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -## Phase 参数配置 -|Config Key Name |Type |Description| -| ------------------ | ---------- | ---------- | -|phase_name| String| 场景名称| -|chains| List[Chain] | chain列表,按顺序执行 | -|llm_config |LLMConfig |大语言模型配置| -|embed_config |EmbedConfig |向量模型配置| -|sandbox_server |Dict |沙盒环境即notebook启动配置| 
-|jupyter_work_path |str |沙盒环境的工作目录| -|kb_root_path |str |memory的存储路径| -|log_verbose |str |agent prompt&predict的日志打印级别| \ No newline at end of file +--- +group: + title: Connector + order: 0 +title: Phase +order: 1 +toc: content +--- + +## 快速构建一个 Agent Phase + +- 首先增加 openai 配置,也可以是其它类似于 openai 接口的模型(通过 fastchat 启动) + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### 然后设置 LLM 配置和向量模型配置 + +- 配置相关 LLM 和 Embedding Model + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.phase import BasePhase +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS + + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Agent 配置 + +- 定义两个 react agent,进行实际任务执行 + +``` +# 这里采用了预定义的prompt,也可以参考上述prompt完成编写 +from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT +# 定义了基于react的tool agent +tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT) +tool_react_agent = ReactAgent( + role=tool_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + +# 定义了基于react的code agent +code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT) 
+code_react_agent = ReactAgent( + role=code_role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, +) + +``` + +- 定义 groupAgent,用于 agent 选择 + +``` +prompt = """#### Agent Profile + +Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. + +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. + +ATTENTION: response carefully referenced "Response Output Format" in format. + +#### Response Output Format + +**Thoughts:** think the reason step by step about why you selecte one role + +**Role:** Select the role from agent names. +""" + +# 定义了一个groupAgent +role = Role(role_type="assistant", role_name="qaer", prompt=prompt) +base_agent = SelectorAgent( + role=role, + task="", + chat_turn=3, + focus_agents=[], + focus_message_keys=[], + llm_config=llm_config, embed_config=embed_config, + group_agents=[tool_react_agent, code_react_agent] +) +``` + +### Chain 配置 + +``` +chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1) +base_chain = BaseChain( + chainConfig=chain_config, agents=[base_agent], + llm_config=llm_config, embed_config=embed_config, +) + +``` + +### Phase 配置 + +``` +base_phase = BasePhase( + phase_name="group_phase", chains=[base_chain], + embed_config=embed_config, llm_config=llm_config +) +``` + +### 开始实际问答 + +- 开始执行 + +``` +# prepare your tools +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +question = 
"确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" +query = Message( + user_name="test", role_type="user", role_name="user", input_query=question, + tools=tools, +) + +# base_phase.pre_print(query) +output_message, output_memory = base_phase.step(query) +print(output_message.input_query) +print(output_message.role_content) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +## Phase 参数配置 + +| Config Key Name | Type | Description | +| ----------------- | ----------- | ----------------------------------- | +| phase_name | String | 场景名称 | +| chains | List[Chain] | chain 列表,按顺序执行 | +| llm_config | LLMConfig | 大语言模型配置 | +| embed_config | EmbedConfig | 向量模型配置 | +| sandbox_server | Dict | 沙盒环境即 notebook 启动配置 | +| jupyter_work_path | str | 沙盒环境的工作目录 | +| kb_root_path | str | memory 的存储路径 | +| log_verbose | str | agent prompt&predict 的日志打印级别 | diff --git a/content/en/muagent/connector/connector_prompt.md b/docs/docs/api-docs/MuAgent/connector/connector_prompt.en-US.md similarity index 94% rename from content/en/muagent/connector/connector_prompt.md rename to docs/docs/api-docs/MuAgent/connector/connector_prompt.en-US.md index 6db0b58..ef101e2 100644 --- a/content/en/muagent/connector/connector_prompt.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_prompt.en-US.md @@ -1,222 +1,232 @@ ---- -title: Connector Prompt -slug: Connector Prompt -url: "muagent/connector-prompt" -aliases: -- "/muagent/connector-prompt" ---- - -## Prompt Manager -Managing prompt creation in multi-agent linkages -- Quick Configuration: Utilizing preset processing functions, users can easily configure by simply defining the inputs and outputs of the agents, enabling fast assembly and configuration of multi-agent prompts. -- Customization Support: Allows users to customize the internal processing logic of each module within the prompt to achieve personalized implementation of the agent prompt. 
- -### Preset Template Structure for Prompts -- Agent Profile: This section involves the basic description of the agent, including but not limited to the type of agent, its functions, and command set. Users can set the basic attributes of the agent here to ensure its behavior aligns with expectations. -- Context: Contextual Information, provided as a reference for the agent, aiding in better decision-making. - - Tool Information: This part provides the agent with a list of available tools, from which the agent can choose appropriate ones to assist in task execution based on current scenario requirements. - - Reference Documents: This may include documents or code snippets for the agent to refer to when handling requests, to facilitate the use of relevant information. - - Session Records: In multi-round conversations, this section records previous dialogue content to ensure continuity within the context. -- Response Output Format: Here the user can set the output format of the agent to ensure that the generated responses meet specific formatting requirements, including structure, grammar, etc. - -## Standard Structure of Prompt -In the entire structure of a Prompt, we need to define three parts: -- Agent Profile -- Input Format -- Response Output Format - -``` -#### Agent Profile -Agent Description ... - -#### Input Format -**Origin Query:** the initial question or objective that the user wanted to achieve -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved. - -#### Response Output Format -**Action Status:** finished or continued -If it's 'finished', the context can answer the origin query. -If it's 'continued', the context can't answer the origin query. -**REASON:** Justify the decision of choosing 'finished' or 'continued' by evaluating the progress step by step. -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion. 
-``` - -Here, we have integrated some of the common operations of the `Input Format`, with certain fields and operational procedures built in to form a standardized configurable operation. -In the future, we will also make parts of the Agent Profile and Response Output Format configurable to reduce the difficulty of writing Prompts. - - -### Customizing Agents -- Implement construction with custom fields according to actual needs -``` -class CodeGenDocer(BaseAgent): - def start_action_step(self, message: Message) -> Message: - '''do action before agent predict ''' - # Get code snippets and node information based on the question - action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, - embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") - current_vertex = action_json['vertex'] - message.customed_kargs["Code Snippet"] = action_json["code"] - message.customed_kargs['Current_Vertex'] = current_vertex - return message - -``` - - - -### pre_print Function -After building phases, chains, or agents, we can confirm agent linkages using the pre-print function of methods, allowing for debugging in advance to avoid discovering issues only after execution. 
-``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS - - -import os, sys -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" - -llm_config = LLMConfig( - model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 -) -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) - -phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) -phase.pre_print(query) -``` - - -Here, pre-defined agents are used,,custom case can be seen [customed_example](/muagent/customed-examples) -
    - - - -## check the pre-print prompt -``` -########################## -<<<>>> -########################## - -### Agent Profile -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. -ATTENTION: response carefully referenced "Response Output Format" in format. - -### Tool Information - -### Agent Infomation - Please ensure your selection is one of the listed roles. Available roles for selection: - "role name: tool_react -role description: Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.," -"role name: code_react -role description: Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.," - Please ensure select the Role from agent names, such as tool_react, code_react - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Current Plan - -### Response Output Format -**Thoughts:** think the reason step by step about why you selecte one role -**Role:** Select the role from agent names. - -### Begin!!! 
- -################### -<<<>>> -################### - -**Thoughts:** -**Role:** - - -########################### -<<<>>> -########################### -### Agent Profile -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools. -Please note that all the tools you can use are listed below. You can only choose from these tools for use. -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use. -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. - -### Tool Information - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Task Records - -### Response Output Format -**Thoughts:** According the previous observations, plan the approach for using the tool effectively. -... - -### Begin!!! - -################### -<<<>>> -################### -**Thoughts:** -**Action Status:** -**Action:** -**Observation:** -**Thoughts:** -**Action Status:** -**Action:** - -########################### -<<<>>> -########################### -### Agent Profile -When users need help with coding, your role is to provide precise and effective guidance. -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step. - -### Context Data - -#### Reference Documents - -#### Session Records - -### Response Output Format - -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem, -outline the plan for executing this step. - -**Action Status:** Set to 'stopped' or 'code_executing'. 
-If it's 'stopped', the action is to provide the final answer to the session records and executed steps. -If it's 'code_executing', the action is to write the code. -... - -### Begin!!! - -################### -<<<>>> -################### - -**Thoughts:** -**Action Status:** -**Action:** -**Observation:** -**Thoughts:** -**Action Status:** -**Action:** - -``` +--- +group: + title: Connector + order: 0 +title: Prompt +order: 2 +toc: content +--- + +## Prompt Manager + +Managing prompt creation in multi-agent linkages + +- Quick Configuration: Utilizing preset processing functions, users can easily configure by simply defining the inputs and outputs of the agents, enabling fast assembly and configuration of multi-agent prompts. +- Customization Support: Allows users to customize the internal processing logic of each module within the prompt to achieve personalized implementation of the agent prompt. + +### Preset Template Structure for Prompts + +- Agent Profile: This section involves the basic description of the agent, including but not limited to the type of agent, its functions, and command set. Users can set the basic attributes of the agent here to ensure its behavior aligns with expectations. +- Context: Contextual Information, provided as a reference for the agent, aiding in better decision-making. + - Tool Information: This part provides the agent with a list of available tools, from which the agent can choose appropriate ones to assist in task execution based on current scenario requirements. + - Reference Documents: This may include documents or code snippets for the agent to refer to when handling requests, to facilitate the use of relevant information. + - Session Records: In multi-round conversations, this section records previous dialogue content to ensure continuity within the context. 
+- Response Output Format: Here the user can set the output format of the agent to ensure that the generated responses meet specific formatting requirements, including structure, grammar, etc. + +## Standard Structure of Prompt + +In the entire structure of a Prompt, we need to define three parts: + +- Agent Profile +- Input Format: such as `**key_name:** key_description` +- Response Output Format: such as `**key_name:** key_description` + +``` +#### Agent Profile +Agent Description ... + +#### Input Format +**Origin Query:** the initial question or objective that the user wanted to achieve +**Context:** the current status and history of the tasks to determine if Origin Query has been achieved. + +#### Response Output Format +**Action Status:** finished or continued +If it's 'finished', the context can answer the origin query. +If it's 'continued', the context can't answer the origin query. +**REASON:** Justify the decision of choosing 'finished' or 'continued' by evaluating the progress step by step. +Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion. +``` + +Here, we have integrated some of the common operations of the `Input Format`, with certain fields and operational procedures built in to form a standardized configurable operation. +In the future, we will also make parts of the Agent Profile and Response Output Format configurable to reduce the difficulty of writing Prompts. 
+ +### Customizing Agents + +- Implement construction with custom fields according to actual needs + +``` +class CodeGenDocer(BaseAgent): + def start_action_step(self, message: Message) -> Message: + '''do action before agent predict ''' + # Get code snippets and node information based on the question + action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, + embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") + current_vertex = action_json['vertex'] + message.customed_kargs["Code Snippet"] = action_json["code"] + message.customed_kargs['Current_Vertex'] = current_vertex + return message + +``` + +### pre_print Function + +After building phases, chains, or agents, we can confirm agent linkages using the pre-print function of methods, allowing for debugging in advance to avoid discovering issues only after execution. + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS + + +import os, sys +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" + +llm_config = LLMConfig( + model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 +) +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) + +phase_name = "baseGroupPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + 
+question = "Confirm if employee_data.csv exists locally, and check its columns and data types; then draw a bar chart" +query = Message( + user_name="test", role_type="user", role_name="user", input_query=question, +) +phase.pre_print(query) +``` + +Here, pre-defined agents are used,,custom case can be seen [customed_example](./customed_examples.en-US.md) +
    + +## check the pre-print prompt + +``` +########################## +<<<>>> +########################## + +### Agent Profile +Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. +ATTENTION: response carefully referenced "Response Output Format" in format. + +### Tool Information + +### Agent Infomation + Please ensure your selection is one of the listed roles. Available roles for selection: + "role name: tool_react +role description: Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.," +"role name: code_react +role description: Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.," + Please ensure select the Role from agent names, such as tool_react, code_react + +### Context Data + +#### Reference Documents + +#### Session Records + +#### Current Plan + +### Response Output Format +**Thoughts:** think the reason step by step about why you selecte one role +**Role:** Select the role from agent names. + +### Begin!!! 
+ +################### +<<<>>> +################### + +**Thoughts:** +**Role:** + + +########################### +<<<>>> +########################### +### Agent Profile +When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools. +Please note that all the tools you can use are listed below. You can only choose from these tools for use. +If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use. +ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. + +### Tool Information + +### Context Data + +#### Reference Documents + +#### Session Records + +#### Task Records + +### Response Output Format +**Thoughts:** According the previous observations, plan the approach for using the tool effectively. +... + +### Begin!!! + +################### +<<<>>> +################### +**Thoughts:** +**Action Status:** +**Action:** +**Observation:** +**Thoughts:** +**Action Status:** +**Action:** + +########################### +<<<>>> +########################### +### Agent Profile +When users need help with coding, your role is to provide precise and effective guidance. +Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step. + +### Context Data + +#### Reference Documents + +#### Session Records + +### Response Output Format + +**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem, +outline the plan for executing this step. + +**Action Status:** Set to 'stopped' or 'code_executing'. 
+If it's 'stopped', the action is to provide the final answer to the session records and executed steps. +If it's 'code_executing', the action is to write the code. +... + +### Begin!!! + +################### +<<<>>> +################### + +**Thoughts:** +**Action Status:** +**Action:** +**Observation:** +**Thoughts:** +**Action Status:** +**Action:** + +``` diff --git a/content/zh/muagent/connector/connector_prompt.md b/docs/docs/api-docs/MuAgent/connector/connector_prompt.zh-CN.md similarity index 86% rename from content/zh/muagent/connector/connector_prompt.md rename to docs/docs/api-docs/MuAgent/connector/connector_prompt.zh-CN.md index 22569e6..8549c2b 100644 --- a/content/zh/muagent/connector/connector_prompt.md +++ b/docs/docs/api-docs/MuAgent/connector/connector_prompt.zh-CN.md @@ -1,233 +1,239 @@ ---- -title: Connector Prompt -slug: Connector Prompt ZH -url: "muagent/connector-prompt-zh" -aliases: -- "/muagent/connector-prompt-zh" ---- - - -## 提示管理器(Prompt Manager) -管理多智能体链路中的prompt创建 -- 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 -- 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 - -### Prompt预设模板结构 - -- Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 -- Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 - - Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 - - Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 - - Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 -- Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 - - -## Prompt 的标准结构 -在整个Prompt的整个结构中,我们需要去定义三个部分 -- Agent Profil -- Input Format -- Response Output Format - -``` -#### Agent Profile - -Agent Description ... - -#### Input Format - -**Origin Query:** the initial question or objective that the user wanted to achieve - -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved. 
- -#### Response Output Format -**Action Status:** finished or continued -If it's 'finished', the context can answer the origin query. -If it's 'continued', the context cant answer the origin query. - -**REASON:** Justify the decision of choosing 'finished' and 'continued' by evaluating the progress step by step. -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion. -``` - - -其中,我们整合了部分 `Input Format` 的通用操作,内置了一部分字段和操作流程,形成通用的配置化操作。 - -未来我们会也会进一步将 Agent Profile和Response Output Format的部分,实现可配置化操作,降低Prompt编写难度 - -### 自定义 Agent - -- 有自定义字段需求,根据实际需求完成构造 -``` -class CodeGenDocer(BaseAgent): - - def start_action_step(self, message: Message) -> Message: - '''do action before agent predict ''' - # 根据问题获取代码片段和节点信息 - action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, - embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") - current_vertex = action_json['vertex'] - message.customed_kargs["Code Snippet"] = action_json["code"] - message.customed_kargs['Current_Vertex'] = current_vertex - return message - -``` - -### pre_print 功能 -在我们构建phase、chain或者agent之后,可以通过函数的预打印功能,实现agents链路确认,避免在执行后才发现问题,可提前进行debug -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Role, Message, ChainConfig -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS - - -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or 
"socks5://127.0.0.1:13659" - -llm_config = LLMConfig( - model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 -) -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) - -phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -phase.pre_print(query) -``` - - -这里采用预定义好的链路,自定义case可见[customed_example](/muagent/customed-examples-zh) -
    - - - -``` ->>> 完整信息确认 muagent.connector.configs中进行确认 - -########################## -<<<>>> -########################## - -### Agent Profile -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. -ATTENTION: response carefully referenced "Response Output Format" in format. - -### Tool Information - -### Agent Infomation - Please ensure your selection is one of the listed roles. Available roles for selection: - "role name: tool_react -role description: Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.," -"role name: code_react -role description: Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.," - Please ensure select the Role from agent names, such as tool_react, code_react - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Current Plan - -### Response Output Format -**Thoughts:** think the reason step by step about why you selecte one role -**Role:** Select the role from agent names. 
- -### Begin!!! - -################### -<<<>>> -################### - -**Thoughts:** -**Role:** - - -########################### -<<<>>> -########################### -### Agent Profile -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools. -Please note that all the tools you can use are listed below. You can only choose from these tools for use. -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use. -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. - -### Tool Information - -### Context Data - -#### Reference Documents - -#### Session Records - -#### Task Records - -### Response Output Format -**Thoughts:** According the previous observations, plan the approach for using the tool effectively. -... - -### Begin!!! - -################### -<<<>>> -################### -**Thoughts:** -**Action Status:** -**Action:** -**Observation:** -**Thoughts:** -**Action Status:** -**Action:** - -########################### -<<<>>> -########################### -### Agent Profile -When users need help with coding, your role is to provide precise and effective guidance. -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step. - -### Context Data - -#### Reference Documents - -#### Session Records - -### Response Output Format - -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem, -outline the plan for executing this step. - -**Action Status:** Set to 'stopped' or 'code_executing'. 
-If it's 'stopped', the action is to provide the final answer to the session records and executed steps.
-If it's 'code_executing', the action is to write the code.
-...
-
-### Begin!!!
-
-###################
-<<<>>>
-###################
-
-**Thoughts:**
-**Action Status:**
-**Action:**
-**Observation:**
-**Thoughts:**
-**Action Status:**
-**Action:**
-
-```
+---
+group:
+  title: Connector
+  order: 0
+title: Prompt
+order: 2
+toc: content
+---
+
+## 提示管理器(Prompt Manager)
+
+管理多智能体链路中的 prompt 创建
+
+- 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的 prompt 快速组装和配置。
+- 自定义支持:允许用户自定义 prompt 内部各模块的处理逻辑,以达到个性化的智能体 prompt 实现。
+
+### Prompt 预设模板结构
+
+- Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。
+- Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。
+  - Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。
+  - Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。
+  - Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。
+- Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。
+
+## Prompt 的标准结构
+
+在整个 Prompt 的整个结构中,我们需要去定义三个部分
+
+- Agent Profile
+- Input Format: 需要写出 `**key_name:** key_description`
+- Response Output Format:需要写出 `**key_name:** key_description`
+
+```
+#### Agent Profile
+
+Agent Description ...
+
+#### Input Format
+
+**Origin Query:** the initial question or objective that the user wanted to achieve
+
+**Context:** the current status and history of the tasks to determine if Origin Query has been achieved.
+
+#### Response Output Format
+**Action Status:** finished or continued
+If it's 'finished', the context can answer the origin query.
+If it's 'continued', the context cant answer the origin query.
+
+**REASON:** Justify the decision of choosing 'finished' and 'continued' by evaluating the progress step by step.
+Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion. 
+``` + +其中,我们整合了部分 `Input Format` 的通用操作,内置了一部分字段和操作流程,形成通用的配置化操作。 + +未来我们会也会进一步将 Agent Profile 和 Response Output Format 的部分,实现可配置化操作,降低 Prompt 编写难度 + +### 自定义 Agent + +- 有自定义字段需求,根据实际需求完成构造 + +``` +class CodeGenDocer(BaseAgent): + + def start_action_step(self, message: Message) -> Message: + '''do action before agent predict ''' + # 根据问题获取代码片段和节点信息 + action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, + embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") + current_vertex = action_json['vertex'] + message.customed_kargs["Code Snippet"] = action_json["code"] + message.customed_kargs['Current_Vertex'] = current_vertex + return message + +``` + +### pre_print 功能 + +在我们构建 phase、chain 或者 agent 之后,可以通过函数的预打印功能,实现 agents 链路确认,避免在执行后才发现问题,可提前进行 debug + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS + + +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" + +llm_config = LLMConfig( + model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 +) +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) + +phase_name = "baseGroupPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" +query = 
Message( + user_name="test", role_type="user", role_name="user", input_query=question, +) +phase.pre_print(query) +``` + +这里采用预定义好的链路,自定义 case 可见[customed_example](./customed_examples.zh-CN.md) +
    + +``` +>>> 完整信息确认 muagent.connector.configs中进行确认 + +########################## +<<<>>> +########################## + +### Agent Profile +Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. +ATTENTION: response carefully referenced "Response Output Format" in format. + +### Tool Information + +### Agent Infomation + Please ensure your selection is one of the listed roles. Available roles for selection: + "role name: tool_react +role description: Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.," +"role name: code_react +role description: Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.," + Please ensure select the Role from agent names, such as tool_react, code_react + +### Context Data + +#### Reference Documents + +#### Session Records + +#### Current Plan + +### Response Output Format +**Thoughts:** think the reason step by step about why you selecte one role +**Role:** Select the role from agent names. 
+ +### Begin!!! + +################### +<<<>>> +################### + +**Thoughts:** +**Role:** + + +########################### +<<<>>> +########################### +### Agent Profile +When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools. +Please note that all the tools you can use are listed below. You can only choose from these tools for use. +If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use. +ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying. + +### Tool Information + +### Context Data + +#### Reference Documents + +#### Session Records + +#### Task Records + +### Response Output Format +**Thoughts:** According the previous observations, plan the approach for using the tool effectively. +... + +### Begin!!! + +################### +<<<>>> +################### +**Thoughts:** +**Action Status:** +**Action:** +**Observation:** +**Thoughts:** +**Action Status:** +**Action:** + +########################### +<<<>>> +########################### +### Agent Profile +When users need help with coding, your role is to provide precise and effective guidance. +Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step. + +### Context Data + +#### Reference Documents + +#### Session Records + +### Response Output Format + +**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem, +outline the plan for executing this step. + +**Action Status:** Set to 'stopped' or 'code_executing'. 
+If it's 'stopped', the action is to provide the final answer to the session records and executed steps. +If it's 'code_executing', the action is to write the code. +... + +### Begin!!! + +################### +<<<>>> +################### + +**Thoughts:** +**Action Status:** +**Action:** +**Observation:** +**Thoughts:** +**Action Status:** +**Action:** + +``` diff --git a/docs/docs/api-docs/MuAgent/connector/connector_tbasememory.en-US.md b/docs/docs/api-docs/MuAgent/connector/connector_tbasememory.en-US.md new file mode 100644 index 0000000..ba8bd92 --- /dev/null +++ b/docs/docs/api-docs/MuAgent/connector/connector_tbasememory.en-US.md @@ -0,0 +1,138 @@ +--- +group: + title: Connector + order: 0 +subGroup: + title: Memory +title: Local Memory Builder +order: 3 +toc: content +--- + +## Usage Example + +### Create memory manager instance + +``` +import os +import openai +from coagent.base_configs.env_config import KB_ROOT_PATH +from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager +from coagent.llm_models.llm_config import EmbedConfig, LLMConfig +from coagent.connector.schema import Message + + +os.environ["API_BASE_URL"] = OPENAI_API_BASE +os.environ["OPENAI_API_KEY"] = "sk-xx" +openai.api_key = "sk-xxx" +# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" + +# TBASE CONFIG +TBASE_ARGS = { + 'host': '{url}', + 'port': 6379, + 'username': '', + 'password': '' + } + + +# LLM and Embedding Model configurations +llm_config = LLMConfig( + model_name=os.environ["model_name"], api_key=os.environ["OPENAI_API_KEY"], + api_base_url=os.environ["API_BASE_URL"], temperature=0.3 +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=os.environ["embed_model"], + embed_model_path=os.environ["embed_model_path"] +) + + +#specify index_name +index_name = 'your_index_name' +th = TbaseHandler(TBASE_ARGS, index_name, 
definition_value="message") + + +# # drop index +# th.drop_index(index_name) + +# create tbase memory manager +memory_manager = TbaseMemoryManager( + unique_name="EKG", + embed_config=embed_config, + llm_config=llm_config, + tbase_handler=th, + use_vector=False + ) +``` + +### support memory manage + +``` +import uuid + +# example1 +message = Message( + chat_index="wyp311395_test_chatindex_0", + message_index= f"nodeid0-{uuid.uuid4()}", + user_name="311395", + role_name = "311395", # agent 名字, + role_type = "user", # agent 类型,默认assistant,可选observation + ## llm output + role_content = "今天天气如何?", # 输入 +) + +memory_manager.append(message) + +# example2 +message = Message( + chat_index="wyp311395_test_chatindex_0", + message_index= f"nodeid1-{uuid.uuid4()}", + user_name="311395", + role_name = "tester_0", # agent 名字, + role_type = "assistant", # agent 类型,默认assistant,可选observation + ## llm output + role_content = " {'date': '2024-04-17'}", # 输入 +) + +memory_manager.append(message) +``` + +### Support for memory retrieval + +``` + +logger.debug(f'按user_name检索:{memory_manager.get_memory_pool("311395")}') + +logger.debug(f'全局检索:{memory_manager.get_memory_pool_by_content("今天天气如何?")}') + +logger.debug(f'全局检索:{memory_manager.get_memory_pool_by_content("functioncall")}') + +logger.debug(f'按kev-value检索:{memory_manager.get_memory_pool_by_key_content("role_content", "functioncall")}') + +logger.debug(f'按key-value检索:{memory_manager.get_memory_pool_by_all({"chat_index": "wyp311395_test_chatindex_0", "role_content": "functioncall"})}') + +logger.debug(f'按key-value检索:{memory_manager.get_memory_pool_by_all({"keyword": "nodeid3"})}') + +# +logger.debug(f'按datetime检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", datetime="2024-03-12 17:48:00", n=4, top_k=5, retrieval_type= "datetime")}') + +# +logger.debug(f'按datetime检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", datetime="2024-04-18 11:30:00", n=4, top_k=5, retrieval_type= 
"datetime")}') + +# +logger.debug(f'按text检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", text="今天天气", top_k=5, retrieval_type= "text")}') + +# +logger.debug(f'按embedding检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", text="今天天气", top_k=5, retrieval_type= "embedding")}') + +``` + +### Support for memory summarization + +``` +# recursive_summary test +messages = memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", text="今天天气", top_k=5, retrieval_type= "embedding") +print(memory_manager.recursive_summary(messages, chat_index="wyp311395_test_chatindex_0", nodeid="nodeid3", user_name="311395", split_n=1)) +``` diff --git a/docs/docs/api-docs/MuAgent/connector/connector_tbasememory.zh-CN.md b/docs/docs/api-docs/MuAgent/connector/connector_tbasememory.zh-CN.md new file mode 100644 index 0000000..9b7fcaa --- /dev/null +++ b/docs/docs/api-docs/MuAgent/connector/connector_tbasememory.zh-CN.md @@ -0,0 +1,138 @@ +--- +group: + title: Connector + order: 0 +subGroup: + title: Memory +title: Tbase Memory Builder +order: 3 +toc: content +--- + +## 使用示例 + +### 参加 memory manager 实例 + +``` +import os +import openai +from coagent.base_configs.env_config import KB_ROOT_PATH +from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager +from coagent.llm_models.llm_config import EmbedConfig, LLMConfig +from coagent.connector.schema import Message + + +os.environ["API_BASE_URL"] = OPENAI_API_BASE +os.environ["OPENAI_API_KEY"] = "sk-xx" +openai.api_key = "sk-xxx" +# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" + +# TBASE CONFIG +TBASE_ARGS = { + 'host': '{url}', + 'port': 6379, + 'username': '', + 'password': '' + } + + +# LLM and Embedding Model configurations +llm_config = LLMConfig( + model_name=os.environ["model_name"], api_key=os.environ["OPENAI_API_KEY"], + 
api_base_url=os.environ["API_BASE_URL"], temperature=0.3 +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=os.environ["embed_model"], + embed_model_path=os.environ["embed_model_path"] +) + + +#specify index_name +index_name = 'your_index_name' +th = TbaseHandler(TBASE_ARGS, index_name, definition_value="message") + + +# # drop index +# th.drop_index(index_name) + +# create tbase memory manager +memory_manager = TbaseMemoryManager( + unique_name="EKG", + embed_config=embed_config, + llm_config=llm_config, + tbase_handler=th, + use_vector=False + ) +``` + +### 支持 memory 存储管理 + +``` +import uuid + +# example1 +message = Message( + chat_index="wyp311395_test_chatindex_0", + message_index= f"nodeid0-{uuid.uuid4()}", + user_name="311395", + role_name = "311395", # agent 名字, + role_type = "user", # agent 类型,默认assistant,可选observation + ## llm output + role_content = "今天天气如何?", # 输入 +) + +memory_manager.append(message) + +# example2 +message = Message( + chat_index="wyp311395_test_chatindex_0", + message_index= f"nodeid1-{uuid.uuid4()}", + user_name="311395", + role_name = "tester_0", # agent 名字, + role_type = "assistant", # agent 类型,默认assistant,可选observation + ## llm output + role_content = " {'date': '2024-04-17'}", # 输入 +) + +memory_manager.append(message) +``` + +### 支持 memory 检索 + +``` + +logger.debug(f'按user_name检索:{memory_manager.get_memory_pool("311395")}') + +logger.debug(f'全局检索:{memory_manager.get_memory_pool_by_content("今天天气如何?")}') + +logger.debug(f'全局检索:{memory_manager.get_memory_pool_by_content("functioncall")}') + +logger.debug(f'按kev-value检索:{memory_manager.get_memory_pool_by_key_content("role_content", "functioncall")}') + +logger.debug(f'按key-value检索:{memory_manager.get_memory_pool_by_all({"chat_index": "wyp311395_test_chatindex_0", "role_content": "functioncall"})}') + +logger.debug(f'按key-value检索:{memory_manager.get_memory_pool_by_all({"keyword": "nodeid3"})}') + +# 
+logger.debug(f'按datetime检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", datetime="2024-03-12 17:48:00", n=4, top_k=5, retrieval_type= "datetime")}') + +# +logger.debug(f'按datetime检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", datetime="2024-04-18 11:30:00", n=4, top_k=5, retrieval_type= "datetime")}') + +# +logger.debug(f'按text检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", text="今天天气", top_k=5, retrieval_type= "text")}') + +# +logger.debug(f'按embedding检索:{memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", text="今天天气", top_k=5, retrieval_type= "embedding")}') + +``` + +### 支持 memory 总结 + +``` +# recursive_summary test +messages = memory_manager.router_retrieval(chat_index="wyp311395_test_chatindex_0", text="今天天气", top_k=5, retrieval_type= "embedding") +print(memory_manager.recursive_summary(messages, chat_index="wyp311395_test_chatindex_0", nodeid="nodeid3", user_name="311395", split_n=1)) +``` diff --git a/content/en/muagent/connector/customed_examples.md b/docs/docs/api-docs/MuAgent/connector/customed_examples.en-US.md similarity index 93% rename from content/en/muagent/connector/customed_examples.md rename to docs/docs/api-docs/MuAgent/connector/customed_examples.en-US.md index e4ebe1e..5cadbd9 100644 --- a/content/en/muagent/connector/customed_examples.md +++ b/docs/docs/api-docs/MuAgent/connector/customed_examples.en-US.md @@ -1,290 +1,294 @@ ---- -title: Customed Examples -slug: Customed Examples -url: "muagent/custom-examples" -aliases: -- "/muagent/custom-examples" ---- - - - -## How to Create Your Personalized Agent Phase Scenario -Below we will use a code repository to demonstrate the automatic generation of API documentation from code, detailing how to customize the construction of an agent phase. 
- -### Design Your Prompt Structure - -- codeGenDocGroup_PROMPT, create group Agent Prompt -``` -# update new agent configs -codeGenDocGroup_PROMPT = """#### Agent Profile - -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. - -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. - -#### Input Format - -#### Response Output Format - -**Code Path:** Extract the paths for the class/method/function that need to be addressed from the context - -**Role:** Select the role from agent names -""" -``` - -- classGenDoc_PROMPT, create class code to api doc Prompt -``` -classGenDoc_PROMPT = """#### Agent Profile -As an advanced code documentation generator, you are proficient in translating class definitions into comprehensive documentation with a focus on instantiation parameters. -Your specific task is to parse the given code snippet of a class, extract information regarding its instantiation parameters. - -#### Input Format - -**Current_Vertex:** Provide the code vertex of the function or method. - -**Code Snippet:** Provide the full class definition, including the constructor and any parameters it may require for instantiation. - -#### Response Output Format -**Class Base:** Specify the base class or interface from which the current class extends, if any. - -**Class Description:** Offer a brief description of the class's purpose and functionality. - -**Init Parameters:** List each parameter from construct. For each parameter, provide: - - `param`: The parameter name - - `param_description`: A concise explanation of the parameter's purpose. - - `param_type`: The data type of the parameter, if explicitly defined. 
- - ```json - [ - { - "param": "parameter_name", - "param_description": "A brief description of what this parameter is used for.", - "param_type": "The data type of the parameter" - }, - ... - ] - ``` - - - If no parameter for construct, return - ```json - [] - ``` -""" -``` - -- funcGenDoc_PROMPT,create function code to api doc Prompt -``` -funcGenDoc_PROMPT = """#### Agent Profile -You are a high-level code documentation assistant, skilled at extracting information from function/method code into detailed and well-structured documentation. - - -#### Input Format -**Code Path:** Provide the code path of the function or method you wish to document. -This name will be used to identify and extract the relevant details from the code snippet provided. - -**Current_Vertex:** Provide the code vertex of the function or method. - -**Code Snippet:** A segment of code that contains the function or method to be documented. - -#### Response Output Format - -**Class Description:** Offer a brief description of the method(function)'s purpose and functionality. - -**Parameters:** Extract parameter for the specific function/method Code from Code Snippet. For parameter, provide: - - `param`: The parameter name - - `param_description`: A concise explanation of the parameter's purpose. - - `param_type`: The data type of the parameter, if explicitly defined. - ```json - [ - { - "param": "parameter_name", - "param_description": "A brief description of what this parameter is used for.", - "param_type": "The data type of the parameter" - }, - ... - ] - ``` - - If no parameter for function/method, return - ```json - [] - ``` - -**Return Value Description:** Describe what the function/method returns upon completion. - -**Return Type:** Indicate the type of data the function/method returns (e.g., string, integer, object, void). 
-""" -``` - - -### Import Packages and Basic Configuration Parameters -- First, add openai configuration or similar interfaces to models such as openai (launched via fastchat) - -``` -import os, sys -from muagent.base_configs.env_config import CB_ROOT_PATH -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.connector.phase import BasePhase -from muagent.connector.agents import BaseAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Message, Role, ChainConfig -from muagent.codechat.codebase_handler.codebase_handler import CodeBaseHandler -from loguru import logger -from muagent.tools import CodeRetrievalSingle - - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - - -### Defining a New Agent Class -For custom key-value information -``` -class CodeGenDocer(BaseAgent): - def start_action_step(self, message: Message) -> Message: - '''do action before agent predict ''' - # Retrieve code snippets and node information based on the question - action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, - embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") - current_vertex = action_json['vertex'] - message.customed_kargs["Code Snippet"] = action_json["code"] - message.customed_kargs['Current_Vertex'] = current_vertex - return message - -``` - - -### Preparing LLM & Embedding -``` -llm_config = LLMConfig( - model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 -) -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - - -### Codebase Loading -``` -# initialize 
codebase -# delete codebase -codebase_name = 'client_nebula' -code_path = "D://chromeDownloads/devopschat-bot/client_v2/client" -use_nh = True -do_interpret = False -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, - llm_config=llm_config, embed_config=embed_config) -cbh.delete_codebase(codebase_name=codebase_name) -# load codebase -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, - llm_config=llm_config, embed_config=embed_config) -cbh.import_code(do_interpret=do_interpret) -``` - - -### Then Construct a Phase Instance and Begin Execution -``` -# log-level, print prompt, and llm predict -os.environ["log_verbose"] = "1" - -funcGenDoc_role = Role(role_type="assistant", role_name="funcGenDoc_role", prompt=funcGenDoc_PROMPT) -funcGenDoc = CodeGenDocer( - role=funcGenDoc_role, - chat_turn=1, - llm_config=llm_config, embed_config=embed_config, -) - -classGenDoc_role = Role(role_type="assistant", role_name="classGenDoc_role", prompt=classGenDoc_PROMPT) -classGenDoc = CodeGenDocer( - role=classGenDoc_role, - chat_turn=1, - llm_config=llm_config, embed_config=embed_config, -) - -codeGenDocGroup_role = Role(role_type="assistant", role_name="codeGenDocGroup_role", prompt=codeGenDocGroup_PROMPT) -codeGenDocGroup = SelectorAgent( - role=codeGenDocGroup_role, - chat_turn=1, - llm_config=llm_config, embed_config=embed_config, - group_agents=[funcGenDoc, classGenDoc] -) - -chain_config = ChainConfig( - chain_name="codeGenDocGroup_chain", agents=[codeGenDocGroup.role.role_name,], - chat_turn=1) -chain = BaseChain( - chainConfig=chain_config, agents=[codeGenDocGroup], - llm_config=llm_config, embed_config=embed_config, -) - -phase = BasePhase( - phase_name="codeGenDocGroup_phase", chains=[chain], - embed_config=embed_config, llm_config=llm_config -) -``` - -### start to generate api docs from code - -``` -# Initialize based on the previous loading process 
-cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, - llm_config=llm_config, embed_config=embed_config) -cbh.search_vertices(vertex_type="method") -# Begin transforming code into API documentation structure -for vertex_type in ["class", "method"]: - vertices = cbh.search_vertices(vertex_type=vertex_type) - logger.info(f"vertices={vertices}") - # round-1 - docs = [] - for vertex in vertices: - vertex = vertex.split("-")[0] # '-' is the delimiter for method parameters - query_content = f"Generate documentation for {vertex_type} node {vertex}" - query = Message( - role_name="human", role_type="user", input_query=query_content, - code_engine_name=codebase_name, score_threshold=1.0, top_k=3, cb_search_type="tag", use_nh=use_nh, - local_graph_path=CB_ROOT_PATH, - ) - output_message, output_memory = phase.step(query, reinit_memory=True) - # print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - docs.append(output_memory.get_spec_parserd_output()) - os.makedirs(f"{CB_ROOT_PATH}/docs", exist_ok=True) - with open(f"{CB_ROOT_PATH}/docs/raw_{vertex_type}.json", "w") as f: - json.dump(docs, f) - - -# Convert the generated document information into markdown text -from muagent.utils.code2doc_util import * -import json -with open(f"/home/user/code_base/docs/raw_method.json", "r") as f: - method_raw_data = json.load(f) - - -with open(f"/home/user/code_base/docs/raw_class.json", "r") as f: - class_raw_data = json.load(f) - -method_data = method_info_decode(method_raw_data) -class_data = class_info_decode(class_raw_data) -method_mds = encode2md(method_data, method_text_md) -class_mds = encode2md(class_data, class_text_md) - -docs_dict = {} -for k,v in class_mds.items(): - method_textmds = method_mds.get(k, []) - for vv in v: - # Theoretically, there should only be one - text_md = vv - for method_textmd in method_textmds: - text_md += "\n
    " + method_textmd - docs_dict.setdefault(k, []).append(text_md) - - with open(f"/home/user/code_base/docs/{k}.md", "w") as f: - f.write(text_md) -``` \ No newline at end of file +--- +group: + title: Connector + order: 0 +title: Customed Examples +order: 4 +toc: content +--- + +## How to Create Your Personalized Agent Phase Scenario + +Below we will use a code repository to demonstrate the automatic generation of API documentation from code, detailing how to customize the construction of an agent phase. + +### Design Your Prompt Structure + +- codeGenDocGroup_PROMPT, create group Agent Prompt + +``` +# update new agent configs +codeGenDocGroup_PROMPT = """#### Agent Profile + +Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. + +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. + +#### Input Format + +#### Response Output Format + +**Code Path:** Extract the paths for the class/method/function that need to be addressed from the context + +**Role:** Select the role from agent names +""" +``` + +- classGenDoc_PROMPT, create class code to api doc Prompt + +```` +classGenDoc_PROMPT = """#### Agent Profile +As an advanced code documentation generator, you are proficient in translating class definitions into comprehensive documentation with a focus on instantiation parameters. +Your specific task is to parse the given code snippet of a class, extract information regarding its instantiation parameters. + +#### Input Format + +**Current_Vertex:** Provide the code vertex of the function or method. + +**Code Snippet:** Provide the full class definition, including the constructor and any parameters it may require for instantiation. 
+ +#### Response Output Format +**Class Base:** Specify the base class or interface from which the current class extends, if any. + +**Class Description:** Offer a brief description of the class's purpose and functionality. + +**Init Parameters:** List each parameter from construct. For each parameter, provide: + - `param`: The parameter name + - `param_description`: A concise explanation of the parameter's purpose. + - `param_type`: The data type of the parameter, if explicitly defined. + + ```json + [ + { + "param": "parameter_name", + "param_description": "A brief description of what this parameter is used for.", + "param_type": "The data type of the parameter" + }, + ... + ] + ``` + + + If no parameter for construct, return + ```json + [] + ``` +""" +```` + +- funcGenDoc_PROMPT,create function code to api doc Prompt + +```` +funcGenDoc_PROMPT = """#### Agent Profile +You are a high-level code documentation assistant, skilled at extracting information from function/method code into detailed and well-structured documentation. + + +#### Input Format +**Code Path:** Provide the code path of the function or method you wish to document. +This name will be used to identify and extract the relevant details from the code snippet provided. + +**Current_Vertex:** Provide the code vertex of the function or method. + +**Code Snippet:** A segment of code that contains the function or method to be documented. + +#### Response Output Format + +**Class Description:** Offer a brief description of the method(function)'s purpose and functionality. + +**Parameters:** Extract parameter for the specific function/method Code from Code Snippet. For parameter, provide: + - `param`: The parameter name + - `param_description`: A concise explanation of the parameter's purpose. + - `param_type`: The data type of the parameter, if explicitly defined. 
+ ```json + [ + { + "param": "parameter_name", + "param_description": "A brief description of what this parameter is used for.", + "param_type": "The data type of the parameter" + }, + ... + ] + ``` + + If no parameter for function/method, return + ```json + [] + ``` + +**Return Value Description:** Describe what the function/method returns upon completion. + +**Return Type:** Indicate the type of data the function/method returns (e.g., string, integer, object, void). +""" +```` + +### Import Packages and Basic Configuration Parameters + +- First, add openai configuration or similar interfaces to models such as openai (launched via fastchat) + +``` +import os, sys, json +from muagent.base_configs.env_config import CB_ROOT_PATH +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.connector.phase import BasePhase +from muagent.connector.agents import BaseAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Message, Role, ChainConfig +from muagent.codechat.codebase_handler.codebase_handler import CodeBaseHandler +from loguru import logger +from muagent.tools import CodeRetrievalSingle + + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### Defining a New Agent Class + +For custom key-value information + +``` +class CodeGenDocer(BaseAgent): + def start_action_step(self, message: Message) -> Message: + '''do action before agent predict ''' + # Retrieve code snippets and node information based on the question + action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, + embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") + current_vertex = 
action_json['vertex'] + message.customed_kargs["Code Snippet"] = action_json["code"] + message.customed_kargs['Current_Vertex'] = current_vertex + return message + +``` + +### Preparing LLM & Embedding + +``` +llm_config = LLMConfig( + model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 +) +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Codebase Loading + +``` +# initialize codebase +# delete codebase +codebase_name = 'client_nebula' +code_path = "D://chromeDownloads/devopschat-bot/client_v2/client" +use_nh = True +do_interpret = False +cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, + llm_config=llm_config, embed_config=embed_config) +cbh.delete_codebase(codebase_name=codebase_name) +# load codebase +cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, + llm_config=llm_config, embed_config=embed_config) +cbh.import_code(do_interpret=do_interpret) +``` + +### Then Construct a Phase Instance and Begin Execution + +``` +# log-level, print prompt, and llm predict +os.environ["log_verbose"] = "1" + +funcGenDoc_role = Role(role_type="assistant", role_name="funcGenDoc_role", prompt=funcGenDoc_PROMPT) +funcGenDoc = CodeGenDocer( + role=funcGenDoc_role, + chat_turn=1, + llm_config=llm_config, embed_config=embed_config, +) + +classGenDoc_role = Role(role_type="assistant", role_name="classGenDoc_role", prompt=classGenDoc_PROMPT) +classGenDoc = CodeGenDocer( + role=classGenDoc_role, + chat_turn=1, + llm_config=llm_config, embed_config=embed_config, +) + +codeGenDocGroup_role = Role(role_type="assistant", role_name="codeGenDocGroup_role", prompt=codeGenDocGroup_PROMPT) +codeGenDocGroup = SelectorAgent( + role=codeGenDocGroup_role, + chat_turn=1, + llm_config=llm_config, embed_config=embed_config, + group_agents=[funcGenDoc, classGenDoc] +) + 
+chain_config = ChainConfig( + chain_name="codeGenDocGroup_chain", agents=[codeGenDocGroup.role.role_name,], + chat_turn=1) +chain = BaseChain( + chainConfig=chain_config, agents=[codeGenDocGroup], + llm_config=llm_config, embed_config=embed_config, +) + +phase = BasePhase( + phase_name="codeGenDocGroup_phase", chains=[chain], + embed_config=embed_config, llm_config=llm_config +) +``` + +### start to generate api docs from code + +``` +# Initialize based on the previous loading process +cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, + llm_config=llm_config, embed_config=embed_config) +cbh.search_vertices(vertex_type="method") +# Begin transforming code into API documentation structure +for vertex_type in ["class", "method"]: + vertices = cbh.search_vertices(vertex_type=vertex_type) + logger.info(f"vertices={vertices}") + # round-1 + docs = [] + for vertex in vertices: + vertex = vertex.split("-")[0] # '-' is the delimiter for method parameters + query_content = f"Generate documentation for {vertex_type} node {vertex}" + query = Message( + role_name="human", role_type="user", input_query=query_content, + code_engine_name=codebase_name, score_threshold=1.0, top_k=3, cb_search_type="tag", use_nh=use_nh, + local_graph_path=CB_ROOT_PATH, + ) + output_message, output_memory = phase.step(query, reinit_memory=True) + # print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + docs.append(output_memory.get_spec_parserd_output()) + os.makedirs(f"{CB_ROOT_PATH}/docs", exist_ok=True) + with open(f"{CB_ROOT_PATH}/docs/raw_{vertex_type}.json", "w") as f: + json.dump(docs, f) + + +# Convert the generated document information into markdown text +from muagent.utils.code2doc_util import * +import json +with open(f"/home/user/code_base/docs/raw_method.json", "r") as f: + method_raw_data = json.load(f) + + +with open(f"/home/user/code_base/docs/raw_class.json", "r") as f: + class_raw_data 
= json.load(f) + +method_data = method_info_decode(method_raw_data) +class_data = class_info_decode(class_raw_data) +method_mds = encode2md(method_data, method_text_md) +class_mds = encode2md(class_data, class_text_md) + +docs_dict = {} +for k,v in class_mds.items(): + method_textmds = method_mds.get(k, []) + for vv in v: + # Theoretically, there should only be one + text_md = vv + for method_textmd in method_textmds: + text_md += "\n
    " + method_textmd + docs_dict.setdefault(k, []).append(text_md) + + with open(f"/home/user/code_base/docs/{k}.md", "w") as f: + f.write(text_md) +``` diff --git a/content/zh/muagent/connector/customed_examples.md b/docs/docs/api-docs/MuAgent/connector/customed_examples.zh-CN.md similarity index 89% rename from content/zh/muagent/connector/customed_examples.md rename to docs/docs/api-docs/MuAgent/connector/customed_examples.zh-CN.md index de451e5..37af68a 100644 --- a/content/zh/muagent/connector/customed_examples.md +++ b/docs/docs/api-docs/MuAgent/connector/customed_examples.zh-CN.md @@ -1,302 +1,309 @@ ---- -title: Customed Examples -slug: Customed Examples ZH -url: "muagent/custom-examples-zh" -aliases: -- "/muagent/custom-examples-zh" ---- - - -## 如何创建你个性化的 agent phase 场景 - -下面通过 代码库来实现代码转API文档的自动生成, 来详细演示如何自定义一个 agent phase 的构建 - -### 设计你的prompt结构 - -- codeGenDocGroup_PROMPT, 构建 group Agent Prompt -``` -# update new agent configs -codeGenDocGroup_PROMPT = """#### Agent Profile - -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. - -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. - -#### Input Format - -#### Response Output Format - -**Code Path:** Extract the paths for the class/method/function that need to be addressed from the context - -**Role:** Select the role from agent names -""" -``` - -- classGenDoc_PROMPT, 构建 class code to api doc Prompt -``` -classGenDoc_PROMPT = """#### Agent Profile -As an advanced code documentation generator, you are proficient in translating class definitions into comprehensive documentation with a focus on instantiation parameters. -Your specific task is to parse the given code snippet of a class, extract information regarding its instantiation parameters. 
- -#### Input Format - -**Current_Vertex:** Provide the code vertex of the function or method. - -**Code Snippet:** Provide the full class definition, including the constructor and any parameters it may require for instantiation. - -#### Response Output Format -**Class Base:** Specify the base class or interface from which the current class extends, if any. - -**Class Description:** Offer a brief description of the class's purpose and functionality. - -**Init Parameters:** List each parameter from construct. For each parameter, provide: - - `param`: The parameter name - - `param_description`: A concise explanation of the parameter's purpose. - - `param_type`: The data type of the parameter, if explicitly defined. - - ```json - [ - { - "param": "parameter_name", - "param_description": "A brief description of what this parameter is used for.", - "param_type": "The data type of the parameter" - }, - ... - ] - ``` - - - If no parameter for construct, return - ```json - [] - ``` -""" -``` - -- funcGenDoc_PROMPT,构建 function code to api doc Prompt -``` -funcGenDoc_PROMPT = """#### Agent Profile -You are a high-level code documentation assistant, skilled at extracting information from function/method code into detailed and well-structured documentation. - - -#### Input Format -**Code Path:** Provide the code path of the function or method you wish to document. -This name will be used to identify and extract the relevant details from the code snippet provided. - -**Current_Vertex:** Provide the code vertex of the function or method. - -**Code Snippet:** A segment of code that contains the function or method to be documented. - -#### Response Output Format - -**Class Description:** Offer a brief description of the method(function)'s purpose and functionality. - -**Parameters:** Extract parameter for the specific function/method Code from Code Snippet. 
For parameter, provide: - - `param`: The parameter name - - `param_description`: A concise explanation of the parameter's purpose. - - `param_type`: The data type of the parameter, if explicitly defined. - ```json - [ - { - "param": "parameter_name", - "param_description": "A brief description of what this parameter is used for.", - "param_type": "The data type of the parameter" - }, - ... - ] - ``` - - If no parameter for function/method, return - ```json - [] - ``` - -**Return Value Description:** Describe what the function/method returns upon completion. - -**Return Type:** Indicate the type of data the function/method returns (e.g., string, integer, object, void). -""" -``` - -### 导包以及基础参数配置 -- 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) -``` -import os, sys -from muagent.base_configs.env_config import CB_ROOT_PATH -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.connector.phase import BasePhase -from muagent.connector.agents import BaseAgent, SelectorAgent -from muagent.connector.chains import BaseChain -from muagent.connector.schema import Message, Role, ChainConfig -from muagent.codechat.codebase_handler.codebase_handler import CodeBaseHandler - -from loguru import logger -from muagent.tools import CodeRetrievalSingle - - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - - - -### 定义新的agent类 -用于自定义key-value信息 -``` -class CodeGenDocer(BaseAgent): - - def start_action_step(self, message: Message) -> Message: - '''do action before agent predict ''' - # 根据问题获取代码片段和节点信息 - action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, - embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") - 
current_vertex = action_json['vertex'] - message.customed_kargs["Code Snippet"] = action_json["code"] - message.customed_kargs['Current_Vertex'] = current_vertex - return message - -``` - -### 准备LLM & Embedding -``` -llm_config = LLMConfig( - model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 -) -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### 代码库加载 - -``` - -# initialize codebase -# delete codebase -codebase_name = 'client_nebula' -code_path = "D://chromeDownloads/devopschat-bot/client_v2/client" -use_nh = True -do_interpret = False -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, - llm_config=llm_config, embed_config=embed_config) -cbh.delete_codebase(codebase_name=codebase_name) - -# load codebase -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, - llm_config=llm_config, embed_config=embed_config) -cbh.import_code(do_interpret=do_interpret) - -``` - -### 接下来就构建 phase 实例,开始执行 -``` - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "1" - -funcGenDoc_role = Role(role_type="assistant", role_name="funcGenDoc_role", prompt=funcGenDoc_PROMPT) -funcGenDoc = CodeGenDocer( - role=funcGenDoc_role, - chat_turn=1, - llm_config=llm_config, embed_config=embed_config, -) - - -classGenDoc_role = Role(role_type="assistant", role_name="classGenDoc_role", prompt=classGenDoc_PROMPT) -classGenDoc = CodeGenDocer( - role=classGenDoc_role, - chat_turn=1, - llm_config=llm_config, embed_config=embed_config, -) - -codeGenDocGroup_role = Role(role_type="assistant", role_name="codeGenDocGroup_role", prompt=codeGenDocGroup_PROMPT) -codeGenDocGroup = SelectorAgent( - role=codeGenDocGroup_role, - chat_turn=1, - llm_config=llm_config, embed_config=embed_config, - group_agents=[funcGenDoc, classGenDoc] -) - -chain_config = ChainConfig( - 
chain_name="codeGenDocGroup_chain", agents=[codeGenDocGroup.role.role_name,], - chat_turn=1) - -chain = BaseChain( - chainConfig=chain_config, agents=[codeGenDocGroup], - llm_config=llm_config, embed_config=embed_config, -) - -phase = BasePhase( - phase_name="codeGenDocGroup_phase", chains=[chain], - embed_config=embed_config, llm_config=llm_config -) -``` - - -### 开始代码转api文档 -``` -# 根据前面的load过程进行初始化 -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, - llm_config=llm_config, embed_config=embed_config) - -cbh.search_vertices(vertex_type="method") - -# 开始代码转换API文档结构 -for vertex_type in ["class", "method"]: - vertexes = cbh.search_vertices(vertex_type=vertex_type) - logger.info(f"vertexes={vertexes}") - - # round-1 - docs = [] - for vertex in vertexes: - vertex = vertex.split("-")[0] # -为method的参数 - query_content = f"为{vertex_type}节点 {vertex}生成文档" - query = Message( - role_name="human", role_type="user", input_query=query_content, - code_engine_name=codebase_name, score_threshold=1.0, top_k=3, cb_search_type="tag", use_nh=use_nh, - local_graph_path=CB_ROOT_PATH, - ) - output_message, output_memory = phase.step(query, reinit_memory=True) - # print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - docs.append(output_memory.get_spec_parserd_output()) - - os.makedirs(f"{CB_ROOT_PATH}/docs", exist_ok=True) - with open(f"{CB_ROOT_PATH}/docs/raw_{vertex_type}.json", "w") as f: - json.dump(docs, f) - - -# 下面把生成的文档信息转换成markdown文本 -from muagent.utils.code2doc_util import * - -import json -with open(f"/home/user/code_base/docs/raw_method.json", "r") as f: - method_raw_data = json.load(f) - -with open(f"/home/user/code_base/docs/raw_class.json", "r") as f: - class_raw_data = json.load(f) - - -method_data = method_info_decode(method_raw_data) -class_data = class_info_decode(class_raw_data) -method_mds = encode2md(method_data, method_text_md) -class_mds = encode2md(class_data, 
class_text_md) - -docs_dict = {} -for k,v in class_mds.items(): - method_textmds = method_mds.get(k, []) - for vv in v: - # 理论上只有一个 - text_md = vv - - for method_textmd in method_textmds: - text_md += "\n
    " + method_textmd - - docs_dict.setdefault(k, []).append(text_md) - - with open(f"/home/user/code_base/docs/{k}.md", "w") as f: - f.write(text_md) -``` \ No newline at end of file +--- +group: + title: Connector + order: 0 +title: 自定义示例 +order: 4 +toc: content +--- + +## 如何创建你个性化的 agent phase 场景 + +下面通过 代码库来实现代码转 API 文档的自动生成, 来详细演示如何自定义一个 agent phase 的构建 + +### 设计你的 prompt 结构 + +- codeGenDocGroup_PROMPT, 构建 group Agent Prompt + +``` +# update new agent configs +codeGenDocGroup_PROMPT = """#### Agent Profile + +Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. + +When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list. + +#### Input Format + +#### Response Output Format + +**Code Path:** Extract the paths for the class/method/function that need to be addressed from the context + +**Role:** Select the role from agent names +""" +``` + +- classGenDoc_PROMPT, 构建 class code to api doc Prompt + +```` +classGenDoc_PROMPT = """#### Agent Profile +As an advanced code documentation generator, you are proficient in translating class definitions into comprehensive documentation with a focus on instantiation parameters. +Your specific task is to parse the given code snippet of a class, extract information regarding its instantiation parameters. + +#### Input Format + +**Current_Vertex:** Provide the code vertex of the function or method. + +**Code Snippet:** Provide the full class definition, including the constructor and any parameters it may require for instantiation. + +#### Response Output Format +**Class Base:** Specify the base class or interface from which the current class extends, if any. + +**Class Description:** Offer a brief description of the class's purpose and functionality. + +**Init Parameters:** List each parameter from construct. 
For each parameter, provide: + - `param`: The parameter name + - `param_description`: A concise explanation of the parameter's purpose. + - `param_type`: The data type of the parameter, if explicitly defined. + + ```json + [ + { + "param": "parameter_name", + "param_description": "A brief description of what this parameter is used for.", + "param_type": "The data type of the parameter" + }, + ... + ] + ``` + + + If no parameter for construct, return + ```json + [] + ``` +""" +```` + +- funcGenDoc_PROMPT,构建 function code to api doc Prompt + +```` +funcGenDoc_PROMPT = """#### Agent Profile +You are a high-level code documentation assistant, skilled at extracting information from function/method code into detailed and well-structured documentation. + + +#### Input Format +**Code Path:** Provide the code path of the function or method you wish to document. +This name will be used to identify and extract the relevant details from the code snippet provided. + +**Current_Vertex:** Provide the code vertex of the function or method. + +**Code Snippet:** A segment of code that contains the function or method to be documented. + +#### Response Output Format + +**Class Description:** Offer a brief description of the method(function)'s purpose and functionality. + +**Parameters:** Extract parameter for the specific function/method Code from Code Snippet. For parameter, provide: + - `param`: The parameter name + - `param_description`: A concise explanation of the parameter's purpose. + - `param_type`: The data type of the parameter, if explicitly defined. + ```json + [ + { + "param": "parameter_name", + "param_description": "A brief description of what this parameter is used for.", + "param_type": "The data type of the parameter" + }, + ... + ] + ``` + + If no parameter for function/method, return + ```json + [] + ``` + +**Return Value Description:** Describe what the function/method returns upon completion. 
+ +**Return Type:** Indicate the type of data the function/method returns (e.g., string, integer, object, void). +""" +```` + +### 导包以及基础参数配置 + +- 首先增加 openai 配置,也可以是其它类似于 openai 接口的模型(通过 fastchat 启动) + +``` +import os, sys, json +from muagent.base_configs.env_config import CB_ROOT_PATH +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.connector.phase import BasePhase +from muagent.connector.agents import BaseAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.schema import Message, Role, ChainConfig +from muagent.codechat.codebase_handler.codebase_handler import CodeBaseHandler + +from loguru import logger +from muagent.tools import CodeRetrievalSingle + + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### 定义新的 agent 类 + +用于自定义 key-value 信息 + +``` +class CodeGenDocer(BaseAgent): + + def start_action_step(self, message: Message) -> Message: + '''do action before agent predict ''' + # 根据问题获取代码片段和节点信息 + action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, + embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag") + current_vertex = action_json['vertex'] + message.customed_kargs["Code Snippet"] = action_json["code"] + message.customed_kargs['Current_Vertex'] = current_vertex + return message + +``` + +### 准备 LLM & Embedding + +``` +llm_config = LLMConfig( + model_name="gpt-4", api_key=api_key, api_base_url=api_base_url, temperature=0.3 +) +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### 代码库加载 + +``` + +# initialize codebase +# delete codebase +codebase_name = 
'client_nebula' +code_path = "D://chromeDownloads/devopschat-bot/client_v2/client" +use_nh = True +do_interpret = False +cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, + llm_config=llm_config, embed_config=embed_config) +cbh.delete_codebase(codebase_name=codebase_name) + +# load codebase +cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, + llm_config=llm_config, embed_config=embed_config) +cbh.import_code(do_interpret=do_interpret) + +``` + +### 接下来就构建 phase 实例,开始执行 + +``` + +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "1" + +funcGenDoc_role = Role(role_type="assistant", role_name="funcGenDoc_role", prompt=funcGenDoc_PROMPT) +funcGenDoc = CodeGenDocer( + role=funcGenDoc_role, + chat_turn=1, + llm_config=llm_config, embed_config=embed_config, +) + + +classGenDoc_role = Role(role_type="assistant", role_name="classGenDoc_role", prompt=classGenDoc_PROMPT) +classGenDoc = CodeGenDocer( + role=classGenDoc_role, + chat_turn=1, + llm_config=llm_config, embed_config=embed_config, +) + +codeGenDocGroup_role = Role(role_type="assistant", role_name="codeGenDocGroup_role", prompt=codeGenDocGroup_PROMPT) +codeGenDocGroup = SelectorAgent( + role=codeGenDocGroup_role, + chat_turn=1, + llm_config=llm_config, embed_config=embed_config, + group_agents=[funcGenDoc, classGenDoc] +) + +chain_config = ChainConfig( + chain_name="codeGenDocGroup_chain", agents=[codeGenDocGroup.role.role_name,], + chat_turn=1) + +chain = BaseChain( + chainConfig=chain_config, agents=[codeGenDocGroup], + llm_config=llm_config, embed_config=embed_config, +) + +phase = BasePhase( + phase_name="codeGenDocGroup_phase", chains=[chain], + embed_config=embed_config, llm_config=llm_config +) +``` + +### 开始代码转 api 文档 + +``` +# 根据前面的load过程进行初始化 +cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH, + 
llm_config=llm_config, embed_config=embed_config) + +cbh.search_vertices(vertex_type="method") + +# 开始代码转换API文档结构 +for vertex_type in ["class", "method"]: + vertexes = cbh.search_vertices(vertex_type=vertex_type) + logger.info(f"vertexes={vertexes}") + + # round-1 + docs = [] + for vertex in vertexes: + vertex = vertex.split("-")[0] # -为method的参数 + query_content = f"为{vertex_type}节点 {vertex}生成文档" + query = Message( + role_name="human", role_type="user", input_query=query_content, + code_engine_name=codebase_name, score_threshold=1.0, top_k=3, cb_search_type="tag", use_nh=use_nh, + local_graph_path=CB_ROOT_PATH, + ) + output_message, output_memory = phase.step(query, reinit_memory=True) + # print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + docs.append(output_memory.get_spec_parserd_output()) + + os.makedirs(f"{CB_ROOT_PATH}/docs", exist_ok=True) + with open(f"{CB_ROOT_PATH}/docs/raw_{vertex_type}.json", "w") as f: + json.dump(docs, f) + + +# 下面把生成的文档信息转换成markdown文本 +from muagent.utils.code2doc_util import * + +import json +with open(f"/home/user/code_base/docs/raw_method.json", "r") as f: + method_raw_data = json.load(f) + +with open(f"/home/user/code_base/docs/raw_class.json", "r") as f: + class_raw_data = json.load(f) + + +method_data = method_info_decode(method_raw_data) +class_data = class_info_decode(class_raw_data) +method_mds = encode2md(method_data, method_text_md) +class_mds = encode2md(class_data, class_text_md) + +docs_dict = {} +for k,v in class_mds.items(): + method_textmds = method_mds.get(k, []) + for vv in v: + # 理论上只有一个 + text_md = vv + + for method_textmd in method_textmds: + text_md += "\n
    " + method_textmd + + docs_dict.setdefault(k, []).append(text_md) + + with open(f"/home/user/code_base/docs/{k}.md", "w") as f: + f.write(text_md) +``` diff --git a/content/en/muagent/llm_models/embedding_config.md b/docs/docs/api-docs/MuAgent/llm_models/embedding_config.en-US.md similarity index 91% rename from content/en/muagent/llm_models/embedding_config.md rename to docs/docs/api-docs/MuAgent/llm_models/embedding_config.en-US.md index 0a99caf..9c59367 100644 --- a/content/en/muagent/llm_models/embedding_config.md +++ b/docs/docs/api-docs/MuAgent/llm_models/embedding_config.en-US.md @@ -1,65 +1,71 @@ ---- -title: Embedding Config -url: "muagent/embedding-model-config" -aliases: -- "/muagent/embedding-model-config" ---- - - -## Prepare Relevant Parameters -First, add the OpenAI configuration; this could also be a model similar to the OpenAI interface (launched via fastchat). -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -``` - -## Build LLM Config -- Constructing with a local model file -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - - -- Constructing via OpenAI -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig - -embed_config = EmbedConfig( - embed_engine="openai", api_key=api_key, api_base_url=api_base_url, -) -``` - -- Customizing and inputting langchain embeddings -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig - -class CustomizedEmbeddings(Embeddings): - def embed_documents(self, texts: List[str]) -> List[List[float]]: - embeddings = [] - # add your embedding code - return embeddings - def embed_query(self, text: str) -> List[float]: - """Compute query embeddings using a HuggingFace transformer model. - Args: - text: The text to embed. 
- Returns: - Embeddings for the text. - """ - # add your embedding code - return embedding - - -embeddings = CustomizedEmbeddings() -embed_config = EmbedConfig( - embed_model="default", - langchain_embeddings=embeddings -) -``` \ No newline at end of file +--- +group: + title: llm_models + order: 1 +title: Embedding Config +order: 0 +toc: content +--- + +## Prepare Relevant Parameters + +First, add the OpenAI configuration; this could also be a model similar to the OpenAI interface (launched via fastchat). + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +``` + +## Build LLM Config + +- Constructing with a local model file + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +- Constructing via OpenAI + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig + +embed_config = EmbedConfig( + embed_engine="openai", api_key=api_key, api_base_url=api_base_url, +) +``` + +- Customizing and inputting langchain embeddings + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig + +class CustomizedEmbeddings(Embeddings): + def embed_documents(self, texts: List[str]) -> List[List[float]]: + embeddings = [] + # add your embedding code + return embeddings + def embed_query(self, text: str) -> List[float]: + """Compute query embeddings using a HuggingFace transformer model. + Args: + text: The text to embed. + Returns: + Embeddings for the text. 
+ """ + # add your embedding code + return embedding + + +embeddings = CustomizedEmbeddings() +embed_config = EmbedConfig( + embed_model="default", + langchain_embeddings=embeddings +) +``` diff --git a/content/zh/muagent/llm_models/embedding_config.md b/docs/docs/api-docs/MuAgent/llm_models/embedding_config.zh-CN.md similarity index 79% rename from content/zh/muagent/llm_models/embedding_config.md rename to docs/docs/api-docs/MuAgent/llm_models/embedding_config.zh-CN.md index 1d9b5cd..bbb85a5 100644 --- a/content/zh/muagent/llm_models/embedding_config.md +++ b/docs/docs/api-docs/MuAgent/llm_models/embedding_config.zh-CN.md @@ -1,71 +1,75 @@ ---- -title: Embedding 配置 -url: "muagent/embedding-model-config-zh" -aliases: -- "/muagent/embedding-model-config-zh" ---- - - -## 准备相关参数 -首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) - -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -``` - - -## 构建LLM Config -- 通过本地模型文件构建 -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig - -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - - -- 通过openai构建 -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig - -embed_config = EmbedConfig( - embed_engine="openai", api_key=api_key, api_base_url=api_base_url, -) -``` - -- 自定义langchain embeddings传入 -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig - - -class CustomizedEmbeddings(Embeddings): - - def embed_documents(self, texts: List[str]) -> List[List[float]]: - embeddings = [] - # add your embedding code - return embeddings - - def embed_query(self, text: str) -> List[float]: - """Compute query embeddings using a HuggingFace transformer model. - - Args: - text: The text to embed. - - Returns: - Embeddings for the text. 
- """ - # add your embedding code - return embedding - -embeddings = CustomizedEmbeddings() -embed_config = EmbedConfig( - embed_model="default", - langchain_embeddings=embeddings -) -``` \ No newline at end of file +--- +group: + title: llm_models + order: 1 +title: Embedding 配置 +order: 0 +toc: content +--- + +## 准备相关参数 + +首先增加 openai 配置,也可以是其它类似于 openai 接口的模型(通过 fastchat 启动) + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +``` + +## 构建 LLM Config + +- 通过本地模型文件构建 + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +- 通过 openai 构建 + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig + +embed_config = EmbedConfig( + embed_engine="openai", api_key=api_key, api_base_url=api_base_url, +) +``` + +- 自定义 langchain embeddings 传入 + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig + + +class CustomizedEmbeddings(Embeddings): + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + embeddings = [] + # add your embedding code + return embeddings + + def embed_query(self, text: str) -> List[float]: + """Compute query embeddings using a HuggingFace transformer model. + + Args: + text: The text to embed. + + Returns: + Embeddings for the text. 
+ """ + # add your embedding code + return embedding + +embeddings = CustomizedEmbeddings() +embed_config = EmbedConfig( + embed_model="default", + langchain_embeddings=embeddings +) +``` diff --git a/content/en/muagent/llm_models/llm_config.md b/docs/docs/api-docs/MuAgent/llm_models/llm_config.en-US.md similarity index 89% rename from content/en/muagent/llm_models/llm_config.md rename to docs/docs/api-docs/MuAgent/llm_models/llm_config.en-US.md index 4365298..83f241c 100644 --- a/content/en/muagent/llm_models/llm_config.md +++ b/docs/docs/api-docs/MuAgent/llm_models/llm_config.en-US.md @@ -1,57 +1,61 @@ ---- -title: LLM Config -url: "muagent/llm-model-config" -aliases: -- "/muagent/llm-model-config" ---- - -## Prepare Relevant Parameters -First, add the OpenAI configuration, or you can use another model similar to the OpenAI interface (launched through fastchat). -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -``` - - -## Build LLM Config -- By passing the class `openai` - -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) -``` - - -- Customizing and inputting langchain LLM -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from langchain.llms.base import BaseLLM, LLM - - -class CustomizedModel(LLM): - repetition_penalty = 1.1 - temperature = 0.2 - top_k = 40 - top_p = 0.9 - - def predict(self, prompt: str, stop: Optional[List[str]] = None) -> str: - return self._call(prompt, stop) - - def _call(self, prompt: str, - stop: Optional[List[str]] = None) -> str: - """_call""" - return "" - - -llm = CustomizedModel() -llm_config = LLMConfig( - llm=llm -) -``` \ No newline at end of file +--- +group: + title: llm_models + order: 1 +title: LLM Config +order: -1 +toc: content +--- + +## Prepare Relevant Parameters + +First, 
add the OpenAI configuration, or you can use another model similar to the OpenAI interface (launched through fastchat). + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +``` + +## Build LLM Config + +- By passing the class `openai` + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) +``` + +- Customizing and inputting langchain LLM + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from langchain.llms.base import BaseLLM, LLM + + +class CustomizedModel(LLM): + repetition_penalty = 1.1 + temperature = 0.2 + top_k = 40 + top_p = 0.9 + + def predict(self, prompt: str, stop: Optional[List[str]] = None) -> str: + return self._call(prompt, stop) + + def _call(self, prompt: str, + stop: Optional[List[str]] = None) -> str: + """_call""" + return "" + + +llm = CustomizedModel() +llm_config = LLMConfig( + llm=llm +) +``` diff --git a/content/zh/muagent/llm_models/llm_config.md b/docs/docs/api-docs/MuAgent/llm_models/llm_config.zh-CN.md similarity index 77% rename from content/zh/muagent/llm_models/llm_config.md rename to docs/docs/api-docs/MuAgent/llm_models/llm_config.zh-CN.md index 3ec20b9..6f2a5f0 100644 --- a/content/zh/muagent/llm_models/llm_config.md +++ b/docs/docs/api-docs/MuAgent/llm_models/llm_config.zh-CN.md @@ -1,55 +1,59 @@ ---- -title: LLM 配置 -url: "muagent/llm-model-config-zh" -aliases: -- "/muagent/llm-model-config-zh" ---- - - -## 准备相关参数 -首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) - -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -``` - - -## 构建LLM Config -- 通过调用 类openai 传入 -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, 
temperature=0.3, - stop="**Observation:**" -) -``` - -- 自定义 langchain LLM 传入 -``` -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from langchain.llms.base import BaseLLM, LLM - -class CustomizedModel(LLM): - repetition_penalty = 1.1 - temperature = 0.2 - top_k = 40 - top_p = 0.9 - - def predict(self, prompt: str, stop: Optional[List[str]] = None) -> str: - return self._call(prompt, stop) - - def _call(self, prompt: str, - stop: Optional[List[str]] = None) -> str: - """_call - """ - return "" - -llm = CustomizedModel() -llm_config = LLMConfig( - llm=llm -) -``` \ No newline at end of file +--- +group: + title: llm_models + order: 1 +title: LLM 配置 +order: -1 +toc: content +--- + +## 准备相关参数 + +首先增加 openai 配置,也可以是其它类似于 openai 接口的模型(通过 fastchat 启动) + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +``` + +## 构建 LLM Config + +- 通过调用 类 openai 传入 + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) +``` + +- 自定义 langchain LLM 传入 + +``` +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from langchain.llms.base import BaseLLM, LLM + +class CustomizedModel(LLM): + repetition_penalty = 1.1 + temperature = 0.2 + top_k = 40 + top_p = 0.9 + + def predict(self, prompt: str, stop: Optional[List[str]] = None) -> str: + return self._call(prompt, stop) + + def _call(self, prompt: str, + stop: Optional[List[str]] = None) -> str: + """_call + """ + return "" + +llm = CustomizedModel() +llm_config = LLMConfig( + llm=llm +) +``` diff --git a/content/en/muagent/overview/agent-flow.md b/docs/docs/api-docs/MuAgent/overview/agent-flow.en-US.md similarity index 86% rename from content/en/muagent/overview/agent-flow.md rename to docs/docs/api-docs/MuAgent/overview/agent-flow.en-US.md index 664fdc9..6228bd0 100644 --- 
a/content/en/muagent/overview/agent-flow.md +++ b/docs/docs/api-docs/MuAgent/overview/agent-flow.en-US.md @@ -1,46 +1,52 @@ ---- -title: Agent Flow -slug: Agent Flow -url: "muagent/agent-flow" -aliases: -- "/muagent/agent-flow" ---- - - -## Introduction to Core Connectors -To facilitate everyone's understanding of the entire muagent link, we adopt the Flow format to introduce in detail how to build through configuration - -
    - 图片 -
    - - -
    Below, we first introduce the related core components
    - -### Agent -On the design level of the Agent, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios: - -1. BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input => output according to the Prompt format. -2. ReactAgent: Provides standard React functionality, accomplishing current tasks based on questions. -3. ExecutorAgent: Performs sequential execution of task lists, completing related tasks according to plans arranged by the User or the previous Agent. -4. SelectorAgent: Provides the function of selecting an Agent, choosing the appropriate Agent to respond according to the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which will later be managed by the Memory Manager. - -It selects the appropriate Agent to respond based on the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which is subsequently managed by the Memory Manager. - -### Chain -Basic Chain: BaseChain, connects the interactions of agents, manages the related messages and memory. - -### Phase -Basic Phase: BasePhase, connects the interactions of chains, and manages the related messages and memory. - -### Prompt Manager -The prompt creation for each agent in the Mutli-Agent link: - -- By setting simple prompt_input_keys and prompt_output_keys, the preset Prompt Context creation logic can be followed to quickly configure the agent prompt. -- It is also possible to design a new key-context in the prompt manager module, achieving personalized Agent Prompt. - -### Memory Manager -Mainly used for the management of chat history: -- Manages the reading and writing of chat history in a database, including user input, llm output, doc retrieval, code retrieval, search retrieval. 
-- Summarizes the key information in chat history to create a summary context, which serves as a prompt context. -- Provides a retrieval function to search for information related to the question in the chat history or summary context, assisting with question answering. \ No newline at end of file +--- +group: + title: ❤️ Codefuse-muAgent + order: -1 +title: Agent Flow +order: 0 +toc: content +--- + +## Introduction to Core Connectors + +To facilitate everyone's understanding of the entire CoAgent link, we use a Flow format to detail how to build through configuration settings. + +
    + 图片 +
    + +
    Below, we first introduce the related core components
+ +### Agent + +On the design level of the Agent, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios: + +1. BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input => output according to the Prompt format. +2. ReactAgent: Provides standard React functionality, accomplishing current tasks based on questions. +3. ExecutorAgent: Performs sequential execution of task lists, completing related tasks according to plans arranged by the User or the previous Agent. +4. SelectorAgent: Provides the function of selecting an Agent, choosing the appropriate Agent to respond according to the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which will later be managed by the Memory Manager. + +It selects the appropriate Agent to respond based on the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which is subsequently managed by the Memory Manager. + +### Chain + +Basic Chain: BaseChain, connects the interactions of agents, manages the related messages and memory. + +### Phase + +Basic Phase: BasePhase, connects the interactions of chains, and manages the related messages and memory. + +### Prompt Manager + +The prompt creation for each agent in the Multi-Agent link: + +- By setting simple prompt_input_keys and prompt_output_keys, the preset Prompt Context creation logic can be followed to quickly configure the agent prompt. +- It is also possible to design a new key-context in the prompt manager module, achieving personalized Agent Prompt. + +### Memory Manager + +Mainly used for the management of chat history: + +- Manages the reading and writing of chat history in a database, including user input, llm output, doc retrieval, code retrieval, search retrieval. 
+- Summarizes the key information in chat history to create a summary context, which serves as a prompt context. +- Provides a retrieval function to search for information related to the question in the chat history or summary context, assisting with question answering. diff --git a/docs/docs/api-docs/MuAgent/overview/agent-flow.zh-CN.md b/docs/docs/api-docs/MuAgent/overview/agent-flow.zh-CN.md new file mode 100644 index 0000000..b126be6 --- /dev/null +++ b/docs/docs/api-docs/MuAgent/overview/agent-flow.zh-CN.md @@ -0,0 +1,55 @@ +--- +group: + title: ❤️ Codefuse-muAgent + order: -1 +title: Agent 编排 +order: 0 +toc: content +--- + +## 核心 Connector 介绍 + +为了便于大家理解整个 CoAgent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建 + +
    + 图片 +
    + +
    下面,我们先介绍相关的核心组件
+ +### Agent + +在 Agent 设计层面,我们提供了四种基本的 Agent 类型,对这些 Agent 进行 Role 的基础设定,可满足多种通用场景的交互和使用 + +1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据 Prompt 格式实现 输入 => 输出 + +2. ReactAgent:提供标准 React 的功能,根据问题实现当前任务 + +3. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个 Agent 编排的计划,完成相关任务 + +4. SelectorAgent:提供选择 Agent 的功能,根据 User 或 上一个 Agent 的问题选择合适的 Agent 来进行回答. + +输出后将 message push 到 memory pool 之中,后续通过 Memory Manager 进行管理 + +### Chain + +基础链路:BaseChain,串联 agent 的交互,完成相关 message 和 memory 的管理 + +### Phase + +基础场景:BasePhase,串联 chain 的交互,完成相关 message 和 memory 的管理 + +### Prompt Manager + +Multi-Agent 链路中每一个 agent 的 prompt 创建 + +- 通过对 prompt_input_keys 和 prompt_output_keys 对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现 agent prompt 快速配置 +- 也可以对 prompt manager 模块进行新的 key-context 设计,实现个性化的 Agent Prompt + +### Memory Manager + +主要用于 chat history 的管理 + +- 将 chat history 在数据库进行读写管理,包括 user input、 llm output、doc retrieval、code retrieval、search retrieval +- 对 chat history 进行关键信息总结 summary context,作为 prompt context +- 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 diff --git a/content/en/muagent/overview/multi-agent.md b/docs/docs/api-docs/MuAgent/overview/multi-agent.en-US.md similarity index 92% rename from content/en/muagent/overview/multi-agent.md rename to docs/docs/api-docs/MuAgent/overview/multi-agent.en-US.md index 9bfa32b..d71d6e6 100644 --- a/content/en/muagent/overview/multi-agent.md +++ b/docs/docs/api-docs/MuAgent/overview/multi-agent.en-US.md @@ -1,147 +1,159 @@ ---- -title: MuAgent -slug: MuAgent -url: "muagent/muagent" -aliases: -- "/muagent" -- "/muagent/multi-agent" -- "/muagent/muagent" -- "/muagent/muagent-overview" ---- - - -# Introduction -To enhance the performance of large models in terms of inference accuracy, various innovative Large Language Model (LLM) playbooks have emerged in the industry. From the earliest Chain of Thought (CoT) and Thread of Thought (ToT) to Games on Tracks (GoT), these methods have continually expanded the capability boundaries of LLMs. 
When handling complex problems, we can select, invoke and execute tool feedback through the ReAct process, while realizing multi-round tool use and multi-step execution. - -However, for more complex scenarios, such as the development of complex code, a single-function LLM Agent is clearly not up to the task. Therefore, the community has begun to develop combinations of multiple Agents, such as projects focused on the development field like metaGPT, GPT-Engineer, and chatDev, as well as the AutoGen project that focuses on automating the construction of Agents and Agent dialogue. - -After an in-depth analysis of these frameworks, it has been found that most Agent frameworks are highly coupled, with poor usability and extensibility. They implement specific scenarios in preset settings, but expanding to new scenarios can be very challenging. - -Therefore, we hope to build an extensible, easy-to-use Multi-Agent framework to support ChatBots in retrieving knowledge base information while assisting with various general tasks such as daily office work, data analysis, development, and operations. - -This project's Multi-Agent framework incorporates excellent designs from multiple frameworks, such as the message pool from metaGPT and the agent selector from autogen. - -
    - 图片 -
    - - -# MuAgent Framework -In MuAgent, in addition to defining the Agent interaction link and AgentBase basic execution flow, we have also designed two basic components: Prompt Manager and Memory Manager, which are used for automated construction of Prompts and chat history management, respectively. We have built an extensible, easy-to-use Multi-Agent framework, including the following content: - -- **Agent Base:** Established four basic types of Agents – BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent – to support basic activities in various scenarios. -- **Communication:** Completes the transfer of information between Agents through Message and Parse Message entities, and interacts with Memory Manager to manage memory in the Memory Pool. -- **Prompt Manager:** Automates the assembly of Customized Agent Prompts through Role Handler, Doc/Tool Handler, Session Handler, Customized Handler. -- **Memory Manager:** Supports storage management of chat history, information compression, memory retrieval, and finally storage in databases, local or vector databases through the Memory Pool. -- **Component:** Auxiliary ecosystem components for building Agents, including Retrieval, Tool, Action, Sandbox, etc. -- **Customized Model:** Supports the integration of private LLM and Embedding. - -## Agent Base -At the Agent level, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios. All Actions are executed by Agents. - -1. BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input => output according to the Prompt format. -
    - 图片 -
    - -2. ReactAgent: Provides standard React functionality, according to questions to execute current tasks. -
    - 图片 -
    - - -3. ExecutorAgent: Sequentially executes a list of tasks, completing related tasks according to plans arranged by the User or the previous Agent. The Agent receives a task list ([List[task]) and loops through the tasks (Feedback Agents can also be added in the middle for task re-optimization), until the task is complete. -
    - 图片 -
    - -4. SelectorAgent: Provides the function of selecting an Agent, choosing the appropriate Agent to respond based on the question from the User or the previous Agent. -
    - 图片 -
    - - -## Communication -To enable better interaction between Agents, as well as to provide each Agent with enough information to complete their specific tasks, we have divided the Message information body into several parts, such as System Content, Info Content, LLM Content, and LLM Parsed Content, etc. - -System Content: Used to store and manage the timing of the current LLM output, Role information, etc. -Info Content: LLM auxiliary information, such as knowledge base query information, code library retrieval information, tool information, Agent information, etc. - -LLM Content: Directly stores and conveys information generated by the LLM. -LLM Parsed Content: Parses the LLM's output into a more manageable key-value data structure, making it easier to filter through LLM content. -Customized Content: Manages key-value data content generated by custom actions, used for subsequent assembly and construction of custom Prompt templates. -By defining the above message formats, we can accomplish the transfer and management of general messages. Specific assembly methods can be seen in the Prompt Manager module. - - -## Context Manager -### Memory Manager -Mainly used for the management of chat history: - -- Storage Management: Implements the save and load management of chat history in the database or locally, including user input, LLM output, observation output. -- Information Compression: Summarizes key information from the chat history into a summary context, such as single text summaries, summaries from different angles, key information extraction, multi-text summaries, and serves as Prompt context. -- Memory Retrieval: Provides basic retrieval functions, retrieving information related to questions from chat history or Summary Context to assist in Q&A. -- LLM Automatic Trigger: Future definitions of policies or the use of LLM to trigger the compression summary and retrieval functions. 
- -### Prompt Manager -Asking LLMs has become common practice, but how to coordinate the planning and usage of tools, code writing abilities among multiple large models to guide their expected outputs has become a key issue. Essentially, this involves abstracting business problems into executable Prompts, so we're not just designing Agents but rather engaging in framework design after a deep understanding of the current demands. - -In actual business scenarios where LLMs are involved (excluding the SFT process), we can designate LLM to complete specific tasks and obtain expected outputs through the design of Agent Prompt content. In the process of MuAgent, the Prompt is divided into three parts: System Prompt, Context Prompt, Customized Prompt. - -- System Prompt includes Role Name, Role Description, Task, etc. -- Context Prompt includes Doc Context, Code Context, Tool Context, Agent Context, Session Context, etc. -- Customized Prompt involves custom inputs and outputs, such as... -We can also ask the model to output structured texts, such as the JSON string of a tool, code\ncode_content, etc., to complete particular workflows. - -**Automatic Prompt Assemble** - -After defining the structure as above, we can complete the automation assembly of Prompts in the following ways, without having to make extensive adjustments to the prompt each time: - -1. Upon defining an Agent, configure Role Name, Role Description, Task, etc., to determine what the Agent needs to do. -2. Pre-package some reusable Context Prompt general strategies, such as selectable Role's SessionContext, configurable Tool, Code Retrieval, Doc Retrieval, Search Retrieval, Agent to complete corresponding assemblies. -3. As the Agent's Prompt requires relatively personalized operations, it also supports the addition of new key-context designs within the Prompt Manager module to achieve personalized Agent Prompts. 
- -**Automatic Prompt Design** -Able to automatically design the best prompt based on role description, task, query, etc.; to be defined... - -**Multi Prompt Design** -Based on the previous definition of Prompt, we know that a Prompt consists of three parts: System Prompt, Context Prompt, Customized Prompt. Any changes in the three parts may cause changes in the final output of the LLM. - -For the same type of task, their System Prompt is the same. So, without considering the variations of Customiezd Prompt, it is possible to achieve the assembly differences of different contexts. For example, Prompt A obtains 10 rounds of chat history, while Prompt B uses 5 rounds of chat history, or alternatively, filters and compresses information in chat history. - -To be implemented... - - -## Component -### Retrieval -In all Prompts' Contexts, aside from Chat History session information, information based on external document libraries, code repositories, internet search results is also relied upon. This knowledge system beyond the model parameters can significantly enhance the Agent's ability to complete complex tasks. - -Thus, in MuAgent, we integrated three ways to retrieve information: Doc, Internet Search, Code Retrieval, and defined an abstract class IMRetrieval, supporting developers to customize their knowledge bases to complete the Agent's knowledge base registration. - -**Doc Retrieval** - -Document vector databases are currently the mainstream method for building knowledge bases, using Text Embedding models to vectorize documents and store them in vector databases. In the future, we will also support queries based on knowledge graphs and automatically extract entities and relations through large models to explore the complex relationships in data. - -**Code Retrieval** - -LLMs face the challenge of lagging training data for code generation, repair, and component understanding tasks, as well as not being able to perceive the context-dependent structure of code. 
During development, understanding, retrieving and querying metadata from the existing codebase and dependencies can take a considerable amount of time. Hence, we hope to provide an external knowledge system - - -**Search Retrieval** -In addition to the readily available document and code knowledge bases, in daily practice, browsing a large amount of web content to acquire more knowledge helps us understand emerging scenarios, businesses, technologies, and more. Hence, we've integrated duckduckgosearch, an open-source search tool, to provide LLMs with content beyond their knowledge reserves. - -### Tool -With OpenAI launching the Function Call feature, which generates parameters for specified tools through LLM and executes the call, machines can better understand and respond to human needs, thus solving practical problems and repetitive work. Nowadays, the ability to learn tools is increasingly becoming a standard feature of open-source models. Therefore, in MuAgent, it also supports agents to complete Tool registration. By using the Python registration template BaseToolModel class and writing related properties and methods such as Tool_name, Tool_description, ToolInputArgs, ToolOutputArgs, and run, tools can be quickly integrated. It also supports the direct use of langchain Tool interfaces. -For example, functions like the above XXRetrieval can also be registered as a Tool, ultimately called by LLM. - -### Action -In the definition of MuAgent, Action is viewed as a specific action or action flow that LLM needs to execute, including LLM information processing, knowledge retrieval, tool invocation, and code execution, etc., constituting a comprehensive and complex dynamic process. For instance, in the React process, we obtained a Tool parameter through LLM, and then "putting the tool parameter into the Tool and executing the call" is an Action, which practically invokes the Tool. 
Or, we defined an Agent, who orchestrates a fixed agent's Action steps, with the input parameters of this Agent specially designated by the Action. That is to say, whether the parameters are generated by LLM or set by engineering, as long as it involves a specific execution process, it is an Action. - -## Modules -- [connector](/muagent/connector-agent) Mainly introduces the work of this block of the Agent framework -- llm_models -- retrieval -- tools -- sandbox -- utils - - +--- +nav: + title: Docs + order: -1 + second: + title: API-Docs + order: 0 +group: + title: ❤️ Codefuse-muAgent + # index: true + order: -1 +title: muAgent +order: -1 +toc: content +--- + +# Introduction + +To enhance the performance of large models in terms of inference accuracy, various innovative Large Language Model (LLM) playbooks have emerged in the industry. From the earliest Chain of Thought (CoT) and Thread of Thought (ToT) to Games on Tracks (GoT), these methods have continually expanded the capability boundaries of LLMs. When handling complex problems, we can select, invoke and execute tool feedback through the ReAct process, while realizing multi-round tool use and multi-step execution. + +However, for more complex scenarios, such as the development of complex code, a single-function LLM Agent is clearly not up to the task. Therefore, the community has begun to develop combinations of multiple Agents, such as projects focused on the development field like metaGPT, GPT-Engineer, and chatDev, as well as the AutoGen project that focuses on automating the construction of Agents and Agent dialogue. + +After an in-depth analysis of these frameworks, it has been found that most Agent frameworks are highly coupled, with poor usability and extensibility. They implement specific scenarios in preset settings, but expanding to new scenarios can be very challenging. 
+ +Therefore, we hope to build an extensible, easy-to-use Multi-Agent framework to support ChatBots in retrieving knowledge base information while assisting with various general tasks such as daily office work, data analysis, development, and operations. + +This project's Multi-Agent framework incorporates excellent designs from multiple frameworks, such as the message pool from metaGPT and the agent selector from autogen. + +
    + 图片 +
    + +# MuAgent Framework + +In MuAgent, in addition to defining the Agent interaction link and AgentBase basic execution flow, we have also designed two basic components: Prompt Manager and Memory Manager, which are used for automated construction of Prompts and chat history management, respectively. We have built an extensible, easy-to-use Multi-Agent framework, including the following content: + +- **Agent Base:** Established four basic types of Agents – BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent – to support basic activities in various scenarios. +- **Communication:** Completes the transfer of information between Agents through Message and Parse Message entities, and interacts with Memory Manager to manage memory in the Memory Pool. +- **Prompt Manager:** Automates the assembly of Customized Agent Prompts through Role Handler, Doc/Tool Handler, Session Handler, Customized Handler. +- **Memory Manager:** Supports storage management of chat history, information compression, memory retrieval, and finally storage in databases, local or vector databases through the Memory Pool. +- **Component:** Auxiliary ecosystem components for building Agents, including Retrieval, Tool, Action, Sandbox, etc. +- **Customized Model:** Supports the integration of private LLM and Embedding. + +## Agent Base + +At the Agent level, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios. All Actions are executed by Agents. + +1. BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input => output according to the Prompt format. +
    + 图片 +
    + +2. ReactAgent: Provides standard React functionality, according to questions to execute current tasks. +
    + 图片 +
    + +3. ExecutorAgent: Sequentially executes a list of tasks, completing related tasks according to plans arranged by the User or the previous Agent. The Agent receives a task list (List[task]) and loops through the tasks (Feedback Agents can also be added in the middle for task re-optimization), until the task is complete. +
    + 图片 +
    + +4. SelectorAgent: Provides the function of selecting an Agent, choosing the appropriate Agent to respond based on the question from the User or the previous Agent. +
    + 图片 +
    + +## Communication + +To enable better interaction between Agents, as well as to provide each Agent with enough information to complete their specific tasks, we have divided the Message information body into several parts, such as System Content, Info Content, LLM Content, and LLM Parsed Content, etc. + +System Content: Used to store and manage the timing of the current LLM output, Role information, etc. +Info Content: LLM auxiliary information, such as knowledge base query information, code library retrieval information, tool information, Agent information, etc. + +LLM Content: Directly stores and conveys information generated by the LLM. +LLM Parsed Content: Parses the LLM's output into a more manageable key-value data structure, making it easier to filter through LLM content. +Customized Content: Manages key-value data content generated by custom actions, used for subsequent assembly and construction of custom Prompt templates. +By defining the above message formats, we can accomplish the transfer and management of general messages. Specific assembly methods can be seen in the Prompt Manager module. + +## Context Manager + +### Memory Manager + +Mainly used for the management of chat history: + +- Storage Management: Implements the save and load management of chat history in the database or locally, including user input, LLM output, observation output. +- Information Compression: Summarizes key information from the chat history into a summary context, such as single text summaries, summaries from different angles, key information extraction, multi-text summaries, and serves as Prompt context. +- Memory Retrieval: Provides basic retrieval functions, retrieving information related to questions from chat history or Summary Context to assist in Q&A. +- LLM Automatic Trigger: Future definitions of policies or the use of LLM to trigger the compression summary and retrieval functions. 
+ +### Prompt Manager + +Asking LLMs has become common practice, but how to coordinate the planning and usage of tools, code writing abilities among multiple large models to guide their expected outputs has become a key issue. Essentially, this involves abstracting business problems into executable Prompts, so we're not just designing Agents but rather engaging in framework design after a deep understanding of the current demands. + +In actual business scenarios where LLMs are involved (excluding the SFT process), we can designate LLM to complete specific tasks and obtain expected outputs through the design of Agent Prompt content. In the process of MuAgent, the Prompt is divided into three parts: System Prompt, Context Prompt, Customized Prompt. + +- System Prompt includes Role Name, Role Description, Task, etc. +- Context Prompt includes Doc Context, Code Context, Tool Context, Agent Context, Session Context, etc. +- Customized Prompt involves custom inputs and outputs, such as... + We can also ask the model to output structured texts, such as the JSON string of a tool, code\ncode_content, etc., to complete particular workflows. + +**Automatic Prompt Assemble** + +After defining the structure as above, we can complete the automation assembly of Prompts in the following ways, without having to make extensive adjustments to the prompt each time: + +1. Upon defining an Agent, configure Role Name, Role Description, Task, etc., to determine what the Agent needs to do. +2. Pre-package some reusable Context Prompt general strategies, such as selectable Role's SessionContext, configurable Tool, Code Retrieval, Doc Retrieval, Search Retrieval, Agent to complete corresponding assemblies. +3. As the Agent's Prompt requires relatively personalized operations, it also supports the addition of new key-context designs within the Prompt Manager module to achieve personalized Agent Prompts. 
+ +**Automatic Prompt Design** +Able to automatically design the best prompt based on role description, task, query, etc.; to be defined... + +**Multi Prompt Design** +Based on the previous definition of Prompt, we know that a Prompt consists of three parts: System Prompt, Context Prompt, Customized Prompt. Any changes in the three parts may cause changes in the final output of the LLM. + +For the same type of task, their System Prompt is the same. So, without considering the variations of Customized Prompt, it is possible to achieve the assembly differences of different contexts. For example, Prompt A obtains 10 rounds of chat history, while Prompt B uses 5 rounds of chat history, or alternatively, filters and compresses information in chat history. + +To be implemented... + +## Component + +### Retrieval + +In all Prompts' Contexts, aside from Chat History session information, information based on external document libraries, code repositories, internet search results is also relied upon. This knowledge system beyond the model parameters can significantly enhance the Agent's ability to complete complex tasks. + +Thus, in MuAgent, we integrated three ways to retrieve information: Doc, Internet Search, Code Retrieval, and defined an abstract class IMRetrieval, supporting developers to customize their knowledge bases to complete the Agent's knowledge base registration. + +**Doc Retrieval** + +Document vector databases are currently the mainstream method for building knowledge bases, using Text Embedding models to vectorize documents and store them in vector databases. In the future, we will also support queries based on knowledge graphs and automatically extract entities and relations through large models to explore the complex relationships in data. + +**Code Retrieval** + +LLMs face the challenge of lagging training data for code generation, repair, and component understanding tasks, as well as not being able to perceive the context-dependent structure of code. 
During development, understanding, retrieving and querying metadata from the existing codebase and dependencies can take a considerable amount of time. Hence, we hope to provide an external knowledge system of code for LLMs through code structure analysis and code retrieval. + +**Search Retrieval** +In addition to the readily available document and code knowledge bases, in daily practice, browsing a large amount of web content to acquire more knowledge helps us understand emerging scenarios, businesses, technologies, and more. Hence, we've integrated duckduckgosearch, an open-source search tool, to provide LLMs with content beyond their knowledge reserves. + +### Tool + +With OpenAI launching the Function Call feature, which generates parameters for specified tools through LLM and executes the call, machines can better understand and respond to human needs, thus solving practical problems and repetitive work. Nowadays, the ability to learn tools is increasingly becoming a standard feature of open-source models. Therefore, in MuAgent, it also supports agents to complete Tool registration. By using the Python registration template BaseToolModel class and writing related properties and methods such as Tool_name, Tool_description, ToolInputArgs, ToolOutputArgs, and run, tools can be quickly integrated. It also supports the direct use of langchain Tool interfaces. +For example, functions like the above XXRetrieval can also be registered as a Tool, ultimately called by LLM. + +### Action + +In the definition of MuAgent, Action is viewed as a specific action or action flow that LLM needs to execute, including LLM information processing, knowledge retrieval, tool invocation, and code execution, etc., constituting a comprehensive and complex dynamic process. For instance, in the React process, we obtained a Tool parameter through LLM, and then "putting the tool parameter into the Tool and executing the call" is an Action, which practically invokes the Tool. 
Or, we defined an Agent, who orchestrates a fixed agent's Action steps, with the input parameters of this Agent specially designated by the Action. That is to say, whether the parameters are generated by LLM or set by engineering, as long as it involves a specific execution process, it is an Action. + +## Module Classification + +- [connector](/en-US/coagent/connector/connector_agent) +- document_loaders +- embeddings +- llm_models +- orm +- sandbox +- service +- text_splitter +- tools +- utils diff --git a/docs/docs/api-docs/MuAgent/overview/multi-agent.zh-CN.md b/docs/docs/api-docs/MuAgent/overview/multi-agent.zh-CN.md new file mode 100644 index 0000000..19d5a83 --- /dev/null +++ b/docs/docs/api-docs/MuAgent/overview/multi-agent.zh-CN.md @@ -0,0 +1,150 @@ +--- +nav: + title: 文档 + order: -1 + second: + title: API文档 + order: 0 +group: + title: ❤️ Codefuse-muAgent + # index: true + order: -1 +title: muAgent +order: -1 +toc: content +--- + +# 简介 + +为了提高大型模型在推理准确性方面的表现，业界出现了多种创新的大型语言模型(LLM)玩法。从最早的 CoT、ToT 到 GoT，这些方法不断拓展了 LLM 的能力边界。在处理复杂问题时，我们可以通过 ReAct 过程来选择、调用和执行工具反馈，同时实现多轮工具使用和多步骤执行。 + +但对于更复杂的场景，例如复杂代码的开发，单一功能的 LLM Agent 显然难以胜任。因此，社区开始发展出多 Agent 的组合玩法，比如专注于 metaGPT、GPT-Engineer、chatDev 等开发领域的项目，以及专注于自动化构建 Agent 和 Agent 对话的 AutoGen 项目。 + +经过对这些框架的深入分析，发现大多数的 Agent 框架整体耦合度较高，其易用性和可扩展性较差。在预设场景中实现特定场景，但想要进行场景扩展却困难重重。 + +因此，我们希望构建一个可扩展、易于使用的 Multi-Agent 框架，以支持 ChatBot 在获取知识库信息的同时，能够辅助完成日常办公、数据分析、开发运维等各种通用任务。 + +本项目的 Multi-Agent 框架汲取兼容了多个框架的优秀设计，比如 metaGPT 中的消息池(message pool)、autogen 中的代理选择器(agent selector)等。 + +
    + 图片 +
    + +# muAgent 框架 + +在 MuAgent 中,我们除了定义 Agent 交互链路和 AgentBase 基础执行流以外,还额外设计了 Prompt Manager 和 Memory Manager 两个基础组件,分别用于自动化构建 Prompt 和 chat history 管理。最终构建出一个可扩展、易于使用的 Multi-Agent 框架,包括以下内容 + +- Agent Base:构建了四种基本的 Agent 类型 BaseAgent、ReactAgent、ExecutorAgent、SelectorAgent,支撑各种场景的基础活动 +- Communication:通过 Message 和 Parse Message 实体完成 Agent 间的信息传递,并与 Memory Manager 交互再 Memory Pool 完成记忆管理 +- Prompt Manager:通过 Role Handler、Doc/Tool Handler、Session Handler、Customized Handler,来自动化组装 Customized 的 Agent Prompt +- Memory Manager: 用于支撑 chat history 的存储管理、信息压缩、记忆检索等管理,最后通过 Memory Pool 在数据库、本地、向量数据库中完成存储 +- Component:用于构建 Agent 的辅助生态组件,包括 Retrieval、Tool、Action、Sandbox 等 +- Customized Model:支持私有化的 LLM 和 Embedding 的接入 + +## Agent Base + +在 Agent 层面,提供四种基本的 Agent 类型,对这些 Agent 进行 Role 的基础设定,可满足多种通用场景的交互和使用。所有的 Action 都由 Agent 执行。 + +1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据 Prompt 格式实现 输入 => 输出 + +
    + 图片 +
    + +2. ReactAgent:提供标准 React 的功能,根据问题实现当前任务 +
    + 图片 +
    + +3. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个 Agent 编排的计划,完成相关任务 +Agent 接受到任务清单(List[task]),对这个任务清单 Task 进行循环执行(中间也可添加 Feedback Agent 来进行任务重新优化),直到任务完成 +
    + 图片 +
    + +4. SelectorAgent:提供选择 Agent 的功能,根据 User 或 上一个 Agent 的问题选择合适的 Agent 来进行回答. +
    + 图片 +
    + +## Communication + +为了让 Agent 之间进行更好的交互,以及能够让每一个 Agent 接受到足够的信息完成它们特定任务,我们将 Message 信息体分成了多个部分,System Content、Info Content、LLM Content 和 LLM Parsed Content 等 + +- System Content:用于存储管理当前 LLM 输出的时间,Role 信息等 +- Info Content:LLM 辅助信息,比如像知识库查询信息、代码库检索信息、工具信息、Agent 信息等 +- LLM Content:直接存储和传递 LLM 产生的信息 +- LLM Parsed Content:对 LLM 进行解析转成更易操作的 key-value 数据结构,方便对 LLM 内容进行过滤 +- Customized Content:用于管理自定义 action 产生的 key-value 数据内容,用于后续自定义 Prompt 模板的组装构建 + +通过对以上消息格式的定义,我们便可以完成通用消息的传递和管理。具体组装见 Prompt Manager 模块 + +## Context Manager + +### Memory Manager + +主要用于 chat history 的管理 + +- 存储管理:在数据库或本地实现对 chat history 进行 save 和 load 管理,包括 user input、 llm output、observation ouput +- 信息压缩:对 chat history 进行关键信息压缩总结 summary context,比如说单文本概况、侧重不同角度进行文本概况、关键信息提取、多文本概况,作为 Prompt context +- 记忆检索:提供基础检索功能,检索 chat history 或者 Summary Context 中与问题相关信息,辅助问答 +- LLM 自动触发:后续定义策略或通过 LLM 来 触发 压缩总结和检索的功能 + +### Prompt Manager + +提问 LLM 已经成为一种常见的实践,但如何让多个大模型分工并协调好 LLM 间的规划、调用工具、代码编写能力,来引导它们产生期望的输出,成为了一个关键的问题,其本质就是将业务问题抽象并拆解到可执行的 Prompt,那与其说我们是在设计 Agents,不如说是对当前需求的深入理解后进行框架设计。 +在 LLM 介入到实际业务场景(不涉及 SFT 过程),我们能通过设计 Agent Prompt 的内容来指定 LLM 完成相应任务得到相应输出。在 MuAgent 这个过程中,将这个 Prompt 分成了三个部分,System Prompt、Context Prompt、Customized Prompt + +- System Prompt 包括 Role Name、Role Description、Task 等 +- Context Prompt 包括 Doc Context、Code Context、Tool Context、Agent Context、Session Context 等 +- Customized Prompt 则是 自定义的一些 Input 和 Ouput,比如说 ... + 我们还可以要求模型输出结构化的文本,比如说 tool 的 json 串、*code\ncode_content*等来完成特定工作流。 + +**Automatic Prompt Assemble** +在按照上述结构定义后,我们便可以通过以下方式来完成 Prompt 的自动化组装,不需要每次去做大量的 prompt 调整工作 + +1. 定义 Agent 时直接配置 Role Name、Role Description、Task 等来决定 Agent 需要做的事情 +2. 预封装一些可复用的 Context Prompt 通用策略,比如说可筛选 Role 的 SessionContext、可配置的 Tool、Code Retrieval、Doc Retrieval、Search Retrieval、Agent 来完成对应的组装 +3. 由于 Agent 的 Prompt 是相对个性化的操作,所以也支持在 Prompt Manager 模块内新增新的 key-context 设计,实现个性化的 Agent Prompt。 + +**Automatic Prompt Design** +能根据 role description、task、query 等来自动化设计出最优的 prompt;待定义... 
+ +**Multi Prompt Design** +根据前面 Prompt 的定义，我们可以了解到 Prompt 由 System Prompt、Context Prompt、Customized Prompt 三个部分组成，三个部分的任一变化都有可能会引起 LLM 最终输出结果的变化。 +对于同种任务而言，即它们的 System Prompt 是相同的。那么在不考虑 Customized Prompt 变化时，就可实现不同上下文的组装差异，比如说 Prompt A 获取 10 轮的 chat history，而 Prompt B 采用 5 轮的 chat history，又或者是对 chat history 进行信息过滤、信息压缩等。 +待实现... + +## Component + +### Retrieval + +在所有 Prompt 的 Context 中，除了 Chat History 的会话信息外，还需要依赖于从外界文档知识库、代码库、互联网搜索得来的相关信息，这些模型参数知识外的知识体系能够极大提升 Agent 完成复杂任务的能力。 +于是在 MuAgent 中我们集成了 Doc、Internet Search、Code Retrieval 三种检索信息的方式，并定义了一个抽象 IMRetrieval 类，可支持开发者自定义个性化的知识库，来完成 Agent 的知识库注册。 + +**Doc Retrieval** +文档向量数据库是当前最主流的知识库构建方法，使用 Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式，来挖掘数据中多种复杂关系。 + +**Code Retrieval** +LLM 在代码生成、修复以及组件理解的任务上，会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中，对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来，以及为 LLM 提供知识体系外的代码。 + +**Search Retrieval** +除了现成的文档和代码知识库以及之外，在日常中实践中会去浏览大量网页内容获取更多的知识，帮助我们理解新兴的场景、业务、技术等，于是我们接入了 duckduckgosearch 这款开源的搜索工具，能够为 LLM 提供知识储备以外的内容。 + +### Tool + +随着 OpenAI 推出了 Function Call 功能，通过 LLM 生成指定工具的参数并执行调用，使机器能更好地理解和回应人类的需求，从而解决实际问题和重复性的工作。现如今工具学习能力越来越作为开源模型的标配。那在 MuAgent 中也支持 Agent 完成 Tool 的注册，通过 Python 注册模板`BaseToolModel`类，编写 Tool_name、Tool_description、ToolInputArgs、ToolOutputArgs、run 等相关属性和方法即可实现工具的快速接入，同时支持 langchain Tool 接口的直接使用。 +例如像上述 XXRetrieval 的功能也可以注册为 Tool，最终由 LLM 执行调用。 + +### Action + +在 MuAgent 的定义里，Action 是作为 LLM 具体要执行的动作或动作流，会包括 LLM 信息处理、知识检索、工具调用以及代码执行等一个综合性的复杂过程，是一个动态过程。比如在 React 过程中，我们通过 LLM 获取到了一个 Tool 参数，接下来"将工具参数放入到 Tool 并执行调用"这个过程就是 Action，它去实践性的调用了 Tool。又或者说我们定义了一个 Agent，它编排在一个固定 Agent 的 Action 步骤之中，这个 Agent 的输入参数由 Action 特殊指定。也就是说无论是由 LLM 产生参数还是工程设定参数，只要涉及具体的执行过程，就是一个 Action。 + +## 模块分类 + +- [connector](../connector/connector_agent.zh-CN.md) 主要介绍这块 Agent 框架的工作 +- llm_models +- retrieval +- tools +- sandbox +- utils diff --git a/content/en/muagent/overview/quick-start.md b/docs/docs/api-docs/MuAgent/overview/quick-start.en-US.md similarity index 95% 
rename from content/en/muagent/overview/quick-start.md rename to docs/docs/api-docs/MuAgent/overview/quick-start.en-US.md index d484080..11142c6 100644 --- a/content/en/muagent/overview/quick-start.md +++ b/docs/docs/api-docs/MuAgent/overview/quick-start.en-US.md @@ -1,350 +1,360 @@ ---- -title: Quick Start -slug: Quick Start -url: "muagent/quick-start" -aliases: -- "/muagent/quick-start" ---- - - -## Quick Start -For a complete example, see [examples/muagent_examples](htpps://) -### First, prepare the relevant configuration information -``` -import os, sys - -api_key = "sk-xxx" -api_base_url= "https://api.openai.com/v1" -model_name = "gpt-3.5-turbo" -embed_model = "{{embed_model_name}}" -embed_model_path = "{{embed_model_path}}" -# -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" -``` - -### Then, set up LLM configuration and Embedding model configuration -``` -from muagent.base_configs.env_config import JUPYTER_WORK_PATH -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig -from muagent.connector.phase import BasePhase -from muagent.connector.schema import Message - - -llm_config = LLMConfig( - model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, - stop="**Observation:**" -) -embed_config = EmbedConfig( - embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path -) -``` - -### Finally, choose an existing scenario to execute -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# Choose a scenario -phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -# round-1 needs to be completed by code 
interpreter -query_content = "Confirm whether employee_data.csv exists locally and view its columns and data types; then draw a bar chart" -query = Message( - role_name="human", role_type="user", tools=[], input_query=query_content, -) -# phase.pre_print(query) # This function is used to pre-print the Prompt of the Agents' execution chain -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - - -# round-2 requires the execution of a tool -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) -query_content = "Please help me check if the server at 127.0.0.1 had any issues at 10 o'clock, help me to determine" -query = Message( - role_name="human", role_type="user", tools=tools, input_query=query_content, -) -# phase.pre_print(query) # This function is used to pre-print the Prompt of the Agents' execution chain -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -## Phase Customization -Refer to [How to Customize Phase](/muagent/customed-examples) - - -## Introduction and Usage of Scenes -Below are some specific scene introductions and usages. -We also welcome everyone to brainstorm and construct some interesting cases. 
-### baseTaskPhase -Scenarios involving task segmentation and multi-step execution of xAgents - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "baseTaskPhase" -phase = BasePhase( -phase_name, embed_config=embed_config, llm_config=llm_config, -) - - -# round-1 -query_content = "Check if employee_data.csv exists locally and see what columns and data types it has; then draw a bar chart" -query = Message( - role_name="human", role_type="user", input_query=query_content, - ) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### codeReactPhase -The code interpreter scenario based on React - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# then, create a data analyze phase -phase_name = "codeReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, - jupyter_work_path=JUPYTER_WORK_PATH, -) - -# round-1 -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### codeToolReactPhase -The tool invocation and code interpreter scenario based on 
the React template - - -``` -TOOL_SETS = [ - "StockName", "StockInfo", - ] -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "codeToolReactPhase" - -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -query_content = "Query the stock code of Kweichow Moutai and acquire the time series data of the last 10 days up to the current date (December 24th, 2023); then use code to draw a line chart and analyze it" - -query = Message(role_name="human", role_type="user", input_query=query_content, tools=tools) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### docChatPhase -Knowledge Base Retrieval and Question-Answering Pipeline - -- example 1 -``` -# create your knowledge base -from muagent.service.kb_api import create_kb, upload_files2kb -from muagent.utils.server_utils import run_async -from muagent.orm import create_tables - - -# use to test, don't create some directory -create_tables() - -# create a knowledge base -kb_name = "example_test" -run_async(create_kb(knowledge_base_name=kb_name, vector_store_type="faiss", embed_config=embed_config, kb_root_path=KB_ROOT_PATH)) - -# add doc to knowledge base -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") -files = [file] -upload_files2kb(files, kb_name, embed_config, kb_root_path=KB_ROOT_PATH) - - -## start to chat with knowledge base -# log-level, print prompt, and llm predict -os.environ["log_verbose"] = "0" - -## example 1 -# set chat phase -phase_name = "docChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, -) - -# round-1 -query_content = "What modules does langchain have?" 
-query = Message( - role_name="human", role_type="user", input_query=query_content, - doc_engine_name=kb_name, score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content = "What is the use of prompts?" -query = Message( - role_name="human", role_type="user", input_query=query_content, - doc_engine_name=kb_name, score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -- example 2 -``` -## Customized register demo -from muagent.tools import DocRetrieval -class BaseDocRetrieval(IMRertrieval): - - def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): - self.knowledge_base_name = knowledge_base_name - self.search_top = search_top - self.score_threshold = score_threshold - self.embed_config = embed_config - self.kb_root_path = kb_root_path - - def run(self, query: str, search_top=None, score_threshold=None, ): - docs = DocRetrieval.run( - query=query, knowledge_base_name=self.knowledge_base_name, - search_top=search_top or self.search_top, - score_threshold=score_threshold or self.score_threshold, - embed_config=self.embed_config, - kb_root_path=self.kb_root_path - ) - return docs - - -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config) - -# set chat phase -phase_name = "docChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, - doc_retrieval=doc_retrieval -) - -# round-1 -query_content = "What modules does langchain have?" 
-query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - - -# round-2 -query_content = "What is the use of prompts?" -query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### metagpt_code_devlop -The code construction Phase in metagpt - -``` -# log level, print prompt, and llm predict -os.environ["log_verbose"] = "2" -phase_name = "metagpt_code_development" - -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -query_content = "create a snake game" -query = Message(role_name="human", role_type="user", input_query=query_content) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### searchChatPhase -Fixed scenario chain, search first then directly answer based on LLM - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -# This can be configured when the duckduckgo connection is not available -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5h://127.0.0.1:13659" -phase_name = "searchChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - - -# round-1 -query_content1 = "Who is the current President of the United States?" 
-query = Message( - role_name="human", role_type="user", input_query=query_content1, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content2 = "Who was the previous president of the United States, and do these two people have any relationship?" -query = Message( - role_name="human", role_type="user", input_query=query_content2, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### toolReactPhase -The tool invocation scene based on the React template - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" -phase_name = "toolReactPhase" - -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config -) - -# round-1 -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) -query_content = "Please help me check if there were any issues with the server at 127.0.0.1 at 10 o'clock, I need your assistance in determining this." 
-query = Message( - role_name="human", role_type="user", tools=tools, input_query=query_content, -) - -# phase.pre_print(query) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` \ No newline at end of file +--- +group: + title: ❤️ Codefuse-muAgent + order: -1 +title: QuickStart +order: 1 +toc: content +--- + +## Quick Start + +For a complete example, see [examples/muagent_examples](htpps://) + +### First, prepare the relevant configuration information + +``` +import os, sys + +api_key = "sk-xxx" +api_base_url= "https://api.openai.com/v1" +model_name = "gpt-3.5-turbo" +embed_model = "{{embed_model_name}}" +embed_model_path = "{{embed_model_path}}" +# +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659" +``` + +### Then, set up LLM configuration and Embedding model configuration + +``` +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.connector.phase import BasePhase +from muagent.connector.schema import Message + + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) +``` + +### Finally, choose an existing scenario to execute + +``` +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +# Choose a scenario +phase_name = "baseGroupPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config +) + +# round-1 needs to be 
completed by code interpreter +query_content = "Confirm whether employee_data.csv exists locally and view its columns and data types; then draw a bar chart" +query = Message( + role_name="human", role_type="user", tools=[], input_query=query_content, +) +# phase.pre_print(query) # This function is used to pre-print the Prompt of the Agents' execution chain +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + + +# round-2 requires the execution of a tool +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) +query_content = "Please help me check if the server at 127.0.0.1 had any issues at 10 o'clock, help me to determine" +query = Message( + role_name="human", role_type="user", tools=tools, input_query=query_content, +) +# phase.pre_print(query) # This function is used to pre-print the Prompt of the Agents' execution chain +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +## Phase Customization + +Refer to [How to Customize Phase](../connector/connector_agent.en-US.md) + +## Introduction and Usage of Scenes + +Below are some specific scene introductions and usages. +We also welcome everyone to brainstorm and construct some interesting cases. 
+ +### baseTaskPhase + +Scenarios involving task segmentation and multi-step execution of xAgents + +``` +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +phase_name = "baseTaskPhase" +phase = BasePhase( +phase_name, embed_config=embed_config, llm_config=llm_config, +) + + +# round-1 +query_content = "Check if employee_data.csv exists locally and see what columns and data types it has; then draw a bar chart" +query = Message( + role_name="human", role_type="user", input_query=query_content, + ) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### codeReactPhase + +The code interpreter scenario based on React + +``` +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +# then, create a data analyze phase +phase_name = "codeReactPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, + jupyter_work_path=JUPYTER_WORK_PATH, +) + +# round-1 +query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart" +query = Message( + role_name="human", role_type="user", + role_content=query_content, input_query=query_content, origin_query=query_content, + ) + +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### codeToolReactPhase + +The tool invocation and code interpreter scenario 
based on the React template + +``` +TOOL_SETS = [ + "StockName", "StockInfo", + ] +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +phase_name = "codeToolReactPhase" + +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +query_content = "Query the stock code of Kweichow Moutai and acquire the time series data of the last 10 days up to the current date (December 24th, 2023); then use code to draw a line chart and analyze it" + +query = Message(role_name="human", role_type="user", input_query=query_content, tools=tools) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### docChatPhase + +Knowledge Base Retrieval and Question-Answering Pipeline + +- example 1 + +``` +# create your knowledge base +from muagent.service.kb_api import create_kb, upload_files2kb +from muagent.utils.server_utils import run_async +from muagent.orm import create_tables + + +# use to test, don't create some directory +create_tables() + +# create a knowledge base +kb_name = "example_test" +run_async(create_kb(knowledge_base_name=kb_name, vector_store_type="faiss", embed_config=embed_config, kb_root_path=KB_ROOT_PATH)) + +# add doc to knowledge base +file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") +files = [file] +upload_files2kb(files, kb_name, embed_config, kb_root_path=KB_ROOT_PATH) + + +## start to chat with knowledge base +# log-level, print prompt, and llm predict +os.environ["log_verbose"] = "0" + +## example 1 +# set chat phase +phase_name = "docChatPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, +) + +# round-1 +query_content = "What modules does langchain have?" 
+query = Message( + role_name="human", role_type="user", input_query=query_content, + doc_engine_name=kb_name, score_threshold=1.0, top_k=3 + ) + +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +# round-2 +query_content = "What is the use of prompts?" +query = Message( + role_name="human", role_type="user", input_query=query_content, + doc_engine_name=kb_name, score_threshold=1.0, top_k=3 + ) + +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +- example 2 + +``` +## Customized register demo +from muagent.tools import DocRetrieval +class BaseDocRetrieval(IMRertrieval): + + def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): + self.knowledge_base_name = knowledge_base_name + self.search_top = search_top + self.score_threshold = score_threshold + self.embed_config = embed_config + self.kb_root_path = kb_root_path + + def run(self, query: str, search_top=None, score_threshold=None, ): + docs = DocRetrieval.run( + query=query, knowledge_base_name=self.knowledge_base_name, + search_top=search_top or self.search_top, + score_threshold=score_threshold or self.score_threshold, + embed_config=self.embed_config, + kb_root_path=self.kb_root_path + ) + return docs + + +doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config) + +# set chat phase +phase_name = "docChatPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, + doc_retrieval=doc_retrieval +) + +# round-1 +query_content = "What modules does langchain have?" 
+query = Message( + role_name="human", role_type="user", input_query=query_content, +) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + + +# round-2 +query_content = "What is the use of prompts?" +query = Message( + role_name="human", role_type="user", input_query=query_content, +) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### metagpt_code_devlop + +The code construction Phase in metagpt + +``` +# log level, print prompt, and llm predict +os.environ["log_verbose"] = "2" +phase_name = "metagpt_code_development" + +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config +) + +query_content = "create a snake game" +query = Message(role_name="human", role_type="user", input_query=query_content) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### searchChatPhase + +Fixed scenario chain, search first then directly answer based on LLM + +``` +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +# This can be configured when the duckduckgo connection is not available +os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5h://127.0.0.1:13659" +phase_name = "searchChatPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config +) + + +# round-1 +query_content1 = "Who is the current President of the United States?" 
+query = Message( + role_name="human", role_type="user", input_query=query_content1, + search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 +) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +# round-2 +query_content2 = "Who was the previous president of the United States, and do these two people have any relationship?" +query = Message( + role_name="human", role_type="user", input_query=query_content2, + search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 +) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### toolReactPhase + +The tool invocation scene based on the React template + +``` +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" +phase_name = "toolReactPhase" + +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config +) + +# round-1 +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) +query_content = "Please help me check if there were any issues with the server at 127.0.0.1 at 10 o'clock, I need your assistance in determining this." 
+query = Message( + role_name="human", role_type="user", tools=tools, input_query=query_content, +) + +# phase.pre_print(query) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` diff --git a/content/zh/coagent/overview/quick-start.md b/docs/docs/api-docs/MuAgent/overview/quick-start.zh-CN.md similarity index 89% rename from content/zh/coagent/overview/quick-start.md rename to docs/docs/api-docs/MuAgent/overview/quick-start.zh-CN.md index e565b36..a8595d2 100644 --- a/content/zh/coagent/overview/quick-start.md +++ b/docs/docs/api-docs/MuAgent/overview/quick-start.zh-CN.md @@ -1,386 +1,393 @@ ---- -title: 快速开始 -slug: 快速开始 -url: "coagent/快速开始" -aliases: -- "/coagent/快速开始" -- "/coagent/quick-start-zh" ---- - - - -## 快速使用 -### 首先,填写LLM配置 -``` -import os, sys -import openai - -# llm config -os.environ["API_BASE_URL"] = OPENAI_API_BASE -os.environ["OPENAI_API_KEY"] = "sk-xxx" -openai.api_key = "sk-xxx" -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" -``` - -### 然后设置LLM配置和向量模型配置 -``` -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig - -llm_config = LLMConfig( - model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) - -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) -``` - -### 最后选择一个已有场景进行执行 -``` -from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS -from coagent.connector.phase import BasePhase -from coagent.connector.schema import Message - -# 选择一个已实现得场景进行执行 - -# 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# 选择一个场景 
-phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 需要通过代码解释器来完成 -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", tools=[], - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 需要执行工具 -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) - -query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下" -query = Message( - role_name="human", role_type="user", tools=tools, - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -``` -## 场景自定义 -见[如何自定义场景](/coagent/connector-zh) - -## 场景介绍和使用 - -下面是一些具体的场景介绍和使用。 - -欢迎大家开脑洞构造一些有趣的case。 - -### baseGroupPhase -autogen的group使用场景 - -``` -# 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# 设置日志级别,控制打印prompt或者llm 输出或其它信息 -os.environ["log_verbose"] = "0" - -phase_name = "baseGroupPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图" - -query = Message( - role_name="human", role_type="user", tools=[], - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt -output_message, output_memory = phase.step(query) 
-print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### baseTaskPhase -xAgents的任务拆分及多步骤执行场景 - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "baseTaskPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) -# round-1 -query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### codeReactPhase -基于 React 的代码解释器场景 - -``` -# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) -import shutil -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' -shutil.copy(source_file, JUPYTER_WORK_PATH) - -# then, create a data analyze phase -phase_name = "codeReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, - jupyter_work_path=JUPYTER_WORK_PATH, -) - -# round-1 -query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图" -query = Message( - role_name="human", role_type="user", - role_content=query_content, input_query=query_content, origin_query=query_content, - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - -### codeToolReactPhase -基于 React 模板的工具调用和代码解释器场景 - - -``` -TOOL_SETS = [ - "StockName", "StockInfo", - ] -tools = 
toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) - -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "codeToolReactPhase" - -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -query_content = "查询贵州茅台的股票代码,并查询截止到当前日期(2023年12月24日)的最近10天的每日时序数据,然后用代码画出折线图并分析" - -query = Message( - role_name="human", role_type="user", - input_query=query_content, role_content=query_content, - origin_query=query_content, tools=tools - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### docChatPhase -知识库检索问答链路 -``` -# create your knowledge base -from io import BytesIO -from pathlib import Path - -from coagent.service.kb_api import create_kb, upload_doc -from coagent.service.service_factory import get_kb_details -from coagent.utils.server_utils import run_async -kb_list = {x["kb_name"]: x for x in get_kb_details(KB_ROOT_PATH)} - - -# create a knowledge base -kb_name = "example_test" -data = { - "knowledge_base_name": kb_name, - "vector_store_type": "faiss", # default - "kb_root_path": KB_ROOT_PATH, - "embed_model": embed_config.embed_model, - "embed_engine": embed_config.embed_engine, - "embed_model_path": embed_config.embed_model_path, - "model_device": embed_config.model_device, -} -run_async(create_kb(**data)) - -# add doc to knowledge base -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") -files = [file] -# if embedding init failed, you can use override = True -data = [{"override": True, "file": f, - "knowledge_base_name": kb_name, "not_refresh_vs_cache": False, - "kb_root_path": KB_ROOT_PATH, "embed_model": embed_config.embed_model, - "embed_engine": embed_config.embed_engine, "embed_model_path": embed_config.embed_model_path, - "model_device": embed_config.model_device, - } - for f in files] - -for k in data: - file 
= Path(file).absolute().open("rb") - filename = file.name - - from fastapi import UploadFile - from tempfile import SpooledTemporaryFile - - temp_file = SpooledTemporaryFile(max_size=10 * 1024 * 1024) - temp_file.write(file.read()) - temp_file.seek(0) - - k.update({"file": UploadFile(file=temp_file, filename=filename),}) - run_async(upload_doc(**k)) - - -# start to chat with knowledge base -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -# set chat phase -phase_name = "docChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) -# round-1 -query_content = "langchain有哪些模块" -query = Message( - role_name="human", role_type="user", - origin_query=query_content, - doc_engine_name=kb_name, score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content = "提示(prompts)有什么用?" -query = Message( - role_name="human", role_type="user", - origin_query=query_content, - doc_engine_name=kb_name, score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### metagpt_code_devlop -metagpt的代码构造链路 - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "metagpt_code_devlop" -llm_config = LLMConfig( - model_name="gpt-4", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], - api_base_url=os.environ["API_BASE_URL"], temperature=0.3 - ) -embed_config = EmbedConfig( - embed_engine="model", embed_model="text2vec-base-chinese", - embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" - ) - -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -query_content = "create a snake game by pygame" -query = Message(role_name="human", 
role_type="user", input_query=query_content, role_content=query_content, origin_query=query_content) - -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### searchChatPhase -固定场景链路,先搜索后基于LLM直接回答 - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "searchChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -query_content1 = "美国当前总统是谁?" -query = Message( - role_name="human", role_type="user", - role_content=query_content1, input_query=query_content1, origin_query=query_content1, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) - -output_message, output_memory = phase.step(query) - -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content2 = "美国上一任总统是谁,两个人有什么关系没?" -query = Message( - role_name="human", role_type="user", - role_content=query_content2, input_query=query_content2, origin_query=query_content2, - search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 - ) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` - - -### toolReactPhase -基于 React 模板的工具调用场景 - -``` -# log-level,print prompt和llm predict -os.environ["log_verbose"] = "2" - -phase_name = "toolReactPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, -) - -# round-1 -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) -query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下" -query = Message( - role_name="human", role_type="user", tools=tools, - role_content=query_content, input_query=query_content, origin_query=query_content - ) - -# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt -output_message, output_memory = phase.step(query) 
-print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` \ No newline at end of file +--- +group: + title: ❤️ Codefuse-muAgent + order: -1 +title: 快速开始 +order: 1 +toc: content +--- + +## 快速使用 + +### 首先,填写 LLM 配置 + +``` +import os, sys +import openai + +# llm config +os.environ["API_BASE_URL"] = OPENAI_API_BASE +os.environ["OPENAI_API_KEY"] = "sk-xxx" +openai.api_key = "sk-xxx" +# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659" +``` + +### 然后设置 LLM 配置和向量模型配置 + +``` +from coagent.llm_models.llm_config import EmbedConfig, LLMConfig + +llm_config = LLMConfig( + model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], + api_base_url=os.environ["API_BASE_URL"], temperature=0.3 + ) + +embed_config = EmbedConfig( + embed_engine="model", embed_model="text2vec-base-chinese", + embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" + ) +``` + +### 最后选择一个已有场景进行执行 + +``` +from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS +from coagent.connector.phase import BasePhase +from coagent.connector.schema import Message + +# 选择一个已实现得场景进行执行 + +# 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +# 选择一个场景 +phase_name = "baseGroupPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +# round-1 需要通过代码解释器来完成 +query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图" +query = Message( + role_name="human", role_type="user", tools=[], + role_content=query_content, input_query=query_content, origin_query=query_content, + ) + +# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +# round-2 需要执行工具 +tools 
= toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下" +query = Message( + role_name="human", role_type="user", tools=tools, + role_content=query_content, input_query=query_content, origin_query=query_content, + ) + +# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +``` + +## 场景自定义 + +见[如何自定义场景](../connector/connector_agent.zh-CN.md) + +## 场景介绍和使用 + +下面是一些具体的场景介绍和使用。 + +欢迎大家开脑洞构造一些有趣的 case。 + +### baseGroupPhase + +autogen 的 group 使用场景 + +``` +# 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +# 设置日志级别,控制打印prompt或者llm 输出或其它信息 +os.environ["log_verbose"] = "0" + +phase_name = "baseGroupPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +# round-1 +query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图" + +query = Message( + role_name="human", role_type="user", tools=[], + role_content=query_content, input_query=query_content, origin_query=query_content, + ) + +# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### baseTaskPhase + +xAgents 的任务拆分及多步骤执行场景 + +``` +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +phase_name = "baseTaskPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, 
llm_config=llm_config, +) +# round-1 +query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图" +query = Message( + role_name="human", role_type="user", + role_content=query_content, input_query=query_content, origin_query=query_content, + ) + +output_message, output_memory = phase.step(query) + +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### codeReactPhase + +基于 React 的代码解释器场景 + +``` +# if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path) +import shutil +source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv' +shutil.copy(source_file, JUPYTER_WORK_PATH) + +# then, create a data analyze phase +phase_name = "codeReactPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, + jupyter_work_path=JUPYTER_WORK_PATH, +) + +# round-1 +query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图" +query = Message( + role_name="human", role_type="user", + role_content=query_content, input_query=query_content, origin_query=query_content, + ) + +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### codeToolReactPhase + +基于 React 模板的工具调用和代码解释器场景 + +``` +TOOL_SETS = [ + "StockName", "StockInfo", + ] +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) + +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +phase_name = "codeToolReactPhase" + +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +query_content = "查询贵州茅台的股票代码,并查询截止到当前日期(2023年12月24日)的最近10天的每日时序数据,然后用代码画出折线图并分析" + +query = Message( + role_name="human", role_type="user", + input_query=query_content, role_content=query_content, + origin_query=query_content, tools=tools + ) + +output_message, output_memory = phase.step(query) 
+print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### docChatPhase + +知识库检索问答链路 + +``` +# create your knowledge base +from io import BytesIO +from pathlib import Path + +from coagent.service.kb_api import create_kb, upload_doc +from coagent.service.service_factory import get_kb_details +from coagent.utils.server_utils import run_async +kb_list = {x["kb_name"]: x for x in get_kb_details(KB_ROOT_PATH)} + + +# create a knowledge base +kb_name = "example_test" +data = { + "knowledge_base_name": kb_name, + "vector_store_type": "faiss", # default + "kb_root_path": KB_ROOT_PATH, + "embed_model": embed_config.embed_model, + "embed_engine": embed_config.embed_engine, + "embed_model_path": embed_config.embed_model_path, + "model_device": embed_config.model_device, +} +run_async(create_kb(**data)) + +# add doc to knowledge base +file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") +files = [file] +# if embedding init failed, you can use override = True +data = [{"override": True, "file": f, + "knowledge_base_name": kb_name, "not_refresh_vs_cache": False, + "kb_root_path": KB_ROOT_PATH, "embed_model": embed_config.embed_model, + "embed_engine": embed_config.embed_engine, "embed_model_path": embed_config.embed_model_path, + "model_device": embed_config.model_device, + } + for f in files] + +for k in data: + file = Path(file).absolute().open("rb") + filename = file.name + + from fastapi import UploadFile + from tempfile import SpooledTemporaryFile + + temp_file = SpooledTemporaryFile(max_size=10 * 1024 * 1024) + temp_file.write(file.read()) + temp_file.seek(0) + + k.update({"file": UploadFile(file=temp_file, filename=filename),}) + run_async(upload_doc(**k)) + + +# start to chat with knowledge base +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +# set chat phase +phase_name = "docChatPhase" +phase = BasePhase( + phase_name, 
embed_config=embed_config, llm_config=llm_config, +) +# round-1 +query_content = "langchain有哪些模块" +query = Message( + role_name="human", role_type="user", + origin_query=query_content, + doc_engine_name=kb_name, score_threshold=1.0, top_k=3 + ) + +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +# round-2 +query_content = "提示(prompts)有什么用?" +query = Message( + role_name="human", role_type="user", + origin_query=query_content, + doc_engine_name=kb_name, score_threshold=1.0, top_k=3 + ) +output_message, output_memory = phase.step(query) + +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### metagpt_code_devlop + +metagpt 的代码构造链路 + +``` +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +phase_name = "metagpt_code_devlop" +llm_config = LLMConfig( + model_name="gpt-4", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], + api_base_url=os.environ["API_BASE_URL"], temperature=0.3 + ) +embed_config = EmbedConfig( + embed_engine="model", embed_model="text2vec-base-chinese", + embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese" + ) + +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +query_content = "create a snake game by pygame" +query = Message(role_name="human", role_type="user", input_query=query_content, role_content=query_content, origin_query=query_content) + +output_message, output_memory = phase.step(query) + +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### searchChatPhase + +固定场景链路,先搜索后基于 LLM 直接回答 + +``` +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +phase_name = "searchChatPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +# round-1 +query_content1 = "美国当前总统是谁?" 
+query = Message( + role_name="human", role_type="user", + role_content=query_content1, input_query=query_content1, origin_query=query_content1, + search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 + ) + +output_message, output_memory = phase.step(query) + +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +# round-2 +query_content2 = "美国上一任总统是谁,两个人有什么关系没?" +query = Message( + role_name="human", role_type="user", + role_content=query_content2, input_query=query_content2, origin_query=query_content2, + search_engine_name="duckduckgo", score_threshold=1.0, top_k=3 + ) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` + +### toolReactPhase + +基于 React 模板的工具调用场景 + +``` +# log-level,print prompt和llm predict +os.environ["log_verbose"] = "2" + +phase_name = "toolReactPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, +) + +# round-1 +tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT]) +query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下" +query = Message( + role_name="human", role_type="user", tools=tools, + role_content=query_content, input_query=query_content, origin_query=query_content + ) + +# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` diff --git a/content/en/muagent/retrieval/custom_retrieval.md b/docs/docs/api-docs/MuAgent/retrieval/custom_retrieval.en-US.md similarity index 66% rename from content/en/muagent/retrieval/custom_retrieval.md rename to docs/docs/api-docs/MuAgent/retrieval/custom_retrieval.en-US.md index afa18df..b33427f 100644 --- a/content/en/muagent/retrieval/custom_retrieval.md +++ b/docs/docs/api-docs/MuAgent/retrieval/custom_retrieval.en-US.md @@ -1,105 +1,179 @@ ---- -title: Custom 
Retrieval -url: "muagent/custom-retrieval" -aliases: -- "/muagent/custom-retrieval" ---- - -## Basic Introduction -`Doc Retrieval` is the document vector database, which is the most mainstream method for knowledge base construction nowadays. It uses Text Embedding models to vectorize documents and stores them in a vector database. In the future, we will also support querying based on knowledge graph and automatically extracting entities and relationships through large models to explore various complex relationships in data. - -`Code Retrieval` LLM faces challenges in tasks such as code generation, repair, and component understanding, including lagging code training data and the inability to perceive the dependency structure of code context. During development, understanding existing codebases and dependencies, retrieving related code, querying metadata, etc., can take a significant amount of time. Therefore, we hope to support LLM with code outside of its knowledge system through code structure analysis and code retrieval. - -`Search Retrieval` In addition to existing document and code knowledge bases, in daily practice, we browse a large amount of web content to acquire more knowledge, helping us understand emerging scenarios, businesses, technologies, etc., hence we integrated duckduckgo search, an open-source search tool, to provide LLM with content beyond its knowledge reserve. 
- -## Retrieval Structure - -``` -class IMRertrieval: - def __init__(self,): - ''' - init your personal attributes - ''' - pass - - def run(self, ): - ''' - execute interface, and can use init' attributes - ''' - pass - - -class BaseDocRetrieval(IMRertrieval): - - def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): - self.knowledge_base_name = knowledge_base_name - self.search_top = search_top - self.score_threshold = score_threshold - self.embed_config = embed_config - self.kb_root_path = kb_root_path - - def run(self, query: str, search_top=None, score_threshold=None, ): - docs = DocRetrieval.run( - query=query, knowledge_base_name=self.knowledge_base_name, - search_top=search_top or self.search_top, - score_threshold=score_threshold or self.score_threshold, - embed_config=self.embed_config, - kb_root_path=self.kb_root_path - ) - return docs -``` - - -## Usage Example -``` -# retrieval your customized register demo -from muagent.tools import DocRetrieval - -class BaseDocRetrieval(IMRertrieval): - - def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): - self.knowledge_base_name = knowledge_base_name - self.search_top = search_top - self.score_threshold = score_threshold - self.embed_config = embed_config - self.kb_root_path = kb_root_path - - def run(self, query: str, search_top=None, score_threshold=None, ): - docs = DocRetrieval.run( - query=query, knowledge_base_name=self.knowledge_base_name, - search_top=search_top or self.search_top, - score_threshold=score_threshold or self.score_threshold, - embed_config=self.embed_config, - kb_root_path=self.kb_root_path - ) - - return docs - - -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config) - -# set chat phase -phase_name = "docChatPhase" -phase = 
BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, - doc_retrieval=doc_retrieval -) - - -# round-1 -query_content = "What modules does langchain have?" -query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - - -# round-2 -query_content = "What is the use of prompts?" -query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) -``` \ No newline at end of file +--- +group: + title: Retrieval + order: 3 +title: Custom retrieval +order: -1 +toc: content +--- + +## Basic Introduction + +`Doc Retrieval` is the document vector database, which is the most mainstream method for knowledge base construction nowadays. It uses Text Embedding models to vectorize documents and stores them in a vector database. In the future, we will also support querying based on knowledge graph and automatically extracting entities and relationships through large models to explore various complex relationships in data. + +`Code Retrieval` LLM faces challenges in tasks such as code generation, repair, and component understanding, including lagging code training data and the inability to perceive the dependency structure of code context. During development, understanding existing codebases and dependencies, retrieving related code, querying metadata, etc., can take a significant amount of time. Therefore, we hope to support LLM with code outside of its knowledge system through code structure analysis and code retrieval. 
+ +`Search Retrieval` In addition to existing document and code knowledge bases, in daily practice, we browse a large amount of web content to acquire more knowledge, helping us understand emerging scenarios, businesses, technologies, etc., hence we integrated duckduckgo search, an open-source search tool, to provide LLM with content beyond its knowledge reserve. + +## Retrieval Structure + +``` +class IMRertrieval: + def __init__(self,): + ''' + init your personal attributes + ''' + pass + + def run(self, ): + ''' + execute interface, and can use init' attributes + ''' + pass + + +class BaseDocRetrieval(IMRertrieval): + + def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): + self.knowledge_base_name = knowledge_base_name + self.search_top = search_top + self.score_threshold = score_threshold + self.embed_config = embed_config + self.kb_root_path = kb_root_path + + def run(self, query: str, search_top=None, score_threshold=None, ): + docs = DocRetrieval.run( + query=query, knowledge_base_name=self.knowledge_base_name, + search_top=search_top or self.search_top, + score_threshold=score_threshold or self.score_threshold, + embed_config=self.embed_config, + kb_root_path=self.kb_root_path + ) + return docs +``` + +## Usage Example + +- import dependencies and set config + +``` +# retrieval your customized register demo +from muagent.tools import DocRetrieval +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.phase import BasePhase +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.base_configs.env_config import KB_ROOT_PATH +from muagent.retrieval.base_retrieval import IMRertrieval + + +# set your config
+api_key = "" +api_base_url= "" +model_name = "" +embed_model = "" +embed_model_path = "" +``` + +- Customized retrieval + +``` +# retrieval your customized register demo +from muagent.tools import DocRetrieval +class BaseDocRetrieval(IMRertrieval): + + def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): + self.knowledge_base_name = knowledge_base_name + self.search_top = search_top + self.score_threshold = score_threshold + self.embed_config = embed_config + self.kb_root_path = kb_root_path + + def run(self, query: str, search_top=None, score_threshold=None, ): + docs = DocRetrieval.run( + query=query, knowledge_base_name=self.knowledge_base_name, + search_top=search_top or self.search_top, + score_threshold=score_threshold or self.score_threshold, + embed_config=self.embed_config, + kb_root_path=self.kb_root_path + ) + return docs +``` + +- llm&embedding Config + +``` +# +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) + +``` + +- Load Document into local + +``` + +# create your knowledge base +from muagent.service.kb_api import create_kb, upload_files2kb +from muagent.utils.server_utils import run_async +from muagent.orm import create_tables + + +# use to test, don't create some directory +create_tables() +# create a knowledge base +kb_name = "example_test" +run_async(create_kb(knowledge_base_name=kb_name, vector_store_type="faiss", embed_config=embed_config, kb_root_path=KB_ROOT_PATH)) +# add doc to knowledge base +file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") +files = [file] +upload_files2kb(files, kb_name, embed_config, kb_root_path=KB_ROOT_PATH) +``` + +- Doc RAG QA + +```
+doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config) + + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) + + +# set chat phase +phase_name = "docChatPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, + doc_retrieval=doc_retrieval +) + +# round-1 +query_content = "What modules does langchain have?" +query = Message( + role_name="human", role_type="user", input_query=query_content, +) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +# round-2 +query_content = "What is the use of prompts?" +query = Message( + role_name="human", role_type="user", input_query=query_content, +) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` diff --git a/content/zh/muagent/retrieval/custom_retrieval.md b/docs/docs/api-docs/MuAgent/retrieval/custom_retrieval.zh-CN.md similarity index 51% rename from content/zh/muagent/retrieval/custom_retrieval.md rename to docs/docs/api-docs/MuAgent/retrieval/custom_retrieval.zh-CN.md index 678adf2..e491099 100644 --- a/content/zh/muagent/retrieval/custom_retrieval.md +++ b/docs/docs/api-docs/MuAgent/retrieval/custom_retrieval.zh-CN.md @@ -1,103 +1,184 @@ ---- -title: 自定义 Retrieval 接入 -url: "muagent/custom-retrieval-zh" -aliases: -- "/muagent/custom-retrieval-zh" ---- - -## 基本介绍 -`Doc Retrieval` 文档向量数据库是当前最主流的知识库构建方法,使用Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式,来挖掘数据中多种复杂关系。 - -`Code Retrieval` 
LLM在代码生成、修复以及组件理解的任务上,会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中,对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来,以及为LLM提供知识体系外的代码。 - -`Search Retrieval` 除了现成的文档和代码知识库以及之外,在日常中实践中会去浏览大量网页内容获取更多的知识,帮助我们理解新兴的场景、业务、技术等,于是我们接入了duckduckgosearch这款开源的搜索工具,能够为LLM提供知识储备以外的内容。 - -## Rertrieval 结构 - - -``` -class IMRertrieval: - - def __init__(self,): - ''' - init your personal attributes - ''' - pass - - def run(self, ): - ''' - execute interface, and can use init' attributes - ''' - pass - -class BaseDocRetrieval(IMRertrieval): - - def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): - self.knowledge_base_name = knowledge_base_name - self.search_top = search_top - self.score_threshold = score_threshold - self.embed_config = embed_config - self.kb_root_path = kb_root_path - - def run(self, query: str, search_top=None, score_threshold=None, ): - docs = DocRetrieval.run( - query=query, knowledge_base_name=self.knowledge_base_name, - search_top=search_top or self.search_top, - score_threshold=score_threshold or self.score_threshold, - embed_config=self.embed_config, - kb_root_path=self.kb_root_path - ) - return docs -``` - - -## 使用示例 -``` -# retrieval your customized register demo -from muagent.tools import DocRetrieval -class BaseDocRetrieval(IMRertrieval): - - def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): - self.knowledge_base_name = knowledge_base_name - self.search_top = search_top - self.score_threshold = score_threshold - self.embed_config = embed_config - self.kb_root_path = kb_root_path - - def run(self, query: str, search_top=None, score_threshold=None, ): - docs = DocRetrieval.run( - query=query, knowledge_base_name=self.knowledge_base_name, - search_top=search_top or self.search_top, - score_threshold=score_threshold or self.score_threshold, - 
embed_config=self.embed_config, - kb_root_path=self.kb_root_path - ) - return docs - - -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config) - -# set chat phase -phase_name = "docChatPhase" -phase = BasePhase( - phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, - doc_retrieval=doc_retrieval -) - -# round-1 -query_content = "langchain有哪些模块" -query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -# round-2 -query_content = "提示(prompts)有什么用?" -query = Message( - role_name="human", role_type="user", input_query=query_content, -) -output_message, output_memory = phase.step(query) -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) - -``` \ No newline at end of file +--- +group: + title: Retrieval + order: 3 +title: 自定义 Retrieval 接入 +order: -1 +toc: content +--- + +## 基本介绍 + +`Doc Retrieval` 文档向量数据库是当前最主流的知识库构建方法,使用 Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式,来挖掘数据中多种复杂关系。 + +`Code Retrieval` LLM 在代码生成、修复以及组件理解的任务上,会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中,对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来,以及为 LLM 提供知识体系外的代码。 + +`Search Retrieval` 除了现成的文档和代码知识库以及之外,在日常中实践中会去浏览大量网页内容获取更多的知识,帮助我们理解新兴的场景、业务、技术等,于是我们接入了 duckduckgosearch 这款开源的搜索工具,能够为 LLM 提供知识储备以外的内容。 + +## Rertrieval 结构 + +- 基本的 retrieval 结构定义 + +``` +from muagent.base_configs.env_config import KB_ROOT_PATH +# from muagent.retrieval.base_retrieval import IMRertrieval + +class IMRertrieval: + + def __init__(self,): + ''' + init your personal attributes + ''' + pass + + def run(self, ): + ''' + execute interface, and can use init' attributes + ''' + pass + +class BaseDocRetrieval(IMRertrieval): + + def __init__(self, knowledge_base_name: str, search_top=5, 
score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): + self.knowledge_base_name = knowledge_base_name + self.search_top = search_top + self.score_threshold = score_threshold + self.embed_config = embed_config + self.kb_root_path = kb_root_path + + def run(self, query: str, search_top=None, score_threshold=None, ): + docs = DocRetrieval.run( + query=query, knowledge_base_name=self.knowledge_base_name, + search_top=search_top or self.search_top, + score_threshold=score_threshold or self.score_threshold, + embed_config=self.embed_config, + kb_root_path=self.kb_root_path + ) + return docs +``` + +## 使用示例 + +- 准备配置和导包 + +``` +# retrieval your customized register demo +from muagent.tools import DocRetrieval +from muagent.base_configs.env_config import JUPYTER_WORK_PATH +from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent +from muagent.connector.chains import BaseChain +from muagent.connector.phase import BasePhase +from muagent.connector.schema import Role, Message, ChainConfig +from muagent.llm_models.llm_config import EmbedConfig, LLMConfig +from muagent.base_configs.env_config import KB_ROOT_PATH +from muagent.retrieval.base_retrieval import IMRertrieval + + +# set your config +api_key = "" +api_base_url= "" +model_name = "" +embed_model = "" +embed_model_path = "" +``` + +- 自定义检索器 + +``` +# retrieval your customized register demo +from muagent.tools import DocRetrieval +class BaseDocRetrieval(IMRertrieval): + + def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): + self.knowledge_base_name = knowledge_base_name + self.search_top = search_top + self.score_threshold = score_threshold + self.embed_config = embed_config + self.kb_root_path = kb_root_path + + def run(self, query: str, search_top=None, score_threshold=None, ): + docs = DocRetrieval.run( + query=query, 
knowledge_base_name=self.knowledge_base_name, + search_top=search_top or self.search_top, + score_threshold=score_threshold or self.score_threshold, + embed_config=self.embed_config, + kb_root_path=self.kb_root_path + ) + return docs +``` + +- llm 和 embedding 配置 + +``` +# +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) + +``` + +- 本地加载知识库 + +``` + +# create your knowledge base +from muagent.service.kb_api import create_kb, upload_files2kb +from muagent.utils.server_utils import run_async +from muagent.orm import create_tables + + +# use to test, don't create some directory +create_tables() +# create a knowledge base +kb_name = "example_test" +run_async(create_kb(knowledge_base_name=kb_name, vector_store_type="faiss", embed_config=embed_config, kb_root_path=KB_ROOT_PATH)) +# add doc to knowledge base +file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl") +files = [file] +upload_files2kb(files, kb_name, embed_config, kb_root_path=KB_ROOT_PATH) +``` + +- 知识库问答 + +``` +doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config) + + +llm_config = LLMConfig( + model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, + stop="**Observation:**" +) + +embed_config = EmbedConfig( + embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path +) + + +# set chat phase +phase_name = "docChatPhase" +phase = BasePhase( + phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH, + doc_retrieval=doc_retrieval +) + +# round-1 +query_content = "langchain有哪些模块" +query = Message( + role_name="human", role_type="user", input_query=query_content, +) +output_message, output_memory = 
phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) + +# round-2 +query_content = "提示(prompts)有什么用?" +query = Message( + role_name="human", role_type="user", input_query=query_content, +) +output_message, output_memory = phase.step(query) +print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list")) +``` diff --git a/content/en/muagent/tools/custom_tool.md b/docs/docs/api-docs/MuAgent/tools/custom_tool.en-US.md similarity index 93% rename from content/en/muagent/tools/custom_tool.md rename to docs/docs/api-docs/MuAgent/tools/custom_tool.en-US.md index 3098181..4ff55c5 100644 --- a/content/en/muagent/tools/custom_tool.md +++ b/docs/docs/api-docs/MuAgent/tools/custom_tool.en-US.md @@ -1,126 +1,126 @@ ---- -title: Custom Tool -url: "muagent/custom-tool" -aliases: -- "/muagent/custom-tool" ---- - -## Introduction -In MuAgent, it also supports the registration of Tools by Agents. By registering the BaseToolModel class with Python and writing -- Tool_name -- Tool_description -- ToolInputArgs -- ToolOutputArgs -- run - -and other relevant properties and methods, the quick integration of tools can be achieved. It also supports the direct use of the langchain Tool interface. For example, functions like the aforementioned XXRetrieval can also be registered as a Tool, to be ultimately called by an LLM. - - -## BaseTool Structure - -``` -from langchain.agents import Tool -from pydantic import BaseModel, Field -from typing import List, Dict -import json - - -class BaseToolModel: - name = "BaseToolModel" - description = "Tool Description" - - class ToolInputArgs(BaseModel): - """ - Input for MoveFileTool. - Tips: - default control Required, e.g. key1 is not Required/key2 is Required - """ - - key1: str = Field(default=None, description="hello world!") - key2: str = Field(..., description="hello world!!") - - class ToolOutputArgs(BaseModel): - """ - Input for MoveFileTool. 
- Tips: - default control Required, e.g. key1 is not Required/key2 is Required - """ - - key1: str = Field(default=None, description="hello world!") - key2: str = Field(..., description="hello world!!") - - @classmethod - def run(cls, tool_input_args: ToolInputArgs) -> ToolOutputArgs: - """excute your tool!""" - pass -``` - - -## Register Example - -``` -from pydantic import BaseModel, Field -from typing import List, Dict -import requests -from loguru import logger - -from .base_tool import BaseToolModel - -class Multiplier(BaseToolModel): - """ - Tips: - default control Required, e.g. key1 is not Required/key2 is Required - """ - - name: str = "Multiplier" - description: str = """useful for when you need to multiply two numbers together. \ - The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. \ - For example, `1,2` would be the input if you wanted to multiply 1 by 2.""" - - class ToolInputArgs(BaseModel): - """Input for Multiplier.""" - - # key: str = Field(..., description="用户在高德地图官网申请web服务API类型KEY") - a: int = Field(..., description="num a") - b: int = Field(..., description="num b") - - class ToolOutputArgs(BaseModel): - """Output for Multiplier.""" - - res: int = Field(..., description="the result of two nums") - - @staticmethod - def run(a, b): - return a * b -``` - - -## Use Example -``` -from langchain.tools import StructuredTool -from muagent.tools import ( - WeatherInfo, Multiplier, toLangchainTools, - TOOL_DICT, TOOL_SETS -) - -# Function exec -tools = [ - StructuredTool( - name=Multiplier.name, - func=Multiplier.run, - description=Multiplier.description, - args_schema=Multiplier.ToolInputArgs, - ), - StructuredTool( - name=WeatherInfo.name, - func=WeatherInfo.run, - description=WeatherInfo.description, - args_schema=WeatherInfo.ToolInputArgs, - ) - ] - -tools = toLangchainTools([TOOL_DICT["Multiplier"]]) - -# tool run Test -print(tools[0].func(1,2)) -``` \ No 
newline at end of file +--- +group: + title: Tools + order: 2 +title: Custom Tool +order: -1 +toc: content +--- + +## Introduction + +In MuAgent, it also supports the registration of Tools by Agents. By registering the BaseToolModel class with Python and writing + +- Tool_name +- Tool_description +- ToolInputArgs +- ToolOutputArgs +- run + +and other relevant properties and methods, the quick integration of tools can be achieved. It also supports the direct use of the langchain Tool interface. For example, functions like the aforementioned XXRetrieval can also be registered as a Tool, to be ultimately called by an LLM. + +## BaseTool Structure + +``` +from langchain.agents import Tool +from pydantic import BaseModel, Field +from typing import List, Dict +import json + + +class BaseToolModel: + name = "BaseToolModel" + description = "Tool Description" + + class ToolInputArgs(BaseModel): + """ + Input for MoveFileTool. + Tips: + default control Required, e.g. key1 is not Required/key2 is Required + """ + + key1: str = Field(default=None, description="hello world!") + key2: str = Field(..., description="hello world!!") + + class ToolOutputArgs(BaseModel): + """ + Input for MoveFileTool. + Tips: + default control Required, e.g. key1 is not Required/key2 is Required + """ + + key1: str = Field(default=None, description="hello world!") + key2: str = Field(..., description="hello world!!") + + @classmethod + def run(cls, tool_input_args: ToolInputArgs) -> ToolOutputArgs: + """excute your tool!""" + pass +``` + +## Register Example + +``` +from pydantic import BaseModel, Field +from typing import List, Dict +import requests +from loguru import logger + +class Multiplier(BaseToolModel): + """ + Tips: + default control Required, e.g. key1 is not Required/key2 is Required + """ + + name: str = "Multiplier" + description: str = """useful for when you need to multiply two numbers together. 
\ + The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. \ + For example, `1,2` would be the input if you wanted to multiply 1 by 2.""" + + class ToolInputArgs(BaseModel): + """Input for Multiplier.""" + + # key: str = Field(..., description="用户在高德地图官网申请web服务API类型KEY") + a: int = Field(..., description="num a") + b: int = Field(..., description="num b") + + class ToolOutputArgs(BaseModel): + """Output for Multiplier.""" + + res: int = Field(..., description="the result of two nums") + + @staticmethod + def run(a, b): + return a * b +``` + +## Use Example + +``` +from langchain.tools import StructuredTool +from muagent.tools import ( + WeatherInfo, Multiplier, toLangchainTools, + TOOL_DICT, TOOL_SETS +) + +# Function exec +tools = [ + StructuredTool( + name=Multiplier.name, + func=Multiplier.run, + description=Multiplier.description, + args_schema=Multiplier.ToolInputArgs, + ), + StructuredTool( + name=WeatherInfo.name, + func=WeatherInfo.run, + description=WeatherInfo.description, + args_schema=WeatherInfo.ToolInputArgs, + ) + ] + +tools = toLangchainTools([TOOL_DICT["Multiplier"]]) + +# tool run Test +print(tools[0].func(1,2)) +``` diff --git a/content/zh/muagent/tools/custom_tool.md b/docs/docs/api-docs/MuAgent/tools/custom_tool.zh-CN.md similarity index 85% rename from content/zh/muagent/tools/custom_tool.md rename to docs/docs/api-docs/MuAgent/tools/custom_tool.zh-CN.md index 7b9be52..be9c2a5 100644 --- a/content/zh/muagent/tools/custom_tool.md +++ b/docs/docs/api-docs/MuAgent/tools/custom_tool.zh-CN.md @@ -1,125 +1,126 @@ ---- -title: 自定义 Tool 接入 -url: "muagent/custom-tool-zh" -aliases: -- "/muagent/custom-tool-zh" ---- - -## 基本介绍 -在MuAgent中也支持Agent完成Tool的注册,通过Python注册模板BaseToolModel类,编写 -- Tool_nam -- Tool_descriptio -- ToolInputArgs -- ToolOutputArgs -- run - -等相关属性和方法即可实现工具的快速接入,同时支持langchain Tool接口的直接使用。 例如像上述 XXRetrieval 的功能也可以注册为Tool,最终由LLM执行调用。 - -## BaseTool 结构 - 
-``` -from langchain.agents import Tool -from pydantic import BaseModel, Field -from typing import List, Dict -import json - - -class BaseToolModel: - name = "BaseToolModel" - description = "Tool Description" - - class ToolInputArgs(BaseModel): - """ - Input for MoveFileTool. - Tips: - default control Required, e.g. key1 is not Required/key2 is Required - """ - - key1: str = Field(default=None, description="hello world!") - key2: str = Field(..., description="hello world!!") - - class ToolOutputArgs(BaseModel): - """ - Input for MoveFileTool. - Tips: - default control Required, e.g. key1 is not Required/key2 is Required - """ - - key1: str = Field(default=None, description="hello world!") - key2: str = Field(..., description="hello world!!") - - @classmethod - def run(cls, tool_input_args: ToolInputArgs) -> ToolOutputArgs: - """excute your tool!""" - pass -``` - - -## 注册示例 - -``` -from pydantic import BaseModel, Field -from typing import List, Dict -import requests -from loguru import logger - -from .base_tool import BaseToolModel - -class Multiplier(BaseToolModel): - """ - Tips: - default control Required, e.g. key1 is not Required/key2 is Required - """ - - name: str = "Multiplier" - description: str = """useful for when you need to multiply two numbers together. \ - The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. 
\ - For example, `1,2` would be the input if you wanted to multiply 1 by 2.""" - - class ToolInputArgs(BaseModel): - """Input for Multiplier.""" - - # key: str = Field(..., description="用户在高德地图官网申请web服务API类型KEY") - a: int = Field(..., description="num a") - b: int = Field(..., description="num b") - - class ToolOutputArgs(BaseModel): - """Output for Multiplier.""" - - res: int = Field(..., description="the result of two nums") - - @staticmethod - def run(a, b): - return a * b -``` - - -## 使用示例 -``` -from langchain.tools import StructuredTool -from muagent.tools import ( - WeatherInfo, Multiplier, toLangchainTools, - TOOL_DICT, TOOL_SETS -) - -# 函数执行 -tools = [ - StructuredTool( - name=Multiplier.name, - func=Multiplier.run, - description=Multiplier.description, - args_schema=Multiplier.ToolInputArgs, - ), - StructuredTool( - name=WeatherInfo.name, - func=WeatherInfo.run, - description=WeatherInfo.description, - args_schema=WeatherInfo.ToolInputArgs, - ) - ] - -tools = toLangchainTools([TOOL_DICT["Multiplier"]]) - -# tool run 测试 -print(tools[0].func(1,2)) -``` \ No newline at end of file +--- +group: + title: Tools + order: 2 +title: 自定义 Tool 接入 +order: -1 +toc: content +--- + +## 基本介绍 + +在 MuAgent 中也支持 Agent 完成 Tool 的注册,通过 Python 注册模板 BaseToolModel 类,编写 + +- Tool_nam +- Tool_descriptio +- ToolInputArgs +- ToolOutputArgs +- run + +等相关属性和方法即可实现工具的快速接入,同时支持 langchain Tool 接口的直接使用。 例如像上述 XXRetrieval 的功能也可以注册为 Tool,最终由 LLM 执行调用。 + +## BaseTool 结构 + +``` +from langchain.agents import Tool +from pydantic import BaseModel, Field +from typing import List, Dict +import json + + +class BaseToolModel: + name = "BaseToolModel" + description = "Tool Description" + + class ToolInputArgs(BaseModel): + """ + Input for MoveFileTool. + Tips: + default control Required, e.g. 
key1 is not Required/key2 is Required + """ + + key1: str = Field(default=None, description="hello world!") + key2: str = Field(..., description="hello world!!") + + class ToolOutputArgs(BaseModel): + """ + Input for MoveFileTool. + Tips: + default control Required, e.g. key1 is not Required/key2 is Required + """ + + key1: str = Field(default=None, description="hello world!") + key2: str = Field(..., description="hello world!!") + + @classmethod + def run(cls, tool_input_args: ToolInputArgs) -> ToolOutputArgs: + """excute your tool!""" + pass +``` + +## 注册示例 + +``` +from pydantic import BaseModel, Field +from typing import List, Dict +import requests +from loguru import logger + +class Multiplier(BaseToolModel): + """ + Tips: + default control Required, e.g. key1 is not Required/key2 is Required + """ + + name: str = "Multiplier" + description: str = """useful for when you need to multiply two numbers together. \ + The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. 
\ + For example, `1,2` would be the input if you wanted to multiply 1 by 2.""" + + class ToolInputArgs(BaseModel): + """Input for Multiplier.""" + + # key: str = Field(..., description="用户在高德地图官网申请web服务API类型KEY") + a: int = Field(..., description="num a") + b: int = Field(..., description="num b") + + class ToolOutputArgs(BaseModel): + """Output for Multiplier.""" + + res: int = Field(..., description="the result of two nums") + + @staticmethod + def run(a, b): + return a * b +``` + +## 使用示例 + +``` +from langchain.tools import StructuredTool +from muagent.tools import ( + WeatherInfo, Multiplier, toLangchainTools, + TOOL_DICT, TOOL_SETS +) + +# 函数执行 +tools = [ + StructuredTool( + name=Multiplier.name, + func=Multiplier.run, + description=Multiplier.description, + args_schema=Multiplier.ToolInputArgs, + ), + StructuredTool( + name=WeatherInfo.name, + func=WeatherInfo.run, + description=WeatherInfo.description, + args_schema=WeatherInfo.ToolInputArgs, + ) + ] + +tools = toLangchainTools([TOOL_DICT["Multiplier"]]) + +# tool run 测试 +print(tools[0].func(1,2)) +``` diff --git a/docs/docs/chatbot-roadmap-zh/index.html b/docs/docs/chatbot-roadmap-zh/index.html deleted file mode 100644 index 7ad1710..0000000 --- a/docs/docs/chatbot-roadmap-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/chatbot-%E6%8A%80%E6%9C%AF%E8%B7%AF%E7%BA%BF/ - - - - - - diff --git a/docs/docs/chatbot-roadmap/index.html b/docs/docs/chatbot-roadmap/index.html deleted file mode 100644 index 432df83..0000000 --- a/docs/docs/chatbot-roadmap/index.html +++ /dev/null @@ -1,879 +0,0 @@ - - - - - - - - -ChatBot-RoadMap · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    ChatBot-RoadMap

    -
    -
    - - -

    - 中文  |  English  -

    -

    RoadMap

    -
    - 图片 -
    -
    -

    Roadmap Overview

    -
      -
    • Sandbox Environment ✅ -
        -
      • Isolated sandbox environment for code execution ✅
      • -
      • File upload and download ✅
      • -
      • Support for Java execution environment ⬜
      • -
      -
    • -
    • Vector Database & Retrieval ✅ -
        -
      • Task retrieval ✅
      • -
      • Tool retrieval ✅
      • -
      -
    • -
    • Prompt Management ✅
    • -
    • Memory Management ✅
    • -
    • Multi Agent Framework ✅ -
        -
      • PRD (Product Requirement Document), system analysis, interface design ⬜
      • -
      • Generate code based on requirement documents, system analysis, and interface design ⬜
      • -
      • Automated testing, automated debugger ⬜
      • -
      • Operations process integration (ToolLearning) ⬜
      • -
      • Fully automated end-to-end process ⬜
      • -
      -
    • -
    • Integration with LLM based on fastchat ✅
    • -
    • Integration with Text Embedding based on sentencebert ✅
    • -
    • Improved vector loading speed ✅
    • -
    • Connector ✅ -
        -
      • React Mode based on langchain ✅
      • -
      • Tool retrieval completed with langchain ✅
      • -
      -
    • -
    • General Capability for Web Crawl ⬜ -
        -
      • Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. ✅
      • -
      • Issue document ⬜
      • -
      • SDK Library Document ⬜
      • -
      -
    • -
    -

    v0.0

    -
      -
    • Sandbox Environment ✅ -
        -
      • Isolated sandbox environment for code execution ✅
      • -
      -
    • -
    • Integration with LLM based on fastchat ✅
    • -
    • Integration with Text Embedding based on sentencebert ✅
    • -
    • General Capability for Web Crawl: Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. ✅
    • -
    -

    Done -

    -

    v0.1

    -
      -
    • Sandbox Environment: File upload and download ✅
    • -
    • Vector Database & Retrieval ✅ -
        -
      • Task retrieval ✅
      • -
      • Tool retrieval ✅
      • -
      -
    • -
    • Connector ✅ -
        -
      • React Mode based on langchain ✅
      • -
      -
    • -
    • Integration with Text Embedding based on sentencebert: Improved vector loading speed ✅
    • -
    -

    Done -

    -

    v0.2

    -
      -
    • Prompt Management ✅
    • -
    • Memory Management ✅
    • -
    • Vector Database & Retrieval ✅
    • -
    -

    Done -

    -

    v0.3

    -
      -
    • Sandbox Environment ✅ -
        -
      • Support for Java execution environment ⬜
      • -
      -
    • -
    • Multi Agent ✅ -
        -
      • PRD (Product Requirement Document), system analysis, interface design ⬜
      • -
      • Generate code based on requirement documents, system analysis, and interface design ⬜
      • -
      • Automated testing, automated debugger ⬜
      • -
      • Operations process integration (ToolLearning) ⬜
      • -
      • Fully automated end-to-end process ⬜
      • -
      -
    • -
    • General Capability for Web Crawl ✅ -
        -
      • Issue document ⬜
      • -
      • SDK Library Document ⬜
      • -
      -
    • -
    -

    DDL: 2024.12.31 -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/docs/chatbot-\346\212\200\346\234\257\350\267\257\347\272\277/index.html" "b/docs/docs/chatbot-\346\212\200\346\234\257\350\267\257\347\272\277/index.html" deleted file mode 100644 index 7ad1710..0000000 --- "a/docs/docs/chatbot-\346\212\200\346\234\257\350\267\257\347\272\277/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/chatbot-%E6%8A%80%E6%9C%AF%E8%B7%AF%E7%BA%BF/ - - - - - - diff --git a/docs/docs/chatbot/start-detail/index.html b/docs/docs/chatbot/start-detail/index.html deleted file mode 100644 index 5860121..0000000 --- a/docs/docs/chatbot/start-detail/index.html +++ /dev/null @@ -1,853 +0,0 @@ - - - - - - - - -Start-Detail · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Start-Detail

    -
    -
    - - -

    - 中文  |  English  -

    -

    If you need to deploy a privatized model, please install the NVIDIA driver yourself.

    -

    Preparation of Python environment

    -
      -
    • It is recommended to use conda to manage the python environment (optional)
    • -
    -
    # Prepare conda environment
    -conda create --name Codefusegpt python=3.9
    -conda activate Codefusegpt
    -
      -
    • Install related dependencies
    • -
    -
    cd Codefuse-ChatBot
    -pip install -r requirements.txt
    -

    Sandbox Environment Preparation

    - -
    # Build the image for the sandbox environment, see above for notebook version issues
    -bash docker_build.sh
    -

    Model Download (Optional)

    -

    If you need to use open-source LLM and Embedding models, you can download them from HuggingFace. -Here we take THUDM/chatglm2-6b and text2vec-base-chinese as examples:

    -
    # install git-lfs
    -git lfs install
    -
    -# install LLM-model
    -git lfs clone https://huggingface.co/THUDM/chatglm2-6b
    -cp ~/THUDM/chatglm2-6b ~/codefuse-chatbot/llm_models/
    -
    -# install Embedding-model
    -git lfs clone https://huggingface.co/shibing624/text2vec-base-chinese
    -cp ~/shibing624/text2vec-base-chinese ~/codefuse-chatbot/embedding_models/
    -

    Basic Configuration

    -
    # Modify the basic configuration for service startup
    -cd configs
    -cp model_config.py.example model_config.py
    -cp server_config.py.example server_config.py
    -
    -# model_config#11~12 If you need to use the OpenAI interface, the OpenAI interface key
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -# Replace with the api_base_url you need
    -os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
    -
    -# vi model_config#LLM_MODEL The language model you need to choose
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -
    -# vi model_config#EMBEDDING_MODEL The private vector model you need to choose
    -EMBEDDING_ENGINE = 'model'
    -EMBEDDING_MODEL = "text2vec-base"
    -
    -# Example of vector model access, modify model_config#embedding_model_dict
    -# If the model directory is:
    -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese
    -# Configure as follows
    -"text2vec-base": "shibing624/text2vec-base-chinese"
    -
    -
    -# vi server_config#8~14, It's recommended to use a container to start the service to prevent environment conflicts when installing other dependencies using the codeInterpreter feature
    -DOCKER_SERVICE = True
    -# Whether to use a container sandbox
    -SANDBOX_DO_REMOTE = True
    -

    Starting the Service

    -

    By default, only the webui-related services are started, and fastchat is not started (optional).

    -
    # If you need to support the codellama-34b-int4 model, you need to patch fastchat
    -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# Modify examples/llm_api.py#258 to kwargs={"gptq_wbits": 4},
    -
    -# start llm-service (optional)
    -python examples/llm_api.py
    -

    For more LLM integration methods, see more details… -

    -
    # After completing the server_config.py configuration, you can start with one click
    -cd examples
    -python start.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-chatbot-quickstart-zh/index.html b/docs/docs/codefuse-chatbot-quickstart-zh/index.html deleted file mode 100644 index 143cda2..0000000 --- a/docs/docs/codefuse-chatbot-quickstart-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/codefuse-chatbot-quickstart-zh/ - - - - - - diff --git a/docs/docs/codefuse-chatbot-quickstart/index.html b/docs/docs/codefuse-chatbot-quickstart/index.html deleted file mode 100644 index abb89d7..0000000 --- a/docs/docs/codefuse-chatbot-quickstart/index.html +++ /dev/null @@ -1,807 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    - 中文  |  English  -

    -

    🚀 Quick Start

    -

    To deploy private models, please install the NVIDIA driver by yourself. -This project has been tested on Python 3.9.18 and CUDA 11.7 environments, as well as on Windows and macOS systems with x86 architecture. -For Docker installation, private LLM access, and related startup issues, see: Start-detail…

    -

    Preparation of Python environment

    -
      -
    • It is recommended to use conda to manage the python environment (optional)
    • -
    -
    # Prepare conda environment
    -conda create --name Codefusegpt python=3.9
    -conda activate Codefusegpt
    -
      -
    • Install related dependencies
    • -
    -
    cd Codefuse-ChatBot
    -pip install -r requirements.txt
    -

    Basic Configuration

    -
    # Modify the basic configuration for service startup
    -cd configs
    -cp model_config.py.example model_config.py
    -cp server_config.py.example server_config.py
    -
    -# model_config#11~12 If you need to use the OpenAI interface, the OpenAI interface key
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -# Replace with the api_base_url you need
    -os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
    -
    -# vi model_config#LLM_MODEL The language model you need to choose
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -
    -# vi model_config#EMBEDDING_MODEL The private vector model you need to choose
    -EMBEDDING_ENGINE = 'model'
    -EMBEDDING_MODEL = "text2vec-base"
    -
    -# Example of vector model access, modify model_config#embedding_model_dict
    -# If the model directory is:
    -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese
    -# Configure as follows
    -"text2vec-base": "shibing624/text2vec-base-chinese"
    -
    -
    -# vi server_config#8~14, It's recommended to use a container to start the service to prevent environment conflicts when installing other dependencies using the codeInterpreter feature
    -DOCKER_SERVICE = True
    -# Whether to use a container sandbox
    -SANDBOX_DO_REMOTE = True
    -

    Start the Service

    -

    By default, only webui related services are started, and fastchat is not started (optional).

    -
    # If you need to support the codellama-34b-int4 model, you need to patch fastchat
    -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# Modify examples/llm_api.py#258 to kwargs={"gptq_wbits": 4},
    -
    -# Start llm-service (optional)
    -python examples/llm_api.py
    -

    For more LLM access methods, see Details…

    -
    # After completing the server_config.py configuration, you can start with one click
    -cd examples
    -python start.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-chatbot-zh/index.html b/docs/docs/codefuse-chatbot-zh/index.html deleted file mode 100644 index 2b45206..0000000 --- a/docs/docs/codefuse-chatbot-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/codefuse-chatbot-zh/ - - - - - - diff --git a/docs/docs/codefuse-chatbot/index.html b/docs/docs/codefuse-chatbot/index.html deleted file mode 100644 index 70daf5a..0000000 --- a/docs/docs/codefuse-chatbot/index.html +++ /dev/null @@ -1,631 +0,0 @@ - - - - - - - - -Codefuse-ChatBot Development by Private Knowledge Augmentation · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Codefuse-ChatBot Development by Private Knowledge Augmentation

    -
    -
    - - -

    - 中文  |  English  -

    -

    This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface.

    -

    📜 Contents

    - -

    🤝 Introduction

    -

    💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. It transitions gradually from the traditional development and operations mode of querying information from various sources and operating on standalone, disparate platforms to an intelligent development and operations mode based on large-model Q&A, changing people’s development and operations habits.

    -
      -
    • 🧠 Intelligent Scheduling Core: Constructed a well-integrated scheduling core system that supports multi-mode one-click configuration, simplifying the operational process. Use Introduction
    • -
    • 💻 Comprehensive Code Repository Analysis: Achieved in-depth understanding at the repository level and coding and generation at the project file level, enhancing development efficiency.
    • -
    • 📄 Enhanced Document Analysis: Integrated document knowledge bases with knowledge graphs, providing deeper support for document analysis through enhanced retrieval and reasoning.
    • -
    • 🔧 Industry-Specific Knowledge: Tailored a specialized knowledge base for the DevOps domain, supporting the self-service one-click construction of industry-specific knowledge bases for convenience and practicality.
    • -
    • 🤖 Compatible Models for Specific Verticals: Designed small models specifically for the DevOps field, ensuring compatibility with related DevOps platforms and promoting the integration of the technological ecosystem.
    • -
    -

    🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API. Access Demo

    -

    👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the CodefuseGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of “Making Development Seamless for Everyone.”

    -
    - Image -
    -

    🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API.

    -

    👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the DevOpsGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of “Making Development Seamless for Everyone.”

    -

    🧭 Technical Route

    -
    - Image -
    -
      -
    • 🧠 Multi-Agent Schedule Core: Easily configurable to create interactive intelligent agents.
    • -
    • 🕷️ Multi Source Web Crawl: Offers the capability to crawl specified URLs for collecting the required information.
    • -
    • 🗂️ Data Processor: Effortlessly handles document loading, data cleansing, and text segmentation, integrating data from different sources.
    • -
    • 🔤 Text Embedding & Index: Users can easily upload files for document retrieval, optimizing the document analysis process.
    • -
    • 🗄️ Vector Database & Graph Database: Provides flexible and powerful data management solutions.
    • -
    • 📝 Prompt Control & Management: Precisely defines the contextual environment for intelligent agents.
    • -
    • 🚧 SandBox: Safely executes code compilation and actions.
    • -
    • 💬 LLM: Supports various open-source models and LLM interfaces.
    • -
    • 🛠️ API Management: Enables rapid integration of open-source components and operational platforms.
    • -
    -

    For implementation details, see: Technical Route Details

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops-eval-quickstart-zh/index.html b/docs/docs/codefuse-devops-eval-quickstart-zh/index.html deleted file mode 100644 index 1de51fe..0000000 --- a/docs/docs/codefuse-devops-eval-quickstart-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/codefuse-devops-eval-quickstart-zh/ - - - - - - diff --git a/docs/docs/codefuse-devops-eval-quickstart/index.html b/docs/docs/codefuse-devops-eval-quickstart/index.html deleted file mode 100644 index 314bbdc..0000000 --- a/docs/docs/codefuse-devops-eval-quickstart/index.html +++ /dev/null @@ -1,854 +0,0 @@ - - - - - - - - -Evaluate · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Evaluate

    -
    -
    - - -

    🚀 How to Evaluate

    -

    If you need to test your own huggingface-formatted model, the overall steps are as follows:

    -
      -
    1. Write the loader function for the model.
    2. -
    3. Write the context_builder function for the model.
    4. -
    5. Register the model in the configuration file.
    6. -
    7. Run the testing script. -If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing.
    8. -
    -

    1. Write the loader function

    -

    If the model requires additional processing after loading (e.g. adjusting the tokenizer), you need to inherit the ModelAndTokenizerLoader class in src.context_builder.context_builder_family.py and override the corresponding load_model and load_tokenizer functions. You can refer to the following example:

    -
    class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader):
    -    def __init__(self):
    -        super().__init__()
    -        pass
    -    
    -    @override
    -    def load_model(self, model_path: str):
    -    # Implementation of the method
    -        pass
    -    
    -    @override
    -    def load_tokenizer(self, model_path: str):
    -    # Implementation of the method
    -        pass
    -

    2. Write the context_builder function for the Model

    -

    If the input needs to be converted to a specific format (e.g. chatml format or other human-bot formats), you need to inherit the ContextBuilder class in src.context_builder.context_builder_family and override the make_context function. This function is used to convert the input to the corresponding required format. An example is shown below:

    -
    class QwenChatContextBuilder(ContextBuilder):
    -    def __init__(self):
    -        super().__init__()
    -        
    -    @override
    -    def make_context(self, model, tokenizer, query: str, system: str = "hello!"):
    -    # Implementation of the method
    -        pass
    -

    3. Register the model in the configuration file

    -

    Go to the model_conf.json file in the conf directory and register the corresponding model name and the loader and context_builder that will be used for this model. Simply write the class names defined in the first and second steps for the loader and context_builder. Here is an example:

    -
    {
    -  "Qwen-Chat": {
    -  "loader": "QwenModelAndTokenizerLoader",
    -  "context_builder": "QwenChatContextBuilder"
    -  }
    -}
    -

    4. Execute the testing script

    -

    Run the following code to initiate the test:

    -
    python src/run_eval.py \
    ---model_path path_to_model \
    ---model_name model_name_in_conf \
    ---model_conf_path path_to_model_conf \
    ---eval_dataset_list all \
    ---eval_dataset_fp_conf_path path_to_dataset_conf \
    ---eval_dataset_type test \
    ---data_path path_to_downloaded_devops_eval_data \
    ---k_shot 0
    -

    👀 👀 The specific evaluation process is as follows 📖 Evaluate Tutorial

    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops-eval-zh/index.html b/docs/docs/codefuse-devops-eval-zh/index.html deleted file mode 100644 index 36801f3..0000000 --- a/docs/docs/codefuse-devops-eval-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/codefuse-devops-eval-zh/ - - - - - - diff --git a/docs/docs/codefuse-devops-eval/index.html b/docs/docs/codefuse-devops-eval/index.html deleted file mode 100644 index f9ea843..0000000 --- a/docs/docs/codefuse-devops-eval/index.html +++ /dev/null @@ -1,593 +0,0 @@ - - - - - - - - -codefuse-devops-eval · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    codefuse-devops-eval

    -
    -
    - - -

    Coming soon

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops-model-quickstart-zh/index.html b/docs/docs/codefuse-devops-model-quickstart-zh/index.html deleted file mode 100644 index 7589704..0000000 --- a/docs/docs/codefuse-devops-model-quickstart-zh/index.html +++ /dev/null @@ -1,848 +0,0 @@ - - - - - - - - -快速使用 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速使用

    -
    -
    - - -

    依赖安装

    -

    需要先 PIP 安装一下 Github 地址下的 requirement.txt 中的包,可以参考以下代码 -pip install -r requirements.txt

    -

    模型下载

    -

    模型下载相关信息如下: -🤗 Huggingface 地址

    - - - - - - - - - - - - - - - - - - - - -
    -基座模型对齐模型
    7BDevOps-Model-7B-BaseDevOps-Model-7B-Chat
    14BDevOps-Model-14B-BaseDevOps-Model-14B-Chat
    -

    🤖 ModelScope 地址

    - - - - - - - - - - - - - - - - - - - - -
    -基座模型对齐模型
    7BDevOps-Model-7B-BaseDevOps-Model-7B-Chat
    14BDevOps-Model-14B-BaseDevOps-Model-14B-Chat
    -

    找到自己想要下载的 Chat 模型版本,当前提供了 7B 和 14B 的模型

    -

    模型使用

    -

    根据以下代码来和 Chat 模型进行交互

    -
    from transformers import AutoModelForCausalLM, AutoTokenizer
    -from transformers.generation import GenerationConfig
    -
    -tokenizer = AutoTokenizer.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True)
    -
    -model = AutoModelForCausalLM.from_pretrained("path_to_DevOps-Model-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
    -
    -# 指定 generation_config
    -model.generation_config = GenerationConfig.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True)
    -
    -# 第一轮对话
    -resp, hist = model.chat(query='你是谁', tokenizer=tokenizer, history=None)
    -print(resp)
    -# 我是 DevOps-Model,一个由蚂蚁集团平台技术事业群风险智能团队和北京大学联合研发的人工智能机器人,可以与用户进行自然语言交互,并协助解答 DevOps 全生命周期中的各种问题。如果您有任何需要协助的问题或者想要进行闲聊,都可以和我交流哦。
    -
    -# 第二轮对话
    -resp2, hist2 = model.chat(query='Java 中 HashMap 和 Hashtable 有什么区别', tokenizer=tokenizer, history=hist)
    -print(resp2)
    -# HashMap 和 Hashtable 都是 Java 中常用的哈希表实现,它们的主要区别在于:
    -# 1. Hashtable 是线程安全的,而 HashMap 不是线程安全的,因此在多线程环境下,Hashtable 的性能更稳定。
    -# 2. Hashtable 中的方法都是同步的,而 HashMap 的方法不是同步的,因此在多线程环境下,Hashtable 的性能更好。
    -# 3. Hashtable 中的 key 和 value 都必须实现 Serializable 接口,而 HashMap 中的 key 和 value 可以是任何对象,包括基本数据类型。
    -# 4. Hashtable 的初始容量是 11,而 HashMap 的初始容量是 16。
    -# 总之,如果需要在多线程环境下使用哈希表,并且需要保证线程安全,那么应该使用 Hashtable;如果不需要考虑线程安全,或者需要快速地进行哈希表操作,那么应该使用 HashMap。
    -
    -# 第三轮对话
    -resp3, hist3 = model.chat(query='线程安全代表什么', tokenizer=tokenizer, history=hist2)
    -print(resp3)
    -# 线程安全是指在多线程环境下,程序能够正确地处理并发访问,并且不会出现数据竞争、死锁、饥饿等异常情况。线程安全的程序可以保证在不同的线程之间共享同一个数据结构时,数据的正确性和一致性。线程安全的实现通常需要使用同步机制,如锁、原子操作等,来保证对共享数据的访问是线程安全的。在 Java 中,可以通过 synchronized 关键字、Lock 接口等机制来实现线程安全。
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops-model-quickstart/index.html b/docs/docs/codefuse-devops-model-quickstart/index.html deleted file mode 100644 index 0e9fbae..0000000 --- a/docs/docs/codefuse-devops-model-quickstart/index.html +++ /dev/null @@ -1,863 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    Dependency Installation

    -

    Please install the packages listed in the requirements.txt file from the GitHub address first. You can refer to the following code:

    -
    pip install -r requirements.txt
    -

    Model Download

    -

    Model download information is as follows:

    -

    🤗 Huggingface Address

    - - - - - - - - - - - - - - - - - - - - -
    -Base ModelAligned Model
    7BDevOps-Model-7B-BaseDevOps-Model-7B-Chat
    14BDevOps-Model-14B-BaseDevOps-Model-14B-Chat
    -

    🤖 ModelScope Address

    - - - - - - - - - - - - - - - - - - - - -
    -Base ModelAligned Model
    7BDevOps-Model-7B-BaseDevOps-Model-7B-Chat
    14BDevOps-Model-14B-BaseDevOps-Model-14B-Chat
    -

    Find the version of the Chat model you want to download; currently, 7B and 14B models are provided.

    -

    Model Usage

    -

    Interact with the Chat model using the following code:

    -
    from transformers import AutoModelForCausalLM, AutoTokenizer
    -from transformers.generation import GenerationConfig
    -
    -tokenizer = AutoTokenizer.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True)
    -
    -model = AutoModelForCausalLM.from_pretrained("path_to_DevOps-Model-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
    -
    -# 指定 generation_config
    -model.generation_config = GenerationConfig.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True)
    -
    -# First round of conversation
    -resp, hist = model.chat(query='你是谁', tokenizer=tokenizer, history=None)
    -print(resp)
    -# 我是 DevOps-Model,一个由蚂蚁集团平台技术事业群风险智能团队和北京大学联合研发的人工智能机器人,可以与用户进行自然语言交互,并协助解答 DevOps 全生命周期中的各种问题。如果您有任何需要协助的问题或者想要进行闲聊,都可以和我交流哦。
    -
    -# Second round of conversation
    -resp2, hist2 = model.chat(query='Java 中 HashMap 和 Hashtable 有什么区别', tokenizer=tokenizer, history=hist)
    -print(resp2)
    -# HashMap 和 Hashtable 都是 Java 中常用的哈希表实现,它们的主要区别在于:
    -# 1. Hashtable 是线程安全的,而 HashMap 不是线程安全的,因此在多线程环境下,Hashtable 的性能更稳定。
    -# 2. Hashtable 中的方法都是同步的,而 HashMap 的方法不是同步的,因此在多线程环境下,Hashtable 的性能更好。
    -# 3. Hashtable 中的 key 和 value 都必须实现 Serializable 接口,而 HashMap 中的 key 和 value 可以是任何对象,包括基本数据类型。
    -# 4. Hashtable 的初始容量是 11,而 HashMap 的初始容量是 16。
    -# 总之,如果需要在多线程环境下使用哈希表,并且需要保证线程安全,那么应该使用 Hashtable;如果不需要考虑线程安全,或者需要快速地进行哈希表操作,那么应该使用 HashMap。
    -
    -# Third round of conversation
    -resp3, hist3 = model.chat(query='线程安全代表什么', tokenizer=tokenizer, history=hist2)
    -print(resp3)
    -# 线程安全是指在多线程环境下,程序能够正确地处理并发访问,并且不会出现数据竞争、死锁、饥饿等异常情况。线程安全的程序可以保证在不同的线程之间共享同一个数据结构时,数据的正确性和一致性。线程安全的实现通常需要使用同步机制,如锁、原子操作等,来保证对共享数据的访问是线程安全的。在 Java 中,可以通过 synchronized 关键字、Lock 接口等机制来实现线程安全。
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops-model-train-zh/index.html b/docs/docs/codefuse-devops-model-train-zh/index.html deleted file mode 100644 index fb41d4f..0000000 --- a/docs/docs/codefuse-devops-model-train-zh/index.html +++ /dev/null @@ -1,809 +0,0 @@ - - - - - - - - -训练解析 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    训练解析

    -
    -
    - - -

    训练流程

    -

    根据查阅文献可知,大部分领域模型都是在对话模型的基础上,通过SFT微调来进行知识注入。而SFT微调所需要的QA语料基本都来自于ChatGPT生成。然而,该方案可能存在QA语料无法完全覆盖领域知识的情况。 -因此,DevOps-Model采用的是预训练加训 + SFT微调的方案,如图2.1所示。我们认为针对领域大模型,预训练的加训是必要的,因为其可以将领域内的一些知识在预训练阶段注入到大模型,如果这些知识在通用大模型预训练时没有出现过,那会让大模型学习到新的知识;如果出现过,就可以让大模型进一步加深印象。第二步则是大模型对齐,目的是让大模型可以根据问题来回答最合适的内容。

    -

    -

    -

    训练数据

    -

    数据收集

    -

    模型的定位是中文 DevOps 领域大模型,因此收集与中文DevOps相关的预训练数据和QA数据。

    -
      -
    • 预训练数据主要来自互联网技术博客、技术文档、技术书籍等,最终收集到了 50G+ 的预训练语料数据;
    • -
    • 针对 QA 数据,我们的目的是想让模型不但对齐到通用的问答能力,而且针对 DevOps 领域也可以学会如何更好的回答问题,因此不但收集了通用领域的单轮和多轮对话数据,还针对 DevOps 领域,通过爬取和 ChatGPT 生成的方式产出了属于 DevOps 领域的问答数据。最终我们精心筛选了约 200K 的 QA 数据进行 SFT微调训练,具体数据量如下表所示。
    • -
    - - - - - - - - - - - - - - - - - - - - - -
    数据类型数据量级
    通用单轮 QA50K
    通用多轮 QA20K
    DevOps 领域 QA130K
    -

    数据筛选

    -

    -

    -

    由于预训练数据大部分是从互联网上收集的数据,质量会参差不齐,而大模型训练中数据是最重要的一环,我们建立了如上图所示的清洗 Pipeline,来针对收集到的数据进行质量的全面过滤。

    -
      -
    1. 首先,由专家经验和人工筛选,总结出来了一批文档级别的 Heuristic 过滤规则,这一步主要用来过滤掉那些质量非常差的文档;
    2. -
    3. 然后,即便是一篇质量稍差的文章中,也有可能还是含有一些有价值的领域知识,我们也需要尽可能的进行收集。此处,我们对文章进行段落拆分,将文章拆分成一个个段落;
    4. -
    5. 然后,我们将拆分后的段落会再次通过步骤1进行过滤,便得到了一批经过规则过滤后的段落;
    6. -
    7. 然后,我们摘取了其中 1000 个段落,由经验丰富的专业开发人员来进行打标,获得高质量的打标数据;
    8. -
    9. 最后,我们根据打标后的结果来训练了一个打分模型来针对段落进行质量的打分,段落的向量模型选用了预训练好的中文版本的 Sentence-Bert,打分算法选用了逻辑回归,为了避免打分模型的误差,会再通过帕累托分布来根据段落的质量打分进行采样来决定要不要过滤这个段落。 -经过这个 Pipeline 后,我们最终沉淀下 15G 左右的数据来进行大模型的预训练加训。
    10. -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops-model-train/index.html b/docs/docs/codefuse-devops-model-train/index.html deleted file mode 100644 index bc0d9c4..0000000 --- a/docs/docs/codefuse-devops-model-train/index.html +++ /dev/null @@ -1,820 +0,0 @@ - - - - - - - - -Train Detail · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Train Detail

    -
    -
    - - -

    Training Process

    -

    According to the literature review, it is known that most domain models are based on conversational models and undergo knowledge infusion through Supervised Fine-Tuning (SFT). However, the QA corpus required for SFT fine-tuning largely comes from ChatGPT generation, which may not fully cover domain knowledge.

    -

    Therefore, the DevOps-Model adopts a pre-training plus training followed by SFT fine-tuning approach, as illustrated in Figure 2.1. We believe that for large domain models, additional pre-training is necessary. This can inject some domain knowledge into the large model during the pre-training phase. If this knowledge has not been covered during the general large model’s pre-training, it will allow the large model to learn new information; if it has been covered, it will further reinforce the model’s knowledge. The second step is model alignment, aiming to enable the large model to provide the most appropriate content in response to questions.

    -

    -

    -

    Training Data

    -

    Data Collection

    -

    The model is positioned as a large Chinese DevOps domain model, so we collect pre-training and QA data related to Chinese DevOps.

    -

    The pre-training data mainly comes from the internet, including technical blogs, documentation, and books, amounting to over 50GB of pre-training corpus data. -For the QA data, our goal is not only to align the model with general Q&A capabilities but also to learn how to answer questions better in the DevOps domain. Therefore, we collected both general single-turn and multi-turn dialogue data and generated domain-specific QA data for the DevOps field through crawling and using ChatGPT. Ultimately, we carefully selected around 200K pieces of QA data for SFT fine-tuning training, as shown in the table below.

    - - - - - - - - - - - - - - - - - - - - - -
    Data TypeVolume
    General Single-turn QA50K
    General Multi-turn QA20K
    DevOps Domain QA130K
    -

    Data Selection

    -

    -

    -

    Since most of the pre-training data is collected from the internet, the quality can be uneven. As data is the most crucial component in large model training, we established a cleaning Pipeline as shown above to thoroughly filter the quality of the collected data.

    -

    First, experts and manual screening have summarized a set of heuristic filtering rules at the document level, primarily to filter out those documents of very poor quality. -Then, even within an article of slightly lower quality, there may still be some valuable domain knowledge, which we need to collect as much as possible. Here, we split the article into paragraphs. -Next, the split paragraphs are filtered again using the rules from step 1, yielding a batch of paragraphs that have passed rule-based filtering. -We then picked out 1000 paragraphs for labeling by experienced professional developers to obtain high-quality labeled data. -Finally, we trained a scoring model based on the labeling results to score the quality of paragraphs. The vector model for paragraphs was the pre-trained Chinese version of Sentence-Bert, and the scoring algorithm was logistic regression. To avoid errors in the scoring model, we used the Pareto distribution to decide whether to filter a paragraph based on its quality score. -After this Pipeline, we finally settled on approximately 15GB of data for the pre-training plus training of the large model.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops-model-zh/index.html b/docs/docs/codefuse-devops-model-zh/index.html deleted file mode 100644 index 9c496e8..0000000 --- a/docs/docs/codefuse-devops-model-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/codefuse-devops-model-zh/ - - - - - - diff --git a/docs/docs/codefuse-devops-model/index.html b/docs/docs/codefuse-devops-model/index.html deleted file mode 100644 index f6ab8bc..0000000 --- a/docs/docs/codefuse-devops-model/index.html +++ /dev/null @@ -1,601 +0,0 @@ - - - - - - - - -codefuse-devops-model · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    codefuse-devops-model

    -
    -
    - - -

    Coming soon

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-devops/index.html b/docs/docs/codefuse-devops/index.html deleted file mode 100644 index bb34439..0000000 --- a/docs/docs/codefuse-devops/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/codefuse-devops/ - - - - - - diff --git a/docs/docs/codefuse-evalution-quickstart-zh/index.html b/docs/docs/codefuse-evalution-quickstart-zh/index.html deleted file mode 100644 index 3cbe837..0000000 --- a/docs/docs/codefuse-evalution-quickstart-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/codefuse-evalution-quickstart-zh/ - - - - - - diff --git a/docs/docs/codefuse-evalution-quickstart/index.html b/docs/docs/codefuse-evalution-quickstart/index.html deleted file mode 100644 index 4ba6474..0000000 --- a/docs/docs/codefuse-evalution-quickstart/index.html +++ /dev/null @@ -1,1052 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    Generation environment:

    -

    CodeFuse-13B: Python 3.8 or above, PyTorch 1.12 or above (2.0 or above recommended), Transformers 4.24.0 or above, CUDA 11.4 or above (for GPU users and flash-attention users, this option should be considered).

    -

    CodeFuse-CodeLlama-34B:python>=3.8,pytorch>=2.0.0,transformers==4.32.0,Sentencepiece,CUDA 11.

    -

    Evaluation Environment

    -

    The evaluation of the generated codes involves compiling and running in multiple programming languages. The versions of the programming language environments and packages we use are as follows:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    DependencyVersion
    Python3.10.9
    JDK18.0.2.1
    Node.js16.14.0
    js-md50.7.3
    C++11
    g++7.5.0
    Boost1.75.0
    OpenSSL3.0.0
    go1.18.4
    cargo1.71.1
    -

    In order to save everyone the trouble of setting up the environments for these languages, we create a Docker image with the required environments and codefuseEval.

    -
    docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest
    -

    If you are familiar with docker, you can build the image from codefuseEval/docker/Dockerfile or configure the Dockerfile as you like it:

    -
    cd codefuseEval/docker
    -docker build [OPTIONS] .
    -

    After obtaining the image, you can build a container using the following command:

    -
    docker run -it --gpus all --mount type=bind,source=<LOCAL PATH>,target=<PATH IN CONTAINER> [OPTIONS] <IMAGE NAME:TAG>
    -

    Check result Command:

    -

    We provide the script to check the result for provided code LLMs. Please use the following scripts to check the corresponding results and the environment.

    -
    bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python
    -bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-13B/humaneval_result_python.jsonl humaneval_python 
    -

    How to use CodeFuseEval

    -
      -
    1. Download the model and update the current model information in ckpt_config.json. Mainly update the 「path」 parameter in the corresponding model and version.
    2. -
    3. Run the following generation command to generate the result.
    4. -
    -
    bash codefuseEval/script/generation.sh MODELNAME MODELVERSION EVALDATASET OUTFILE 
    -
    -eg:
    -bash codefuseEval/script/generation.sh CodeFuse-13B v1 humaneval_python result/test.jsonl
    -
      -
    1. Run following evaluation command to evaluate the generated result for corresponding model and version.
    2. -
    -
    bash codefuseEval/script/evaluation.sh <RESULT_FILE> <METRIC> <PROBLEM_FILE>
    -eg: 
    -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python
    -

    Evaluation

    -

    We recommend evaluating in the provided image. To evaluate the generated samples, save generated codes in the following JSON list format:

    -
    {"task_id": "../..", "generation: "..."}
    -{"task_id": "../..", "generation: "..."}
    -...
    -

    and evaluate them using the following script under the root directory of the repository (please execute with caution, the generated codes might have unexpected behaviours though with very low possibility. See the warnings in execution.py and uncomment the execution lines at your own risk):

    -

    Evaluation Data

    -

    Data are stored in codefuseEval/data, using JSON list format. We first integrated the humaneval-X dataset.

    -
      -
    • task_id: indicates the target language and ID of the problem. Language is one of [“Python”, “Java”, “JavaScript”, “CPP”, “Go”].
    • -
    • prompt: the function declaration and docstring, used for code generation.
    • -
    • declaration: only the function declaration, used for code translation.
    • -
    • canonical_solution: human-crafted example solutions.
    • -
    • test: hidden test samples, used for evaluation
    • -
    • example_test: public test samples (appeared in prompt), used for evaluation.
    • -
    • prompt_text: prompt text
    • -
    • prompt_explain: prompt explanation
    • -
    • func_title: code function title
    • -
    • prompt_text_chinese: Chinese prompt
    • -
    -

    Evaluation Metrics

    -

    In addition to the unbiased pass@k indicators currently provided in Codex, we will also integrate the relevant open-source metrics from Hugging Face together with CodeBLEU. -The main indicators currently recommended for users are as follows:

    -
      -
    • codebleu
    • -
    • pass@k
    • -
    • bleu
    • -
    • bleurt
    • -
    -

    For other related metrics, you can check the code of the metric or the evaluation code to meet your requirements.

    -

    At the same time, we supplemented the indicators of the total and average generation time of the model for the dataset total_time_cost and Average time cost

    -

    Output during each generation, making it convenient for users to measure the generation performance of the model in the same environment. This indicator is passive output, and it will be output every time it is generated.

    -

    Evaluation Command:

    -
    bash codefuseEval/script/evaluation.sh <RESULT_FILE> <METRIC> <PROBLEM_FILE> <TEST_GROUDTRUTH>
    -eg: 
    -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python
    -

    At the same time, we currently provide the following flags, which can directly bring the sample answers in the test data set as generated answers for testing.

    -
      -
    • TEST_GROUDTRUTH default False
    • -
    -

    When TEST_GROUDTRUTH is True, the self-test mode is turned on, PROBLEM_FILE will be read, and the sample answer will be substituted as the generated answer for testing.

    -

    When TEST_GROUDTRUTH is False, open the evaluation mode, read RESULT_FILE and PROBLEM_FILE, and substitute the generated answer for testing.

    -

    More Information

    -

    Evaluation self model and dataset

    -
      -
    1. Register your evaluation dataset.
    2. -
    -
      -
    • Download evaluation dataset to store in codefuseEval/data or other directory. Dataset must be jsonl.
    • -
    • Setup information dataset EVAL_DATASET,DATASET_SUPPORT and DATASET_LANGUAGE in codefuseEval/util.py for dataset path, dataset task_mode and generation code language
    • -
    -
      -
    1. Register your evaluation model.
    2. -
    -
      -
    • Download evaluation model to store in codefuseEval/model or other directory.
    • -
    • Write your evaluation model processor code in codefuseEval/processor package.
    • -
    -

    We designed an infrastructure called Processor. Its main purpose is to handle the differences between different models. It mainly needs to complete three abstract functions:

    -
      -
    • load_model_tokenizer:Due to differences in model loading parameters and tokenizer terminators, models need to use different parameters for adaptation and loading. The current function is mainly to help users load and adapt different models.
    • -
    • process_before: Since prompt adapts to different prompt styles according to different types of evaluation tasks or different models selected by users, the 「process_before」function is extracted mainly to help users process prompts.
    • -
    • process_after:Due to the diversity of model generation results, in order to adapt to the evaluation framework, the generated result data can be spliced into appropriate use cases for automated operation. The current function mainly processes the generated results to adapt to the evaluation data set and results based on the task type and data set conditions.
    • -
    -

    You can extend the BaseProcessor in codefuseEval/processor/base.py and implement above functions

    -
      -
    • Set up the model information in ckpt_config.json. For example, as follows:
    • -
    -
    {
    -  "CodeFuse-13B": {     //model name
    -    "v1": {             //model version
    -      "path": "/mnt/model/CodeFuse13B-evol-instruction-4K/",       // model path
    -      "processor_class": "codefuseEval.process.codefuse13b.Codefuse13BProcessor",  // model processor
    -      "tokenizer": {                 // tokenizer params to token input string.
    -        "truncation": true,
    -        "padding": true,
    -        "max_length": 600
    -      },
    -      "generation_config": {        //generation config params. 
    -        "greedy": {                 //If JsonObject, it is a decode mode, you can set 「decode_mode」param to load params defined in the decode_mode.
    -          "do_sample": false,
    -          "num_beams": 1,
    -          "max_new_tokens": 512
    -        },
    -        "beams": {
    -          "do_sample": false,
    -          "num_beams": 5,
    -          "max_new_tokens": 600,
    -          "num_return_sequences": 1
    -        },
    -        "dosample": {
    -          "da_sample": true
    -        },
    -        "temperature": 0.2,          //If not JsonObject, it is a default param, we will set in generation_config default. You can cover param in decode_mode same name param.
    -        "max_new_tokens": 600,
    -        "num_return_sequences": 1,
    -        "top_p": 0.9,
    -        "num_beams": 1,
    -        "do_sample": true         
    -      },
    -      "batch_size": 1,            // batch size for generate
    -      "sample_num": 1,            // The number of samples generated by a single piece of data
    -      "decode_mode": "beams"      // choose decode mode defined in generation_config
    -    }
    -  }
    -

    Check dataset Command:

    -

    To check whether the reference values provided by the evaluation data set are correct, -we provide the following command to check the dataset.

    -

    CodeCompletion

    -
    bash codefuseEval/script/check_dataset.sh humaneval_python
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_java
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_js
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_rust
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_go
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_cpp
    -

    NL2Code

    -
    bash codefuseEval/script/check_dataset.sh mbpp
    -

    CodeTrans

    -
    bash codefuseEval/script/check_dataset.sh codeTrans_python_to_java
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_python_to_cpp
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_java
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_python
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_python
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_cpp
    -

    CodeScience

    -
    bash codefuseEval/script/check_dataset.sh codeCompletion_matplotlib
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_numpy
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_pandas
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_pytorch
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_scipy
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_sklearn
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_tensorflow
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_matplotlib
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_numpy
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_pandas
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_pytorch
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_scipy
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_sklearn
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_tensorflow
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-evalution-zh/index.html b/docs/docs/codefuse-evalution-zh/index.html deleted file mode 100644 index c1f85b4..0000000 --- a/docs/docs/codefuse-evalution-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/b10.codefuse-evalution/ - - - - - - diff --git a/docs/docs/codefuse-evalution/index.html b/docs/docs/codefuse-evalution/index.html deleted file mode 100644 index 3c8d1f1..0000000 --- a/docs/docs/codefuse-evalution/index.html +++ /dev/null @@ -1,620 +0,0 @@ - - - - - - - - -CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model

    -
    -
    - - -

    CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model

    - -

    CodeFuseEval is a Code Generation benchmark that combines the multi-tasking scenarios of CodeFuse Model with the benchmarks of HumanEval-x and MBPP. This benchmark is designed to evaluate the performance of models in various multi-tasking tasks, including code completion, code generation from natural language, test case generation, cross-language code translation, and code generation from Chinese commands, among others. Continuously open, stay tuned!

    -

    - English Introduction -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-mft-vlm-quickstart-zh/index.html b/docs/docs/codefuse-mft-vlm-quickstart-zh/index.html deleted file mode 100644 index 8cb8fdd..0000000 --- a/docs/docs/codefuse-mft-vlm-quickstart-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/codefuse-mft-vlm/%E5%BF%AB%E9%80%9F%E4%BD%BF%E7%94%A8/ - - - - - - diff --git a/docs/docs/codefuse-mft-vlm-quickstart/index.html b/docs/docs/codefuse-mft-vlm-quickstart/index.html deleted file mode 100644 index d96355b..0000000 --- a/docs/docs/codefuse-mft-vlm-quickstart/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /docs/codefuse-mft-vlm/quickstart/ - - - - - - diff --git a/docs/docs/codefuse-mft-vlm-zh/index.html b/docs/docs/codefuse-mft-vlm-zh/index.html deleted file mode 100644 index 7a130e5..0000000 --- a/docs/docs/codefuse-mft-vlm-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/codefuse-mft-vlm/ - - - - - - diff --git a/docs/docs/codefuse-mft-vlm/index.html b/docs/docs/codefuse-mft-vlm/index.html deleted file mode 100644 index 54f0abc..0000000 --- a/docs/docs/codefuse-mft-vlm/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /docs/overview/codefuse-mft-vlm/ - - - - - - diff --git a/docs/docs/codefuse-mft-vlm/quickstart/index.html b/docs/docs/codefuse-mft-vlm/quickstart/index.html deleted file mode 100644 index 7c3367f..0000000 --- a/docs/docs/codefuse-mft-vlm/quickstart/index.html +++ /dev/null @@ -1,890 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    Contents

    - -

    Install

    -

    Please run sh init_env.sh

    -

    Datasets

    -

    Here’s the table of datasets we used to train CodeFuse-VLM-14B:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    DatasetTask TypeNumber of Samples
    synthdog-enOCR800,000
    synthdog-zhOCR800,000
    cc3m(downsampled)Image Caption600,000
    cc3m(downsampled)Image Caption600,000
    SBUImage Caption850,000
    Visual Genome VQA (Downsampled)Visual Question Answer(VQA)500,000
    Visual Genome Region descriptions (Downsampled)Reference Grounding500,000
    Visual Genome objects (Downsampled)Grounded Caption500,000
    OCR VQA (Downsampled)OCR and VQA500,000
    -

    Please download these datasets on their own official websites.

    -

    Multimodal Alignment

    -

    Please run sh scripts/pretrain.sh or sh scripts/pretrain_multinode.sh

    -

    Visual Instruction Tuning

    -

    Please run sh scripts/finetune.sh or sh scripts/finetune_multinode.sh

    -

    Evaluation

    -

    Please run python scripts in directory llava/eval/. Our pre-trained CodeFuse-VLM-14B can be loaded with the following code:

    -
    import os
    -from llava.model.builder import load_mixed_pretrained_model
    -
    -model_path = '/pretrained/model/path'
    -tokenizer, model, image_processor, context_len = load_mixed_pretrained_model(model_path, None, 'qwen-vl-14b', os.path.join(model_path, 'Qwen-VL-visual'), 'cross_attn', os.path.join(model_path, 'mm_projector/mm_projector.bin'))
    -

    You can also run scripts/merge_qwen_vl_weights.sh first and load the merged model by the following code:

    -
    from llava.model import LlavaQWenForCausalLM
    -
    -model = LlavaQWenForCausalLM.from_pretrained('/path/to/our/pretrained/model')
    -

    CodeFuse-VLM Product Video

    -

    Here’s the demo video of front-end code copilot backed by our VLM model

    -

    https://private-user-images.githubusercontent.com/22836551/300398424-201f667d-6b6b-4548-b3e6-724afc4b3071.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjE5MTIsIm5iZiI6MTcwNjUyMTYxMiwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzOTg0MjQtMjAxZjY2N2QtNmI2Yi00NTQ4LWIzZTYtNzI0YWZjNGIzMDcxLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5NDY1MlomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWI0ZmJmZWNlNDZmNWM3NzA0OThlMmY1ODY4MDkxNWY5ZWNiNzRiYjJkYmE4NjEzM2EwYWRiNWY2ODc3N2ViYjEmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.BIvWGNx0XV7RoauxB0c2noEdbfZfu8-16LPHtCaCJ9k

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-config-zh/index.html b/docs/docs/codefuse-modelcache-config-zh/index.html deleted file mode 100644 index 0a64d05..0000000 --- a/docs/docs/codefuse-modelcache-config-zh/index.html +++ /dev/null @@ -1,730 +0,0 @@ - - - - - - - - -最佳配置 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    最佳配置

    -
    -
    - - -

    环境依赖

    -
      -
    • python版本: 3.8及以上
    • -
    • 依赖包安装: -pip install -r requirements.txt
    • -
    -

    服务启动

    -
      -
    • 在启动服务前,应该进行如下环境配置:
    • -
    • 安装关系数据库 mysql, 导入sql创建数据表,sql文件: reference_doc/create_table.sql
    • -
    • 安装向量数据库milvus
    • -
    • 在配置文件中添加数据库访问信息,配置文件为: -
        -
      • modelcache/config/milvus_config.ini
      • -
      • modelcache/config/mysql_config.ini
      • -
      -
    • -
    • 离线模型bin文件下载, 参考地址:https://huggingface.co/shibing624/text2vec-base-chinese/tree/main,并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中
    • -
    • 通过flask4modelcache.py脚本启动后端服务。
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-config/index.html b/docs/docs/codefuse-modelcache-config/index.html deleted file mode 100644 index 4df9816..0000000 --- a/docs/docs/codefuse-modelcache-config/index.html +++ /dev/null @@ -1,744 +0,0 @@ - - - - - - - - -How to better configure your cache · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    How to better configure your cache

    -
    -
    - - -

    Environment Dependencies

    -
      -
    • Python version: 3.8 or higher
    • -
    • To install dependencies: pip install -r requirements.txt
    • -
    -

    Service Startup

    -
      -
    • Before starting the service, the following environment configurations should be performed:
    • -
    • Install relational database MySQL, import SQL to create tables, SQL file: reference_doc/create_table.sql
    • -
    • Install vector database Milvus
    • -
    • Add database access information to the configuration files, which are: -
        -
      • modelcache/config/milvus_config.ini
      • -
      • modelcache/config/mysql_config.ini
      • -
      -
    • -
    • Download offline model bin files, refer to: https://huggingface.co/shibing624/text2vec-base-chinese/tree/main, and place the downloaded bin files into the model/text2vec-base-chinese folder
    • -
    • Start the backend service using the flask4modelcache.py script.
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-feature-zh/index.html b/docs/docs/codefuse-modelcache-feature-zh/index.html deleted file mode 100644 index be974be..0000000 --- a/docs/docs/codefuse-modelcache-feature-zh/index.html +++ /dev/null @@ -1,855 +0,0 @@ - - - - - - - - -功能特性 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    功能特性

    -
    -
    - - -

    功能方面,为了解决huggingface网络问题并提升推理速度,增加了embedding本地推理能力。鉴于SqlAlchemy框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在ModelCache中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模块功能
    ModelCacheGPTCache
    基础接口数据查询接口
    数据写入接口
    Embeddingembedding模型配置
    大模型embedding层
    bert模型长文本处理
    Large model invocation是否与大模型解耦
    embedding模型本地加载
    数据隔离模型数据隔离
    超参数隔离
    数据库MySQL
    Milvus
    OceanBase
    会话管理单轮会话
    system指令
    多轮会话
    数据管理数据持久化
    一键清空缓存
    租户管理支持多租户(多模型)
    milvus多表能力
    其他长短对话区分能力
    -

    核心功能

    -

    在ModelCache中,沿用了GPTCache的主要思想,包含了一系列核心模块:adapter、embedding、similarity和data_manager。adapter模块主要功能是处理各种任务的业务逻辑,并且能够将embedding、similarity、data_manager等模块串联起来;embedding模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank模块用于对召回的向量进行相似度排序和评估;data_manager模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下:

    -
      -
    • 架构调整(轻量化集成):以类redis的缓存模式嵌入到大模型产品中,提供语义缓存能力,不会干扰LLM调用和安全审核等功能,适配所有大模型服务。
    • -
    • 多种模型加载方案: -
        -
      • 支持加载本地embedding模型,解决huggingface网络连通问题
      • -
      • 支持加载多种预训练模型embeding层
      • -
      -
    • -
    • 数据隔离能力 -
        -
      • 环境隔离:可依据环境,拉取不同的数据库配置,实现环境隔离(开发、预发、生产)
      • -
      • 多租户数据隔离:根据模型动态创建collection,进行数据隔离,用于大模型产品中多个模型/服务数据隔离问题
      • -
      -
    • -
    • 支持系统指令:采用拼接的方式,解决prompt范式中sys指令问题。
    • -
    • 长短文本区分:长文本会给相似评估带来更多挑战,增加了长短文本的区分,可单独配置判断阈值。
    • -
    • milvus性能优化:milvus consistency_level调整为"Session"级别,可以得到更好的性能。
    • -
    • 数据管理能力: -
        -
      • 一键清空缓存的能力,用于模型升级后的数据管理。
      • -
      • 召回hitquery,用于后续的数据分析和模型迭代参考。
      • -
      • 异步日志回写能力,用于数据分析和统计
      • -
      • 增加model字段和数据统计字段,用于功能拓展。
      • -
      -
    • -
    -

    未来会持续建设的功能:

    -
      -
    • 基于超参数的数据隔离
    • -
    • system prompt分区存储能力,以提高相似度匹配的准确度和效率
    • -
    • 更通用的embedding模型和相似度评估算法
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-feature/index.html b/docs/docs/codefuse-modelcache-feature/index.html deleted file mode 100644 index 45d9242..0000000 --- a/docs/docs/codefuse-modelcache-feature/index.html +++ /dev/null @@ -1,886 +0,0 @@ - - - - - - - - -Feature · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Feature

    -
    -
    - - -

    From a functional standpoint, to address Huggingface network issues and improve inference speed, local inference capabilities for embeddings have been added. Given some limitations in the SQLAlchemy framework, we have rewritten the relational database interaction module for more flexible database operations. In practice, large model products need to interface with multiple users and models; thus, support for multi-tenancy has been added to ModelCache, as well as preliminary compatibility with system commands and multi-turn conversations.

    -

    Below is a feature comparison table for ModelCache and GPTCache modules:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModuleFunction
    ModelCacheGPTCache
    Basic InterfaceData query interface
    Data writing interface
    EmbeddingEmbedding model configuration
    Large model embedding layer
    BERT model long text processing
    Large model invocationDecoupling from large models
    Local loading of embedding model
    Data isolationModel data isolation
    Hyperparameter isolation
    DatabasesMySQL
    Milvus
    OceanBase
    Session managementSingle-turn dialogue
    System commands
    Multi-turn dialogue
    Data managementData persistence
    One-click cache clearance
    Tenant managementSupport for multi-tenancy
    Milvus multi-collection capability
    OtherLong-short dialogue distinction
    -

    Core Features

    -

    In ModelCache, the main ideas of GPTCache are carried forward, including a series of core modules: adapter, embedding, similarity, and data_manager. The adapter module’s main function is to handle the business logic for various tasks and connect modules like embedding, similarity, and data_manager; the embedding module is responsible for converting text into semantic vector representations, transforming user queries into vectors for recall or storage; the rank module ranks and evaluates the similarity of recalled vectors; the data_manager module manages the database. To better industrialize, we’ve made architectural and functional upgrades as follows:

    -
      -
    • -

      Architectural Adjustment (Lightweight Integration): Embedded in large model products in a cache mode similar to Redis, it provides semantic caching capabilities without interfering with LLM invocation and security audits, adaptable to all large model services.

      -
    • -
    • -

      Multiple Model Loading Schemes:

      -
        -
      • Support for loading local embedding models to resolve Huggingface connectivity issues.
      • -
      • Support for loading various pre-trained model embedding layers.
      • -
      -
    • -
    • -

      Data Isolation Capabilities:

      -
        -
      • Environmental Isolation: Depending on the environment, different database configurations can be pulled to achieve isolation (development, staging, production).
      • -
      • Multi-Tenant Data Isolation: Dynamically create collections according to the model to isolate data, addressing data isolation issues for multiple models/services in large model products.
      • -
      -
    • -
    • -

      Support for System Commands: Using concatenation to solve system command issues within the prompt paradigm.

      -
    • -
    • -

      Distinguishing Long and Short Texts: Long texts pose more challenges to similarity assessment, so the differentiation between long and short texts has been enhanced, allowing separate configuration of judgment thresholds.

      -
    • -
    • -

      Performance Optimization for Milvus: Adjusting Milvus’s consistency_level to “Session” level for better performance.

      -
    • -
    • -

      Data Management Capabilities:

      -
        -
      • One-click cache clearing ability for data management after model upgrades.
      • -
      • Recall hit queries for subsequent data analysis and model iteration reference.
      • -
      • Asynchronous log write-back capability for data analysis and statistics.
      • -
      • Added model fields and data statistics fields for feature expansion.
      • -
      • Future features that will continue to be built upon include:
      • -
      -
    • -
    • -

      Data isolation based on hyperparameters.

      -
    • -
    • -

      System prompt partitioned storage capability to improve the accuracy and efficiency of similarity matching.

      -
    • -
    • -

      More versatile embedding models and similarity evaluation algorithms.

      -
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-quickstart-zh/index.html b/docs/docs/codefuse-modelcache-quickstart-zh/index.html deleted file mode 100644 index bb66a12..0000000 --- a/docs/docs/codefuse-modelcache-quickstart-zh/index.html +++ /dev/null @@ -1,730 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    ModelCache易于使用,只需1步骤即可构建缓存测试Demo

    -

    快速开始

    -

    构建Cache

    -

    Cache的默认接口如下所示:

    -
    class Cache:
    -    # it should be called when start the cache system
    -    def __init__(self):
    -        self.has_init = False
    -        self.cache_enable_func = None
    -        self.embedding_func = None
    -        self.post_process_messages_func = None
    -        self.config = Config()
    -

    在创建ModelCache之前,请考虑以下问题:

    -
      -
    • 你将如何为查询生成嵌入向量?(embedding_func) 该函数将文本嵌入到一个用于上下文相似性搜索的密集向量中。ModelCache可以支持多种嵌入上下文的方法:Huggingface、ONNX和SentenceTransformers。默认逻辑中,使用了在中文领域表现更好的huggingface中的text2vec模型。只需将你的嵌入函数初始化为:text2vec.to_embeddings
    • -
    -
    data_manager = get_data_manager(CacheBase("mysql", config=mysql_config),
    -                                VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config))
    -
    -cache.init(
    -    embedding_func=data2vec.to_embeddings,
    -    data_manager=data_manager,
    -    similarity_evaluation=SearchDistanceEvaluation(),
    -    query_pre_embedding_func=query_multi_splicing,
    -    insert_pre_embedding_func=insert_multi_splicing,
    -)
    -
      -
    • 你将在哪里缓存数据?(data_manager缓存存储) 缓存存储用于存储所有标量数据,例如原始问题、提示、答案和访问时间。ModelCache支持多种缓存存储选项,如SQLite、MySQL和OceanBase。未来还将添加更多的NoSQL数据库选项。
    • -
    • 你将在哪里存储和搜索向量嵌入?(data_manager向量存储) 向量存储组件用于存储和搜索所有嵌入向量,以便在语义上找到最相似的结果。ModelCache支持使用FAISS等向量搜索库或Milvus等向量数据库。未来还将添加更多的向量数据库和云服务选项。
    • -
    -

    以下是一些示例:

    -
    data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension))
    -data_manager = get_data_manager(CacheBase("oceanbase"), VectorBase("milvus", dimension=data2vec.dimension))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-quickstart/index.html b/docs/docs/codefuse-modelcache-quickstart/index.html deleted file mode 100644 index 3a5cf88..0000000 --- a/docs/docs/codefuse-modelcache-quickstart/index.html +++ /dev/null @@ -1,740 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    ModelCache is easy to use, and you can build a cache testing demo in just one step.

    -

    Quick Start

    -

    Building a Cache

    -

    The default interface for Cache is shown below:

    -
    class Cache:
    -    # it should be called when start the cache system
    -    def __init__(self):
    -        self.has_init = False
    -        self.cache_enable_func = None
    -        self.embedding_func = None
    -        self.post_process_messages_func = None
    -        self.config = Config()
    -

    Before creating a ModelCache, consider the following questions:

    -

    How will you generate embedding vectors for queries? (embedding_func) This function embeds text into a dense vector for contextual similarity search. ModelCache can support various methods of embedding context: Huggingface, ONNX, and SentenceTransformers. In the default logic, the text2vec model from huggingface, which performs better in the Chinese domain, is used. Simply initialize your embedding function to: text2vec.to_embeddings

    -
    data_manager = get_data_manager(CacheBase("mysql", config=mysql_config),
    -                                VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config))
    -cache.init(
    -    embedding_func=data2vec.to_embeddings,
    -    data_manager=data_manager,
    -    similarity_evaluation=SearchDistanceEvaluation(),
    -    query_pre_embedding_func=query_multi_splicing,
    -    insert_pre_embedding_func=insert_multi_splicing,
    -)
    -

    Where will you cache data? (data_manager cache storage) The cache storage is used to store all scalar data such as original questions, prompts, answers, and access times. ModelCache supports multiple cache storage options like SQLite, MySQL, and OceanBase. More NoSQL database options will be added in the future. -Where will you store and search vector embeddings? (data_manager vector storage) The vector storage component is used to store and search all embedding vectors to semantically find the most similar results. ModelCache supports vector search libraries like FAISS or vector databases like Milvus. More vector database and cloud service options will be added in the future. -Here are some examples:

    -
    data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension))
    -data_manager = get_data_manager(CacheBase("oceanbase"), VectorBase("milvus", dimension=data2vec.dimension))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-release-zh/index.html b/docs/docs/codefuse-modelcache-release-zh/index.html deleted file mode 100644 index ee3d1bb..0000000 --- a/docs/docs/codefuse-modelcache-release-zh/index.html +++ /dev/null @@ -1,765 +0,0 @@ - - - - - - - - -最佳配置 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    最佳配置

    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    时间功能版本号
    20230430完成GPTCache调研,开源流程在OpenAI接口上跑通,单节点形式
    202305091、完成技术选型及上下游交互方案
    2、重新开发数据库模块,替换SQLalchemy框架
    3、重构llm_handler模块,兼容codegpt,适配codegpt模型参数
    V0.1.0
    202305191、根据环境动态选择codegpt服务模式
    2、模型本地加载能力,以及预加载能力
    3、增加本地路径依据环境动态加载能力
    V0.1.1
    202305221、架构优化,调整为类redis结构,解耦大模型调用
    2、关系数据库由sqlite切换至OceanBase
    3、向量数据库由faiss切换至milvus
    4、模型数据隔离能力
    5、增加核心模块adapter_query、adapter_insert
    V0.2.0
    202305311、线上环境上线,动态感知能力
    2、embedding模型评测及选型
    3、增加预发环境及数据隔离能力
    4、增加原始query字段透出能力
    V0.2.1
    202306071、优化关系数据库访问性能
    2、优化环境和模型隔离能力
    V0.2.2
    202306301、在modelCache中增加大模型embedding层适配模块
    2、增加采纳率统计能力
    V0.2.3
    202307301、增加缓存统计功能
    2、增加数据删除功能接口
    3、缓存一键清空能力上线
    4、多轮会话能力研发,支持system指令和多轮对话
    v0.3.0
    202308301、增加异步处理能力,性能提升超20%
    2、架构变更,解耦embedding推理和业务处理逻辑
    3、黑名单过滤功能
    V0.3.1
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-release/index.html b/docs/docs/codefuse-modelcache-release/index.html deleted file mode 100644 index 48cc118..0000000 --- a/docs/docs/codefuse-modelcache-release/index.html +++ /dev/null @@ -1,780 +0,0 @@ - - - - - - - - -Release Note · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Release Note

    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    时间功能版本号
    20230430Completed GPTCache research, open-source process running through OpenAI interface, single-node form
    202305091. Completed technology selection and upstream/downstream interaction scheme
    2. Redeveloped database module, replaced SQLAlchemy framework
    3. Refactored llm_handler module, compatible with codegpt, adapted codegpt model parameters
    V0.1.0
    202305191. Dynamically selected codegpt service mode based on environment
    2. Capability for local model loading and pre-loading
    3. Added dynamic loading capability for local paths based on environment
    V0.1.1
    202305221. Architecture optimized, adjusted to a Redis-like structure, decoupled large model invocation
    2. Switched relational database from SQLite to OceanBase
    3. Switched vector database from FAISS to Milvus
    4. Model data isolation capability
    5. Added core modules adapter_query, adapter_insert
    V0.2.0
    202305311. Online environment launched with dynamic sensing capability
    2. Embedding model evaluation and selection
    3. Added staging environment and data isolation capability
    4. Added exposure capability for the original query field
    V0.2.1
    202306071. Optimized relational database access performance
    2. Optimized environment and model isolation capabilities
    V0.2.2
    202306301. Added large model embedding layer adaptation module in modelCache
    2. Added adoption rate statistical capability
    V0.2.3
    202307301. Added cache statistics feature
    2. Added data deletion function interface
    3. One-click cache clearing capability launched
    4. Developed multi-turn conversation ability, supporting system commands and multi-turn dialogues
    v0.3.0
    202308301. Added asynchronous processing capability, performance improved by over 20%
    2. Architecture change, decoupled embedding inference and business processing logic
    3. Blacklist filtering feature
    V0.3.1
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-modelcache-zh/index.html b/docs/docs/codefuse-modelcache-zh/index.html deleted file mode 100644 index d4fbe7a..0000000 --- a/docs/docs/codefuse-modelcache-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/codefuse-modelcache-zh/ - - - - - - diff --git a/docs/docs/codefuse-modelcache/index.html b/docs/docs/codefuse-modelcache/index.html deleted file mode 100644 index 52b562a..0000000 --- a/docs/docs/codefuse-modelcache/index.html +++ /dev/null @@ -1,582 +0,0 @@ - - - - - - - - -CodeFuse-ModelCache · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-ModelCache

    -
    -
    - - -

    CodeFuse-ModelCache

    -

    CodeFuse-ModelCache

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-godellanguage-zh/index.html b/docs/docs/codefuse-query-godellanguage-zh/index.html deleted file mode 100644 index 31a43bb..0000000 --- a/docs/docs/codefuse-query-godellanguage-zh/index.html +++ /dev/null @@ -1,2762 +0,0 @@ - - - - - - - - -查询语言介绍 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    查询语言介绍

    -
    -
    - - -

    GödelScript 查询语言

    -

    目录

    - -

    GödelScript 基本概念和语法

    -

    简介

    -
    // script
    -fn hello(greeting: string) -> bool {
    -    return greeting = "hello world!"
    -}
    -
    -fn main() {
    -    output(hello())
    -}
    -

    GödelScript 即 Gödel 查询语言。GödelScript 是 CodeQuery 用于查询和数据处理的领域专用语言 (DSL)。GödelScript 使用了类 Rust 的语法,提供了严格的类型检查、方便快捷的类型推导、智能友好的错误提示信息,使用户能够快速上手。

    -

    GödelScript 编译器主要应用场景为:

    -
      -
    1. 面向用户编写简单或复杂查询,提供更便捷的写法,提高编写查询的效率;
    2. -
    3. 提供严格类型检查与类型推导,给予更智能的代码修改提示;
    4. -
    5. 提供严格的 ungrounded(未赋值/未绑定) 检测,避免触发 Soufflé Ungrounded Error;
    6. -
    7. Language Server 以及 IDE Extension 支持。
    8. -
    -

    基本程序构成

    -

    程序结构

    -

    GödelScript 程序可能包含:

    - -

    包含以上所有组成内容的样例:

    -
    // script
    -// 包引入/符号引入
    -use coref::java::* // 引入所有符号
    -use coref::java::{JavaDB, Class} // 选择性引入符号
    -
    -// 函数声明
    -fn default_db() -> JavaDB {
    -    return JavaDB::load("example.db")
    -}
    -
    -// schema 声明
    -schema File {
    -    @primary id: int
    -}
    -
    -// database 声明
    -database NewDB {
    -    file: *File
    -}
    -
    -// trait 声明
    -trait FileTrait {
    -    fn getId(self) -> int;
    -}
    -
    -// impl trait for
    -impl FileTrait for File {
    -    fn getId(self) -> int {
    -        return self.id
    -    }
    -}
    -
    -// impl
    -impl File {
    -    @data_constraint
    -    fn all() -> *File {
    -        yield File {id: 1}
    -        yield File {id: 2}
    -    }
    -}
    -
    -// query
    -query get_all_anno from
    -    Annotation anno in Annotation(default_db())
    -select
    -    anno.id as id
    -

    注释

    -

    GödelScript 采用类 C 语言的注释方式。

    -
    // 单行注释
    -
    -/*
    -* 1. 多行注释
    -* 2. 多行注释
    -*/
    -

    main 函数

    -

    GödelScript 查询脚本可以包含main函数,该函数无返回值。在不实现main函数,且没有写 query 声明的情况下,程序不会输出。

    -

    更多详细内容请看 main 函数

    -
    fn main() {
    -    output(query_1())
    -    output(query_2())
    -}
    -

    基础类型和编译器内建函数

    -

    GödelScript 包含基础类型 int 和 string。bool 也属于基础类型,但是不能作为值存储。

    -

    int类型 native 函数

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    函数类型解释
    pow(int, int) -> int乘方。参数只能非负数。
    rem(int, int) -> int取余。
    bitand(int, int) -> int按位与。
    bitor(int, int) -> int按位或。
    bitxor(int, int) -> int按位异或。
    bitnot(int) -> int按位非。
    neg(int) -> int算术取反。
    to_string(int) -> string转换为字符串。
    add(int, int) -> int+
    sub(int, int) -> int-
    mul(int, int) -> int*
    div(int, int) -> int/
    eq(int, int) -> bool=
    ne(int, int) -> bool!=
    gt(int, int) -> bool>
    ge(int, int) -> bool>=
    lt(int, int) -> bool<
    le(int, int) -> bool<=
    to_set(int) -> *int转为集合类型。
    -

    string类型 native 函数

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    函数类型解释
    len(string) -> int获取字符串长度。
    substr(string, int, int) -> string通过初始index和length来截取字符串。
    contains(string, string) -> bool判断一个字符串是否被包含在当前字符串中。
    matches(string, string) -> bool判断正则字符串是否完全匹配当前字符串。
    get_regex_match_result(string, string, int) -> string获取被正则字符串完全匹配当前字符串时的某一个捕获结果,该结果由第二个参数(int)确定。如 “abcdef”.get_regex_match_result(“a(.*)f”, 1) 的结果是 “bcde”。
    to_int(string) -> int转换为整数。
    add(string, string) -> string字符串拼接。
    eq(string, string) -> bool判断字符串相等。
    ne(string, string) -> bool判断字符串不相等。
    to_set(string) -> *string转为集合类型。
    -

    bool类型 native 函数

    -

    bool虽然作为基础类型存在,但是该类型不能作为数据参与中间计算,只能作为条件结果。

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    函数类型解释
    not(bool) -> bool条件取反。
    and(bool, bool) -> bool条件与。
    or(bool, bool) -> bool条件或。
    eq(bool, bool) -> bool相等。
    ne(bool, bool) -> bool不相等。
    -

    作用于集合的 native 函数

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    函数类型解释
    len(*T) -> int获取数据集合的数量。
    max(*int) -> int查找最大值。
    min(*int) -> int查找最小值。
    sum(*int) -> int求和。
    find(*T0) -> T1从一个集合中,通过主键查找数据。
    -

    全局 native 函数

    - - - - - - - - - - - - - - - -
    函数类型解释
    output((…) -> bool) -> 输出 query 内容。
    -

    database 的 native 函数

    - - - - - - - - - - - - - - - -
    函数类型解释
    load(string) -> T加载 database 。
    -

    schema 的 native 函数

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    函数类型解释
    to(self) -> T转换到其他类型的 schema,采用 duck type 检测。
    is(self) -> bool判断是否可以是其他类型的 schema,采用 duck type 检测。如果自身 schema 有主键,则底层只会通过主键判断是否可以是其他类型。
    key_eq(self, T) -> bool检查两个 schema 实例的主键是否相等。
    key_neq(self, T) -> bool检查两个 schema 实例的主键是否不等。
    -

    schema native 函数实例:

    -
    use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -fn example() -> bool {
    -    for(stmt in StatementParent(default_java_db())) {
    -        if (stmt.is<ElementParent>()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn convert() -> *ElementParent {
    -    for(stmt in StatementParent(default_java_db())) {
    -        yield stmt.to<ElementParent>()
    -    }
    -}
    -

    函数

    -

    GödelScript main 函数

    -

    main函数是 GödelScript 中唯一不声明返回值的函数。main函数只允许使用output,其他语句会导致编译错误;多次使用output(...)可以输出多个查询结果,查询结果会分表显示,表名即为output中调用的查询函数的函数名。

    -

    查询函数

    -

    查询函数的返回值类型推荐为bool,需要输出查询结果时,需要使用output()函数。

    -

    output()中调用的查询函数不再是常规思路中的用传参调用函数。参数列表在此时会变化为输出表的表结构,下面是两个查询函数的应用实例:

    -
      -
    1. -

      单表output

      -

      单表output特指在main函数中,只使用一次output来输出。

      -
      fn example(a: int, b: string) -> bool {...}
      -
      -fn main() {
      -    output(example()) // 此时参数列表变为输出表结构,不需要传参
      -}
      -

      对应的输出表结构为:

      -
      [
      -    {"a": 0, "b": "xxx"},
      -    {"a": 1, "b": "xxx"}
      -]
      -
    2. -
    3. -

      多表output

      -

      多表output是指在main函数中,使用多次output来输出。在这种情况下,输出数据会附带对应的表名。

      -
      fn example0(a: int, b: string) -> bool {...}
      -fn example1(a: string, b: int) -> bool {...}
      -
      -fn main() {
      -    output(example0())
      -    output(example1())
      -}
      -

      对应的输出表结构为:

      -
      {
      -    "example0":[
      -        {"a": 0, "b": "xxx"},
      -        {"a": 1, "b": "xxx"}
      -    ],
      -    "example1":[
      -        {"a": "xxx", "b": 0},
      -        {"a": "xxx", "b": 1}
      -    ]
      -}
      -
    4. -
    -

    下面是一个比较详细的例子,在这个例子中,我们直接构造了两组数据并输出。在下列代码中,需要注意的是:

    -
      -
    1. -

      GödelScript 中,布尔值可以使用truefalse关键字。

      -
    2. -
    3. -

      =符号在 GödelScript 中是比较特殊的符号,不能用常规的编程语言的思路来理解。GödelScript 是一种 Datalog 语言。在这里,=符号同时具备两种语义,一个是 赋值 一个是 判等。详情可看=运算符

      -
    4. -
    5. -

      在这个例子的条件语句中,ab均使用了=的赋值语义,因为intstring类型参数在函数体中被认为是ungrounded(未赋值/未绑定),必须要被赋值才能使用。

      -
    6. -
    7. -

      =赋值语句的返回值是true

      -
    8. -
    -
    fn example(a: int, b: string) -> bool {
    -    // = 符号同时具有赋值和比较的功能,取决于此时的左值是否已经“被赋值”
    -    // 这里的 a 和 b 所用的 = 符号均是赋值语义
    -    if (a = 1 && b = "1") {
    -        // GödelScript 使用关键字 true 和 false 来表示布尔值
    -        return true
    -    }
    -    if (a = 2 && b = "2") {
    -        return true
    -    }
    -}
    -
    -fn main() {
    -    output(example())
    -}
    -

    预期的输出结果应该为:

    -
    [
    -    {"a": 1, "b": "1"},
    -    {"a": 2, "b": "2"}
    -]
    -

    普通函数

    -

    普通函数用于封装一些复杂过程,这些函数必须要有明确的返回类型。 -其中返回类型可以存在两种情况:

    -
      -
    1. 单个返回值,箭头后面声明返回值类型。
    2. -
    -
    fn getFile(c: Class) -> File {
    -    return c.getRelativePath()
    -}
    -
      -
    1. 返回集合,箭头后面的返回值类型前需要加上*以表明其返回的是一个集合。
    2. -
    -
    fn getAllFiles(db: JavaDB) -> *File {
    -    for (f: File in File(db)) {
    -        yield f
    -    }
    -}
    -

    一般情况下要求单返回值使用return语句,而多返回值/返回集合使用yield语句。 -实际使用中,由于 GödelScript 底层使用 Datalog 引擎,故任何的运算都是基于集合的,单返回值实际上仅仅意味着返回的集合可能只包含一个数据,但是也可能包含多个数据。

    -

    语句

    -

    for 语句:从集合中声明变量

    -

    GödelScript 使用for关键字和类似循环语句的语法来从集合中声明变量:

    -
    for(f: File in getAllFiles()) {
    -    ...
    -}
    -

    其中f: File,冒号后面跟着的是f的类型,可省略。 -for语句中允许直接定义多个变量,后面定义的变量在初始化时可使用同一语句中在它前面定义的所有变量:

    -
    for(a in XmlAttribute(db), b in XmlAttribute(db), c in XmlElement(db)) {
    -    ...
    -}
    -
    -for(a in getAllFiles(), b in a.getAllPaths()) {
    -    ...
    -}
    -

    let 语句:声明单一变量

    -

    GödelScript 使用 let关键字来声明一个单一/中间变量:

    -
    let(f: File = c.getRelativePath()) {
    -    ...
    -}
    -

    其中f: File,冒号后面的类型可省略。 -let语句中允许直接定义多个变量,后面定义的变量在初始化时可使用同一语句中在它前面定义的所有变量:

    -
    let(a = 1, b = a + 1, c = b + 1) {
    -    ...
    -}
    -

    if 语句

    -

    GödelScript 的条件语句与许多过程式程序语言类似:

    -
    if (f.getName().contains("util") || f.getName().contains("com")) {
    -    ...
    -}
    -

    条件可以使用这些逻辑运算符进行连接:!取反,||或,&&与。

    -

    条件中的比较运算符:>大于,<小于,>=大于等于,<=小于等于,=等于或者赋值,!=不等于。

    -

    常规算术运算可以使用如下运算符:+加法,-减法/取负,*乘法,/除法。

    -
    赋值和判等运算符=
    -

    =符号在 GödelScript 中具有两种不同的语义:赋值和判等,具体的语义需要分情况进行讨论:

    -
      -
    1. -

      赋值

      -

      赋值一般出现在int string这类基础类型的变量参数上,这类变量作为函数的参数出现时,一般被认为是未赋值的。而具有这类变量的函数被调用时,传入的参数,实际上是作为筛选条件存在。

      -
      fn example(a: int) -> bool {
      -    // 这里比较反直觉,在过程式语言中,这里通常会被认为是判断 a == 1
      -    // 但是在 datalog 方言中,datalog 的每个函数实际上都是在算一个中间表 (view)
      -    // 所以这个函数本质上是生成了一个 view,数据为 [{"a": 1}]
      -    return a = 1 // assign a = 1
      -}
      -
      -fn test() -> bool {
      -    // 这里看似是在通过传参让 a = 2,实际上并不是
      -    // example() 自己会返回 view: [{"a": 1}]
      -    // 然后通过 a = 2 来约束结果,可以看到,我们这里没有拿到任何结果
      -    // 所以返回了 false
      -    return example(2) // false
      -}
      -
    2. -
    3. -

      判等

      -

      对于 schema 类型来说,任何一个 schema 背后都有一个全集,所以参数列表中的 schema 类型一般被认为是已经被赋值的。对于已经赋值的变量来说,=就是判等操作。

      -
      // 声明 schema
      -schema A {...}
      -
      -// 实现 schema 的成员函数
      -impl A {
      -    // 这里定义了 schema A 的全集
      -    @data_constraint
      -    pub fn __all__() -> *A {...}
      -}
      -
      -fn example(a: A) -> bool {
      -    for(temp in A::__all__()) {
      -        if (a = temp) {
      -            return true
      -        }
      -    }
      -}
      -

      同样,对于中间声明的有初始值的int或者string=也是判等操作。

      -
      fn example() -> bool {
      -    let (a = 1) { // assign a = 1
      -        if (a = 1) { // compare a = 1
      -            return true
      -        }
      -    }
      -}
      -
    4. -
    -

    match 语句

    -

    GödelScript 允许对intstring类型编写 match 语句,match 语句是类似 switch 的多条件分支语句,match 的条件必须为字面量:

    -
    match(a) {
    -    1 => return 0,
    -    2 => return 1,
    -    3 => if (a + 1 < 10) {
    -        return 10
    -    }
    -}
    -

    返回语句

    -

    GödelScript 使用 return 和 yield:return 用于单个返回值的函数,yield 用于集合的返回。

    -
    fn a() -> int {
    -    return 0
    -}
    -
    -fn b() -> *int {
    -    yield 1
    -    yield 2
    -    yield 3
    -}
    -

    Schema

    -

    Schema 是 GödelScript 中的复杂数据表的结构。

    -

    结构声明

    -

    GödelScript 使用schema关键字来声明一个表结构:

    -
    schema File {
    -    id: int,
    -    name: string
    -}
    -

    如果某个字段在数据库中是作为主键存在的,可以使用@primary注解来表明其为主键:

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -

    有主键的表结构会使得查询速度得到显著提升,所以尽量绑定一个主键,主键应尽量为**int**类型。

    -

    方法实现

    -

    GödelScript 使用如下方式来声明和实现schema的相关方法:

    -
    impl File {
    -    // 静态方法
    -    fn f1() -> ... {...}
    -    // 成员方法,第一个参数必须为 self
    -    fn f2(self) -> ... {...}
    -    ...
    -}
    -
    静态方法
    -

    静态方法不需要self作为第一个参数,使用方式很简单,类名::方法名(...)

    -
    impl File {
    -    fn getSchemaName() -> string {
    -        return "File"
    -    }
    -}
    -
    -fn out(t: string) -> bool {
    -    if (t = File::getSchemaName()) {
    -        return true
    -    }
    -}
    -
    成员方法
    -

    成员方法的第一个参数必须为self,该参数无需写明类型。这类函数的调用方式是实例名.函数名(...)

    -
    impl File {
    -    fn getName(self) -> string {
    -        return self.name
    -    }
    -}
    -
    -fn out(path: string) -> bool {
    -    let (db = JavaDB::load("coref_java_src.db")) {
    -        for (f in File::__all__(db)) {
    -            if (path = f.getName()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    数据加载方法 fn __all__(db)
    -

    schema可以包含一个特别的静态方法,用于加载它在数据库中的数据集。

    -
    impl File {
    -    @data_constraint
    -    fn __all__(db: JavaDB) -> *File {
    -        ...
    -    }
    -}
    -

    这种方法必须包含特殊注解@data_constraint,表明该方法专用于加载,如果不写该注解,则该方法的返回为空集合。该方法返回类型必须为其本身的集合。

    -

    包含了该方法的schema可以使用一个语法糖来获取其全集:

    -
    fn out() -> bool {
    -    for(f in File(JavaDB::load("..."))) {
    -        ...
    -    }
    -    ...
    -}
    -// 等价于
    -fn out() -> bool {
    -    for(f in File::__all__(JavaDB::load("..."))) {
    -        ...
    -    }
    -    ...
    -}
    -
    自定义全集方法
    -

    schema允许使用不同于__all__名称的静态方法来表明一些集合也存在于该类型的全集中。该方法也必须包含特殊注解@data_constraint。该方法一般用于手动添加一些数据到该类型的全集中。

    -
    impl File {
    -    @data_constraint
    -    fn extend_example() -> *File {
    -        yield File {id: 1234567}
    -    }
    -}
    -

    构造匿名实例

    -

    GödelScript 允许用一个特定语法生成匿名实例。生成匿名实例的前提是该实例存在于该schema的全集中,除非该用法出现在@data_constraint方法中,否则结果为空。

    -
    schema A {
    -    @primary id: int,
    -    name: string
    -}
    -

    对应的应该使用如下语法来进行匿名实例的生成:

    -
    A {id: 1, name: "first"}
    -

    Schema 继承

    -

    GödelScript 中,schema继承非常便捷,使用样例如下:

    -
    schema MyFile extends File {}
    -
    父类 Field 继承
    -

    子类会默认将父类的所有 field 继承下来。所以无需手动重写。

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -
    -schema MyFile extends File {}
    -
    父类 Method 继承
    -

    子类会默认继承父类的所有 method,除了标注@data_constraint的方法。所以无需手动重写。但是需要注意的是,__all__方法较为特殊,不会被继承,所以需要重新编写__all__方法确定继承后的 schema 的全集。

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -
    -impl File {
    -    @data_constraint
    -    fn __all__() -> *File {...}
    -    fn getId(self) -> int {...}
    -    fn staticMethod() -> string {return "File"}
    -}
    -
    -schema MyFile extends File {}
    -
    Method Override
    -

    如果子类的实现中存在与父类同名的方法,则父类的方法会被子类方法覆盖

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -
    -impl File {
    -    fn staticMethod() -> string {return "File"}
    -}
    -
    -schema MyFile extends File {}
    -
    -impl MyFile {
    -    fn staticMethod() -> string {return "MyFile"}
    -}
    -

    此时File::staticMethod被MyFile::staticMethod覆盖,所以调用子类的该方法时,获取的结果为"MyFile"

    -

    数据库

    -

    数据库声明

    -

    数据库的声明格式如下:

    -
    database DatabaseName {
    -    // table_name 对应的是 db 中真实的表名
    -    // GodelSchemaType 对应的是将表数据读入 godel 后,存储的对应的 schema
    -    table_name : *GodelSchemaType
    -}
    -

    冒号前是加载的数据库中的真实表名,冒号后是其对应的数据表格式,必须为schema类型。例如 db 中存在一张表,名字为annotation,对应的schema为Annotation,写法为:

    -
    database JavaDB {
    -    // 从 db 的 annotation 表中读取数据,存入 Annotation 中
    -    annotation : *Annotation
    -}
    -

    另外需要保证Annotation结构必须和表结构一致,例如:

    -
    schema Annotation {
    -    @primary id: int, // primary注解表示该字段为主键,一个表也可以没有主键
    -    content: string
    -}
    -

    就必须要求annotation表中有id和content字段,并且存储类型必须对应。

    -

    数据库加载

    -

    数据库类型拥有静态方法(database)::load(filename: string)

    -
    fn loadDatabaseExample() -> bool {
    -    // load 中传入的 string 为 db 的文件名,而不需要路径
    -    // 因为 db 的路径会在执行 godel 时,通过命令行参数传入
    -    let (db: JavaDB = JavaDB::load("...")) {
    -        ...
    -    }
    -}
    -

    数据表获取

    -

    上文中的例子中,要拿到annotation表,可以这样做:

    -
    fn getAnnotation() -> Annotation {
    -    // load 中传入的 string 为 db 的文件名,而不需要路径
    -    // 因为 db 的路径会在执行 godel 时,通过命令行参数传入
    -    let (db: JavaDB = JavaDB::load("...")) {
    -        // 直接使用 db.field 就可以拿到表数据了
    -        for (anno: Annotation in db.annotation) {
    -            ...
    -        }
    -    }
    -}
    -

    Trait

    -

    Trait 声明

    -

    trait声明语法如下:

    -
    trait Example {
    -    fn getId(self) -> int;
    -    fn getName(self) -> string;
    -    fn getValueByName(self, name: string) -> string;
    -}
    -

    Impl Trait

    -

    写法与impl类似,但是必须要将trait中声明的所有函数都实现出来,否则无法通过编译。

    -
    impl Example for XmlElement {
    -    fn getId(self) -> int {return self.id}
    -    fn getName(self) -> string {return self.name}
    -    fn getValueByName(self, name: string) -> string {
    -        for(attr in XmlAttribute(XmlDB::load("..."))) {
    -            if (attr.getName() = name && attr.id = self.getAttribute().id) {
    -                return attr.getValue()
    -            }
    -        }
    -    }
    -}
    -

    Import

    -

    GödelScript 使用use关键字来引入其他文件的符号:

    -
    use coref::java::* // 引用全部符号
    -use coref::xml::Location // 引用单个符号
    -use coref::xml::{XmlDB, XmlElement} // 引用多个符号
    -

    模块引用规则

    -

    GödelScript 包管理器会在传入参数中含有-p {package dir path}时启用。

    -

    包管理器会对文件夹结构进行解析,遍历其中所有的.gdl后缀文件。在拿到文件的相对路径后,会将路径映射到对应的包路径。如果文件的相对路径中存在-,或者路径中存在一个文件夹名或者文件名或者.后跟随的第一个字符是数字, 则该路径不会被包管理器接受,但是包管理器不会对其进行报错,只进行忽略处理。

    -

    如果想知道忽略了哪些路径,可以使用-v参数,包管理器在有该参数的情况下会将忽略的路径作为warning报出。如果最终映射的路径中,存在路径冲突的情况,那么包管理器会将其作为error报出并退出编译进程。

    -
    packages:
    -  coref::cfamily    -> /.../Library/coref.cfamily.gdl
    -  coref::go         -> /.../Library/coref.go.gdl
    -  coref::java       -> /.../Library/coref.java.gdl
    -  coref::javascript -> /.../Library/coref.javascript.gdl
    -  coref::properties -> /.../Library/coref.properties.gdl
    -  coref::python     -> /.../Library/coref.python.gdl
    -  coref::sql        -> /.../Library/coref.sql.gdl
    -  coref::xml        -> /.../Library/coref.xml.gdl
    -modules
    -  +--coref -> coref
    -     |--xml -> coref::xml
    -     |--properties -> coref::properties
    -     |--cfamily -> coref::cfamily
    -     |--java -> coref::java
    -     |--javascript -> coref::javascript
    -     |--go -> coref::go
    -     |--sql -> coref::sql
    -     +--python -> coref::python
    -

    路径映射样例

    -
    Library
    -|-- coref.java.gdl
    -|-- coref.xml.gdl
    -+-- coref
    -    |-- go.gdl
    -    +-- a
    -        +-- b.gdl
    -=>
    -coref::java
    -coref::xml
    -coref::go
    -coref::a::b
    -

    该样例中,路径出现冲突

    -
    Library
    -|-- coref
    -|   |-- java.gdl
    -|   +-- python.gdl
    -+-- coref.python.gdl
    -=>
    -coref::java
    -coref::python -- \
    -                  > 出现冲突
    -coref::python -- /
    -

    该样例中,路径存在不合法字符

    -
    Library
    -|-- 0123.gdl
    -|-- my-godel-lib
    -|   +-- js.gdl
    -+-- lib-file.123.gdl
    -=>
    -0123
    -^ 第一个字符为数字
    -my-godel-lib::js
    -  ^     ^ 使用了 `-` 字符
    -lib-file::123
    -   ^      ^ 使用了一个字符为数字,并且路径中包含 `-`
    -

    符号冲突

    -

    在使用过程中,难免会遇到如下的情况。此时直接使用File会被告知符号冲突,需要指定其中一个符号。

    -
    use coref::java::Location
    -use coref::xml::Location
    -schema MyLoc extends Location {}
    -                     ^^^^^^^^
    -Error: "Location" is ambiguous, with multiple symbols
    -       "coref::java::Location, coref::xml::Location".
    -

    与其他语言类似,GödelScript允许通过完整路径的方式直接指定一个符号,但是该符号必须被引入。

    -
    use coref::java::Location
    -use coref::xml::Location
    -schema MyLoc extends coref::xml::Location {}
    -

    完整路径符号可以被用于以下情况:

    -
      -
    • schema 继承
    • -
    -
    schema JavaLocation extends coref::java::Location {}
    -
      -
    • 函数参数和返回值
    • -
    -
    fn return_java_file(f: coref::java::File) -> coref::java::File {
    -    ...
    -}
    -
      -
    • database 声明
    • -
    -
    database MyDB {
    -    java_file: coref::java::File,
    -    xml_file: coref::xml::File,
    -    java_loc: coref::java::Location,
    -    xml_loc: coref::xml::Location
    -}
    -
      -
    • query 列表类型声明
    • -
    -
    query example from
    -    coref::java::Location loc in coref::java::Location(coref::java::JavaDB::load("..."))
    -where
    -    ...
    -select
    -    ...
    -
      -
    • schema 静态方法调用
    • -
    -
    for(loc in coref::java::Location(coref::java::JavaDB::load("..."))) {
    -    ...
    -}
    -
    -stmt.to<coref::java::ElementParent>()
    -stmt.is<coref::java::ElementParent>()
    -

    Query

    -

    Query 用于进行一些简单的查询,编写的 query 一定会被输出,即使没有声明main函数。Query 的语法格式如下:

    -
    query 名字 from
    -    变量名 in 初始值,
    -    变量名 in 初始值,
    -    变量名 in 初始值
    -where 条件
    -select 值 as 输出的列名,
    -    值 as 输出的列名,
    -    值 as 输出的列名,
    -    值 as 输出的列名
    -

    from 列表中的变量声明无需加上类型标注,编译器会进行自动推导,另外此处初始化不会使用=号,而是in关键字。此外,select 列表中,输出的列名不能和参与计算的变量名冲突,但是列名可以被省略。被省略的列名会在输出结果时采取随机名字,所以尽量不要省略。

    -

    下面是用 query 语法编写的hello world

    -
    query hello_world from
    -    info in "hello world"
    -select info as greeting
    -

    上面的代码等价于如下代码:

    -
    fn hello_world(greeting: string) -> bool {
    -    let (info = "hello world") {
    -        if (greeting = info) {
    -            return true
    -        }
    -    }
    -}
    -fn main() {
    -    output(hello_world())
    -}
    -

    样例和组成结构

    -

    Query 包含了查询名称,from列表,where筛选条件,select列表。

    -
    // script
    -use coref::java::{Callable, Class, Interface, JavaDB}
    -
    -fn db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -query class_method from
    -    Callable m in Callable(db()),
    -    Class c in Class(db())
    -where
    -    c.id = m.getBelongedClass().id
    -select
    -    c.getQualifiedName() as className,
    -    m.getName() as methodName,
    -    m.getSignature() as methodSignature
    -

    等价代码

    -

    上面的例子等价于如下代码:

    -
    // script
    -use coref::java::{Callable, Class, Interface, JavaDB}
    -
    -fn db() -> JavaDB {
    -  return JavaDB::load("coref_java_src.db")
    -}
    -
    -fn main() {
    -  output(class_method())
    -}
    -
    -fn class_method(className: string, methodName: string, methodSignature: string) -> bool {
    -  for (m in Callable(db()), c in Class(db())) {
    -    if (c.id = m.getBelongedClass().id) {
    -      if (className = c.getQualifiedName() &&
    -          methodName = m.getName() &&
    -          methodSignature = m.getSignature()) {
    -        return true
    -      }
    -    }
    -  }
    -}
    -

    Ungrounded Error: 未赋值/未绑定错误

    -

    GödelScript 会将未与数据绑定的符号判定为ungrounded(未赋值/未绑定)。基本判定规则为:

    -
      -
    • 未初始化的/未被使用的/未与集合绑定的符号 -
        -
      • 未被绑定的int string参数
      • -
      • 未被使用的 database 类型的参数
      • -
      • 函数体有语句,但是没有任何返回语句
      • -
      -
    • -
    • 在取非运算块中进行绑定的符号 -
        -
      • 例如 !(__tmp = 1) 中,__tmp会被认为是未绑定的
      • -
      • 在取非运算块中调用 inline 函数或数据构造函数
      • -
      -
    • -
    -

    1. 未使用的 database/基础类型参数

    -

    函数代码块中,如果有一个语句分支没有使用参数中的database或者基础类型参数,则一定会导致ungrounded

    -
    fn test(db: JavaDB, a: int, b: string) -> bool {}
    -        ^^          ^       ^                  ^^
    -Error: ungrounded parameter "db, a, b" in this branch.
    -

    编译器会提示在哪一条执行分支中存在 unused parameter,根据提示检查对应的执行路径,补全对 parameter 的约束即可。

    -

    存在某些函数,在调用的时候,参数虽然是基础类型,但是传入的都是字面量,那这时如果错误地报出了ungrounded,可以给该函数添加@inline注解,来避免错误的约束检测。

    -
    impl XXX {
    -    @inline
    -    fn getValueByAttributeNameByDefaultValue(self, attributeName: string) -> string {
    -        if (self.hasAttribute(attributeName)) {
    -            return self.getValueByAttributeName(attributeName)
    -        }
    -        if (!self.hasAttribute(attributeName)) {
    -            return "null"
    -        }
    -    }
    -}
    -
    -fn xxx() -> xx {
    -    ..
    -    attr.getValueByAttributeNameByDefaultValue("pattern")
    -                                               ^^^^^^^^^ 使用了字面量, 添加@inline来通过检测
    -}
    -

    2. 函数体有语句的情况下无返回语句

    -

    GödelScript 允许一个函数体不包含任何语句,即空函数体。但是如果函数体中有其他语句,则 GödelScript 会要求必须有至少一个返回语句,否则就会出现 ungrounded error。

    -
    fn test() -> int {}
    -                  ^^ 没有语句,可以通过编译
    -
    -fn test() -> int {
    -    let (a = 1) {}
    -    ^^^^^^^^^^^^^^ 有语句的情况下,没有返回语句,ungrounded
    -}
    -

    3. 取非运算块中使用 inline 函数或数据构造函数

    -

    上文提到了可以通过@inline注解来规避 ungrounded error。但是如果在取非运算中使用了含有该注解的函数,则必然会导致 ungrounded error。

    -

    同样,数据构造函数实际的作用就是对一个临时中间变量进行绑定,但是这会直接导致 ungrounded error。 -所以综上所述,在取非运算块中使用 inline 函数或者数据构造函数,必然会导致 ungrounded error,编译器会对所有类似的情况直接报错。

    -
    if (!check(method.to<ElementParent>())) {
    -           ^^^^^^^^^^^^^^^^^^^^^^^^^^ ungrounded
    -}
    -if (!check(ElementParent {id: 0})) {
    -           ^^^^^^^^^^^^^^ ungrounded
    -}
    -
    -@inline
    -fn for_test() -> ElementParent {
    -    ...
    -}
    -if (!check(for_test())) {
    -           ^^^^^^^^^^ 取非运算中存在 inline 函数,ungrounded
    -}
    -

    4. 对链式调用的取非运算

    -

    GödelScript 未对该情况执行ungrounded检测,但是该写法会导致在 Soufflé 中报ungrounded错误:

    -
    use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -fn get_field() -> *Field {
    -    for (field in Field(default_java_db())) {
    -        if (!field.getLocation().getFile().getRelativePath().contains("/test/")) {
    -            yield field
    -        }
    -    }
    -}
    -

    其中:

    -
    !field.getLocation().getFile().getRelativePath().contains("/test/")
    -

    实际会被翻译为类似如下的 Soufflé 代码片段:

    -
    !(__tmp = field, Field_getLocation(__tmp, __tmp_1), ..., contains("/test/", __tmp_4))
    -  ^^^^^                                   ^^^^^^^
    -

    其中用于中间存储的变量在!(...)中被绑定,但是由于取非操作符,这个绑定被认为是假设的,但是__tmp,__tmp_1却被认为是被声明出来整个语句范围内可见的变量,从而导致ungrounded

    -

    可以采取声明中间变量接住中间结果的方式来避免取非运算中的绑定操作:

    -
    fn get_field() -> *Field {
    -    for (field in Field(default_java_db())) {
    -        let (path = field.getLocation().getFile().getRelativePath()) {
    -            if (!path.contains("/test/")) {
    -                yield field
    -            }
    -        }
    -    }
    -}
    -

    查询示例

    -

    Java

    -

    未使用方法

    -
    // script
    -use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -// find unused methods
    -fn unused_method(unused: string) -> bool {
    -    for(c in Callable(default_java_db()), method in Callable(default_java_db()), caller in method.getCaller()) {
    -        if (c != caller && unused = method.getSignature()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(unused_method())
    -}
    -

    类继承关系

    -
    // script
    -use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -/**
    - * Find all class and the inheritances
    - * including parent class inheritance and ancestor class inheritance
    - */
    -fn class_hierarchy(className : string, superClassName : string) -> bool {
    -    for (c in Class(default_java_db()), ancestor in c.getAnAncestorClass()) {
    -        if (className = c.getQualifiedName() &&
    -            superClassName = ancestor.getQualifiedName()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() { 
    -    output(class_hierarchy())
    -}
    -

    类的所有方法信息

    -
    // script
    -use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -// Find all methods of the class
    -fn methods(className : string, methodName : string) -> bool {
    -    for (c in Class(default_java_db()), m in c.getAllMethods()) {
    -        if (className = c.getQualifiedName() &&
    -            methodName = m.getName()){
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() { 
    -    output(methods())
    -}
    -

    Python

    -

    获取函数圈复杂度

    -
    // script
    -use coref::python::*
    -
    -fn default_db() -> PythonDB {
    -    return PythonDB::load("coref_python_src.db")
    -}
    -
    -/**
    - * Get cyclomatic complexity of functions
    - *
    - * @param name   function name
    - * @param value  cyclomatic complexity of function
    - * @param path   path of file including this function
    - * @param sline  function start line
    - * @param eline  function end line
    - */
    -fn getCyclomaticComplexity(
    -    name: string,
    -    value: int,
    -    path: string,
    -    sline: int,
    -    eline: int) -> bool {
    -    // get metric function
    -    for (c in MetricFunction(default_db())) {
    -        if (path = c.getLocation().getFile().getRelativePath() &&
    -            name = c.getQualifiedName() &&
    -            value = c.getCyclomaticComplexity() &&
    -            sline = c.getLocation().getStartLineNumber() &&
    -            eline = c.getLocation().getEndLineNumber()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(getCyclomaticComplexity())
    -}
    -

    注释率统计

    -
    // script
    -use coref::python::*
    -
    -schema PublicVisitedElement extends CombineElement {}
    -
    -impl PublicVisitedElement {
    -    @data_constraint
    -    pub fn __all__(db: PythonDB) -> *PublicVisitedElement {
    -        for (tmp in Class(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -        for (tmp in Function(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -    }
    -}
    -
    -fn default_db() -> PythonDB {
    -    return PythonDB::load("coref_python_src.db")
    -}
    -
    -
    -// count number of total public element
    -fn countTotalPublicElement() -> int {
    -    return PublicVisitedElement(default_db()).len()
    -}
    -
    -// get public elements with Docstring comment
    -fn withDocstringCommentElement() -> *PublicVisitedElement {
    -    let (db = default_db()) {
    -        for (e in PublicVisitedElement(db), j in DocstringComment(db)) {
    -            if (e.key_eq(j.getDocumentableElement())) {
    -                yield e
    -            }
    -        }
    -    }
    -}
    -
    -// count number of public elements with Docstring comment
    -fn countTotalPublicDocumentedElement() -> int {
    -    return withDocstringCommentElement().len()
    -}
    -
    -fn withPublicDocumentedBelowElement() -> *PublicVisitedElement {
    -    let (db = default_db()) {
    -        for (e in PublicVisitedElement(db), j in Comment(db)) {
    -            if (e.key_eq(j.getDocumentedClassOrFunctionElement())) {
    -                yield e
    -            }
    -        }
    -    }
    -}
    -
    -// count number of public element with single line comment
    -fn countTotalPublicDocumentedBelowElement() -> int {
    -    return withPublicDocumentedBelowElement().len()
    -}
    -
    -
    -// calculate documented percentage
    -fn getDocumentedPercentage(documentedPercentage: int) -> bool {
    -    let (i = countTotalPublicElement(),
    -        j = countTotalPublicDocumentedElement(),
    -        k = countTotalPublicDocumentedBelowElement()) {
    -        if (i = 0) {
    -            if (documentedPercentage = -1) {
    -                return true
    -            }
    -        }
    -        if (i != 0) {
    -            if (documentedPercentage = (j + k) * 1000 / i) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(getDocumentedPercentage())
    -}
    -

    函数注释情况

    -
    // script
    -use coref::python::*
    -
    -schema PublicVisitedElement extends CombineElement {}
    -
    -impl PublicVisitedElement {
    -    @data_constraint
    -    pub fn __all__(db: PythonDB) -> *PublicVisitedElement {
    -        for (tmp in Class(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -        for (tmp in Function(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -    }
    -
    -    pub fn getName(self) -> string {
    -        let (tmp = Class(__all_data__).find(self)) {
    -            return tmp.getQualifiedName() 
    -        }
    -        let (tmp = Function(__all_data__).find(self)) {
    -            return tmp.getQualifiedName() 
    -        }
    -    }
    -}
    -
    -fn default_db() -> PythonDB {
    -    return PythonDB::load("coref_python_src.db")
    -}
    -
    -fn hasComment(e: PublicVisitedElement) -> bool {
    -    let (db = default_db()) {
    -        for (j in DocstringComment(db)) {
    -            if (e.key_eq(j.getDocumentableElement())) {
    -                return true
    -            }
    -        }
    -        for (j in Comment(db)) {
    -            if (e.key_eq(j.getDocumentedClassOrFunctionElement())) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -/**
    - * Get comment of each public element
    - *
    - * @param type          public visited element type
    - * @param name          public visited element name
    - * @param filePath      file path
    - * @param sline         element start line
    - * @param eline         element end line
    - * @param isCommented   if is commented
    - */
    -fn output_result(
    -    type: string,
    -    name: string,
    -    filePath: string,
    -    sline: int,
    -    eline: int,
    -    isCommented: int) -> bool {
    -    for (e in PublicVisitedElement(default_db())) {
    -        if (type = e.getType() && 
    -            name = e.getName() &&
    -            filePath = e.getLocation().getFile().getRelativePath() &&
    -            sline = e.getLocation().getStartLineNumber() &&
    -            eline = e.getLocation().getEndLineNumber()) {
    -            if (hasComment(e)) {
    -                if (isCommented = 1) {
    -                    return true
    -                }
    -            }
    -            if (!hasComment(e)) {
    -                if (isCommented = 0) {
    -                    return true
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(output_result())
    -}
    -

    JavaScript

    -

    AST Print

    -
    // script
    -use coref::javascript::*
    -
    -/**
    - * print AST
    - *
    - * @param filePath          file path
    - * @param parentId          parent node ID
    - * @param parentKind        parent node kind
    - * @param parentStartLine   parent node start line
    - * @param parentEndLine     parent node end line
    - * @param childId           child node ID
    - * @param childKind         child node kind
    - * @param childStartLine    child node start line
    - * @param childEndLine      child node end line
    - * @param index             child node index
    - */
    -fn out(
    -    filePath: string,
    -    parentId: int,
    -    parentKind: string,
    -    parentStartLine: int,
    -    parentEndLine: int,
    -    childId: int,
    -    childKind: string,
    -    childStartLine: int,
    -    childEndLine: int,
    -    index: int
    -) -> bool {
    -    let (db = JavascriptDB::load("coref_javascript_src.db")) {
    -        for (parent in Node(db),
    -            child in Node(db),
    -            parentSyntaxKind in SyntaxKind(),
    -            childSyntaxKind in SyntaxKind(),
    -            parentLocation in Location(db),
    -            childLocation in Location(db),
    -            file in File(db)) {
    -            if (parent.key_eq(child.getParent()) &&
    -                parentId = parent.id &&
    -                childId = child.id &&
    -                parentSyntaxKind.id = parent.getKind() &&
    -                childSyntaxKind.id = child.getKind() &&
    -                parentKind = parentSyntaxKind.getName() &&
    -                childKind = childSyntaxKind.getName() &&
    -                index = child.getIndex() &&
    -                parentLocation = parent.getLocation() &&
    -                childLocation = parent.getLocation() &&
    -                file = parentLocation.getFile() &&
    -                filePath = file.getRelativePath() &&
    -                parentStartLine = parentLocation.getStartLineNumber() &&
    -                parentEndLine = parentLocation.getEndLineNumber() &&
    -                childStartLine = childLocation.getStartLineNumber() &&
    -                childEndLine = childLocation.getEndLineNumber()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    圈复杂度

    -
    // script
    -use coref::javascript::*
    -
    -fn default_db() -> JavascriptDB {
    -    return JavascriptDB::load("coref_javascript_src.db")
    -}
    -
    -/**
    - * Output the cyclomatic complexity of each function
    - *
    - * @param filePath      file path
    - * @param functionName  function name
    - * @param complexity    cyclomatic complexity
    - * @param startLine     function start line
    - * @param endLine       function end line
    - */
    -fn out(filePath: string, functionName: string, complexity: int, startLine: int, endLine: int) -> bool {
    -    let (db = default_db()) {
    -        for (func in FunctionLikeDeclaration(db), file in File(db)) {
    -            if (complexity = func.getCyclomaticComplexity() &&
    -                functionName = func.getName() &&
    -                file = func.getLocation().getFile() &&
    -                filePath = file.getRelativePath() &&
    -                startLine = func.getLocation().getStartLineNumber() &&
    -                endLine = func.getLocation().getEndLineNumber()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Change Effect

    -
    // script
    -use coref::javascript::*
    -
    -fn default_db() -> JavascriptDB {
    -    return JavascriptDB::load("coref_javascript_src.db")
    -}
    -
    -fn getACallerFunction(function: FunctionLikeDeclaration, callerFunction: FunctionLikeDeclaration) -> bool {
    -    for (mayInvokeExpression in MayInvokeExpression(default_db())) {
    -        if (mayInvokeExpression = function.getACallSite() &&
    -            callerFunction = mayInvokeExpression.getEnclosingFunction()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn getAnEffectedFunction(function: FunctionLikeDeclaration, effectedFunction: FunctionLikeDeclaration) -> bool {
    -    if (getACallerFunction(function, effectedFunction)) {
    -        return true
    -    }
    -    for (callerFunction in FunctionLikeDeclaration(default_db())) {
    -        if (getACallerFunction(function, callerFunction) &&
    -            getAnEffectedFunction(callerFunction, effectedFunction)) {
    -            return true
    -        }
    -    }
    -}
    -
    -/**
    - * Query the effected functions according to the changed lines.
    - *
    - * @param function              the changed function id
    - * @param signature             the changed function signature
    - * @param functionPath          the changed function file path
    - * @param startLine             the changed function start line
    - * @param endLine               the changed function end line
    - * @param effectedFunction      the effected function id
    - * @param effectedSignature     the effected function signature
    - * @param effectedFunctionPath  the effected function file path
    - * @param effectedStartLine     the effected function start line
    - * @param effectedEndLine       the effected function end line
    - */
    -fn out(
    -    function: FunctionLikeDeclaration,
    -    signature: string,
    -    functionPath: string,
    -    startLine: int,
    -    endLine: int,
    -    effectedFunction: FunctionLikeDeclaration,
    -    effectedSignature: string,
    -    effectedFunctionPath: string,
    -    effectedStartLine: int,
    -    effectedEndLine: int
    -) -> bool {
    -    if (getAnEffectedFunction(function, effectedFunction)) {
    -        let (symbol = function.getSymbol(),
    -            effectedSymbol = effectedFunction.getSymbol(),
    -            location = function.getLocation(),
    -            effectedLocation = effectedFunction.getLocation()) {
    -            if (signature = symbol.getDescription() &&
    -                effectedSignature = effectedSymbol.getDescription() &&
    -                functionPath = location.getRelativePath() &&
    -                startLine = location.getStartLineNumber() &&
    -                endLine = location.getEndLineNumber() &&
    -                effectedFunctionPath = effectedLocation.getRelativePath() &&
    -                effectedStartLine = effectedLocation.getStartLineNumber() &&
    -                effectedEndLine = effectedLocation.getEndLineNumber()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    XML

    -

    获取 bean

    -
    // script
    -use coref::xml::*
    -
    -schema BeanXmlElement extends XmlElement {}
    -
    -impl BeanXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *BeanXmlElement {
    -        for (e in XmlElement(db)) {
    -            let (path = e.getLocation().getFile().getRelativePath()) {
    -                if (!path.contains("target") && e.getName() = "bean") {
    -                    yield BeanXmlElement {
    -                        id: e.id,
    -                        location_id: e.location_id,
    -                        parent_id: e.parent_id,
    -                        index_order: e.index_order
    -                    }
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema EntryXmlElement extends XmlElement {}
    -
    -impl EntryXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *EntryXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getName() = "entry") {
    -                yield EntryXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema PropertyXmlElement extends XmlElement {}
    -
    -impl PropertyXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *PropertyXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getName() = "property") {
    -                yield PropertyXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -fn default_db() -> XmlDB {
    -    return XmlDB::load("coref_xml_src.db")
    -}
    -
    -// get class name
    -fn getClassName(bean: BeanXmlElement) -> string {
    -    for (attr in bean.getAttribute()) {
    -        if (attr.getName() = "class") {
    -            return attr.getValue()
    -        }
    -    }
    -}
    -
    -// get key
    -fn getKey(e: EntryXmlElement) -> string {
    -    for (attr in e.getAttribute()) {
    -        if (attr.getName() = "key") {
    -            return attr.getValue()
    -        }
    -    }
    -}
    -
    -// output value and class info of the bean
    -fn output1(className: string, pName: string, kName: string) -> bool {
    -    let (db = default_db()) {
    -        for (bean in BeanXmlElement(db), p in PropertyXmlElement(db), e in EntryXmlElement(db)) {
    -            if (className = getClassName(bean) &&
    -                bean.key_eq(p.getParent()) &&
    -                p.key_eq(e.getParent().getParent()) &&
    -                pName = p.getName() &&
    -                kName = getKey(e)) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(output1())
    -}
    -

    POM

    -
    // script
    -use coref::xml::*
    -
    -schema DependencyElement extends XmlElement {}
    -
    -impl DependencyElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *DependencyElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "dependency") {
    -                yield DependencyElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema GroupElement extends XmlElement {}
    -
    -impl GroupElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *GroupElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "groupId") {
    -                yield GroupElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema VersionElement extends XmlElement {}
    -
    -impl VersionElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *VersionElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "version") {
    -                yield VersionElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema ArtifactElement extends XmlElement {}
    -
    -impl ArtifactElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *ArtifactElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "artifactId") {
    -                yield ArtifactElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema PomFile extends XmlFile {}
    -
    -impl PomFile {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *PomFile {
    -        for(f in XmlFile(db)) {
    -            if (f.getFileName() = "pom.xml") {
    -                yield PomFile {
    -                    id: f.id,
    -                    file_name: f.file_name,
    -                    relative_path: f.relative_path
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -// output relative path of the file, referenced jar name and version
    -fn out(fileName: string, m1: string, m2: string, m3: string) -> bool {
    -    let (db = XmlDB::load("coref_xml_src.db")) {
    -        for (f in PomFile(db),
    -            e1 in GroupElement(db),
    -            e2 in VersionElement(db),
    -            e3 in ArtifactElement(db),
    -            c1 in XmlCharacter(db),
    -            c2 in XmlCharacter(db),
    -            c3 in XmlCharacter(db),
    -            p in DependencyElement(db)) {
    -            if (f.key_eq(p.getLocation().getFile()) &&
    -                fileName = f.getRelativePath() &&
    -                p.key_eq(e1.getParent()) &&
    -                e1.key_eq(c1.getBelongedElement()) &&
    -                m1 = c1.getText() &&
    -                p.key_eq(e2.getParent()) &&
    -                e2.key_eq(c2.getBelongedElement()) &&
    -                m2 = c2.getText() &&
    -                p.key_eq(e3.getParent()) &&
    -                e3.key_eq(c3.getBelongedElement()) &&
    -                m3 = c3.getText()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    RPC

    -
    // script
    -use coref::xml::*
    -
    -// select XmlElement containing "mobileService"
    -schema MobileServiceXmlElement extends XmlElement{}
    -
    -impl MobileServiceXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *MobileServiceXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getElementName() = "mobileService") {
    -                yield MobileServiceXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -
    -    pub fn getServiceBeanValue(self) -> string {
    -        for (a in self.getAttribute()) {
    -            if (a.getName() = "serviceBean") {
    -                return a.getValue()
    -            }
    -        }
    -    }
    -}
    -
    -// select XmlElement containing "sofa:extension"
    -schema SofaExtensionXmlElement extends XmlElement{}
    -impl SofaExtensionXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *SofaExtensionXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getName() = "sofa:extension") {
    -                yield SofaExtensionXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -fn out(value: string) -> bool {
    -    let (db = XmlDB::load("coref_xml_src.db")) {
    -        for (m in MobileServiceXmlElement(db), s in SofaExtensionXmlElement(db), ancestor in m.getAnAncestor()) {
    -            if (s.key_eq(ancestor) && value = m.getServiceBeanValue()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Go

    -

    获取所有文件的基本信息

    -
    // script
    -use coref::go::*
    -
    -fn default_db() -> GoDB {
    -    return GoDB::load("coref_go_src.db")
    -}
    -/**
    - * @param name          file name
    - * @param funcCount     function/method quantity
    - * @param totallines    total lines of file
    - * @param codelines     code line of file
    - * @param commentlines  comment line of file
    - * @param md5           md5 of this file
    - * @param sha256        sha256 of this file
    - */
    -fn out(
    -    name: string,
    -    funcCount: int,
    -    totallines: int,
    -    codelines: int,
    -    commentlines: int,
    -    md5: string,
    -    sha256: string) -> bool {
    -    for(f in File(default_db())) {
    -        if (name = f.getName() &&
    -            funcCount = f.getFunctionCount() &&
    -            md5 = f.getMd5Sum() &&
    -            sha256 = f.getSha256Sum() &&
    -            totallines = f.getLineInfo().getNumberOfTotalLines() &&
    -            codelines = f.getLineInfo().getNumberOfCodeLines() &&
    -            commentlines = f.getLineInfo().getNumberOfCommentLines()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    获取函数及其关联的注释

    -
    // script
    -use coref::go::*
    -
    -fn default_db() -> GoDB {
    -    return GoDB::load("coref_go_src.db")
    -}
    -
    -// Define a predicate called 'out' with parameters fileName, funcName, funcComment, and signature
    -fn out(fileName: string, funcName: string, funcComment: string, signature: string) -> bool {
    -    // Check if there exists a Function object 'func'
    -    for(func in Function(default_db())) {
    -        if (
    -            // Get the name of the file the function belongs to and assign it to the variable 'fileName'
    -            fileName = func.getBelongsFile().getName() &&
    -            // Get the name of the function and assign it to the variable 'funcName'
    -            funcName = func.getName() &&
    -            // Get the associated comment string for the function and assign it to the variable 'funcComment'
    -            funcComment = func.getAssociatedCommentString() &&
    -            // Get the function type signature and assign it to the variable 'signature'
    -            signature = func.getFunctionTypeSignature()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    获取函数圈复杂度

    -
    // script
    -use coref::go::*
    -
    -fn default_db() -> GoDB {
    -    return GoDB::load("coref_go_src.db")
    -}
    -
    -/**
    - * @param name: file name
    - * @param func: function name
    - * @param cmplx: function cyclomatic complexity
    - * @param sl,el: function location info (start/end line numbers)
    - */
    -fn out(name: string, func: string, cmplx: int, sl: int, el: int) -> bool {
    -    for(f in GoFile(default_db()), function in Function(default_db())) {
    -        if ((!f.isAutoGenereatedFile()) &&
    -            f.key_eq(function.getBelongsFile()) &&
    -            name = f.getName() &&
    -            func = function.getName() &&
    -            cmplx = function.getCyclomaticComplexity() &&
    -            sl = function.getLocation().getStartLineNumber() &&
    -            el = function.getLocation().getEndLineNumber()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    查询调试和优化技巧

    -

    运行 GödelScript 脚本的时候,经常会出现运行时间超长的问题,这里提供一些基本判别方法和解决方案。

    -

    Schema 传参导致笛卡尔积过大

    -

    函数传参在没有@inline注解的情况下,默认是作为“限定”条件,而不是一个传入值存在。

    -

    例如下面的这个例子中,get获取到一个Class类型的传入参数,但是实际上最终的编译结果会类似下面的代码:

    -
    fn check(class: Class) -> bool {
    -    if (class.getName().contains("io")) {
    -        return true
    -    }
    -}
    -
    -// 实际的编译结果
    -fn check(class: Class) -> bool {
    -    // 实际上是要先拿 Class 全集
    -    for(__temp_class in Class::__all__(__all_data__)) {
    -        if (class = __temp_class ) {
    -            if (class.getName().contains("io")) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -

    所以在传参中 schema 类型很多时,会出现多个 schema 全集做笛卡尔积的情况,空间和时间开销急剧增加。 -解决方案也很简单,加一个@inline注解就可以:

    -
    @inline
    -fn check(class: Class) -> bool {
    -    if (class.getName().contains("io")) {
    -        return true
    -    }
    -}
    -
    -fn example() -> bool {
    -    for(class in Class(default_java_db())) {
    -        if (check(class)) {
    -            return true
    -        }
    -    }
    -}
    -
    -// inline 注解会强行在代码生成阶段将函数内联到语句中,避免多次加载表
    -// 实际的编译结果类似于
    -fn example() -> bool {
    -    for(class in Class(default_java_db())) {
    -        if (class.getName().contains("io")) {
    -            return true
    -        }
    -    }
    -}
    -

    多层 for 导致笛卡尔积过大

    -

    在一些情况下不可避免的会使用非常多层数的 for 来加载多表进行联查,导致笛卡尔积严重膨胀。可以通过提前减少 (过滤) 集合大小的方式来缩减笛卡尔积结果数量,例如:

    -
    fn getByIndex(self) -> Expression {
    -    let (db = default_java_db()) {
    -        for(e in Expression(db), p in Parameter(db)) {
    -            let (i = p.getIndex()) {
    -                if (e.key_eq(self.getValueByIndex(i))) {
    -                    return e
    -                }
    -            }
    -        }
    -    }
    -}
    -

    这个例子中,e, p 做笛卡尔积,导致中间过程占用时间太长。 -i 实际上是从 p 的一个方法中得到的集合,并且在实际使用中,这个集合非常小,远比 Parameter 全集小,所以可以把 i 集合的获取抽出来变成单独的函数,生成小集合,避免大集合之间笛卡尔积运算的同时,还保证了结果的等价:

    -
    fn getAllParameterIndex() -> *int {
    -    let (db = default_java_db()) {
    -        for (p in Parameter(db)) {
    -            yield p.getIndex()
    -        }
    -    }
    -}
    -
    -fn getByIndex(self) -> Expression {
    -    let (db = default_java_db()) {
    -        for(e in Expression(db), i in getAllParameterIndex()) {
    -            if (e.key_eq(self.getValueByIndex(i))) {
    -                return e
    -            }
    -        }
    -    }
    -}
    -

    e, p 的笛卡尔积就变成了 e, i 的笛卡尔积,从运算的层面来看,笛卡尔积开销变小,getIndex操作也被提前了,而不是在做笛卡尔积之后进行,所以性能大幅度提升。

    -

    不要滥用@inline/必须用@inline的优化策略

    -

    inline 函数的底层机制是在调用处展开,如果该函数不存在大量的 schema 传参,并且在很多位置都被调用,inline 可能会导致编译结果膨胀且重复计算次数指数级增加,有时反而不利于减少运行时间。 -如果存在必须要使用 inline 的情况 (比如规避ungrounded),但是使用之后反而出现运行速度变慢的情况,可以采取将内嵌语句拆分为 predicate 的方式来避免展开导致的编译结果膨胀。

    -

    下面的例子中,getValueByAttributeNameByDefaultValue为了避免attributeName被识别为ungrounded所以标注inline,后续在 if 分支中添加了一个条件语句,但是导致了执行时间从 3 秒变成 35 秒:

    -
    impl XmlElementBase {
    -  @inline
    -  fn getValueByAttributeNameByDefaultValue(self, attributeName: string) -> string {
    -    if (self.hasAttribute(attributeName)) {
    -      // return self.getValueByAttributeName(attributeName)
    -      // 更改为了如下语句:
    -      let(value = self.getValueByAttributeName(attributeName)) {
    -        if (value = "n/a") {
    -          return ""
    -        }
    -        if (value != "n/a") {
    -          return value
    -        }
    -      }
    -    }
    -    if (!self.hasAttribute(attributeName)) {
    -      return "null"
    -    }
    -  }
    -}
    -

    可以看到的是,增加了一层赋值和一层条件语句,在下文中,这个函数被调用了接近 20 次,导致了代码接近 20 次被重复展开,同时也造成了性能出现了一个数量级的差距。此时可以将更改的语句提取出来,由于提取出来的函数并没有使用复杂类型作为传参,所以不需要 inline 性能也没有损失,提取之后结果如下:

    -
    impl XmlElementBase {
    -  fn getTransValueByAttributeName(self, attributeName: string) -> string {
    -    let (value = self.getValueByAttributeName(attributeName)) {
    -      if (value = "n/a") {
    -        return ""
    -      }
    -      if (value != "n/a") {
    -        return value
    -      }
    -    }
    -  }
    -  @inline
    -  fn getValueByAttributeNameByDefaultValue(self, attributeName: string) -> string {
    -    if (self.hasAttribute(attributeName)) {
    -      return self.getTransValueByAttributeName(attributeName)
    -    }
    -    if (!self.hasAttribute(attributeName)) {
    -      return "null"
    -    }
    -  }
    -}
    -

    这样执行时间从 35 秒回到 3 秒,符合预期。

    -

    在本机使用查询脚本流程

    -

    参见安装、配置、运行

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-godellanguage/index.html b/docs/docs/codefuse-query-godellanguage/index.html deleted file mode 100644 index 1569259..0000000 --- a/docs/docs/codefuse-query-godellanguage/index.html +++ /dev/null @@ -1,2777 +0,0 @@ - - - - - - - - -GodelLanguage · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    GodelLanguage

    -
    -
    - - -

    GödelScript Query Language

    -

    Index

    - -

    Basic Concepts and Syntax of GödelScript

    -

    Introduction

    -
    // script
    -fn hello(greeting: string) -> bool {
    -    return greeting = "hello world!"
    -}
    -
    -fn main() {
    -    output(hello())
    -}
    -

    GödelScript, the Gödel query language, is a domain-specific language (DSL) for querying and data processing used by CodeQuery. GödelScript uses syntax similar to Rust, providing strict type checking, convenient type inference, and user-friendly error messages, allowing users to get started quickly.

    -

    Main use cases for the GödelScript compiler include:

    -
      -
    1. Writing simple or complex queries for users, offering more convenient syntax to improve query writing efficiency.
    2. -
    3. Providing strict type checking and type inference, offering smarter code modification suggestions.
    4. -
    5. Offering strict ungrounded detection to avoid triggering the common Soufflé Ungrounded Error.
    6. -
    7. Support for Language Server and IDE Extension.
    8. -
    -

    Basic Program Structure

    -

    Program Structure

    -

    A GödelScript program may include:

    - -

    An example containing all the above components:

    -
    // script
    -// Package import/symbol import
    -use coref::java::* // Import all symbols
    -use coref::java::{JavaDB, Class} // Selective symbol import
    -
    -// Function declaration
    -fn default_db() -> JavaDB {
    -    return JavaDB::load("example.db")
    -}
    -
    -// Schema declaration
    -schema File {
    -    @primary id: int
    -}
    -
    -// Database declaration
    -database NewDB {
    -    file: *File
    -}
    -
    -// Trait declaration
    -trait FileTrait {
    -    fn getId(self) -> int;
    -}
    -
    -// Impl trait for
    -impl FileTrait for File {
    -    fn getId(self) -> int {
    -        return self.id
    -    }
    -}
    -
    -// Impl
    -impl File {
    -    @data_constraint
    -    fn all() -> *File {
    -        yield File {id: 1}
    -        yield File {id: 2}
    -    }
    -}
    -
    -// Query
    -query get_all_anno from
    -    Annotation anno in Annotation(default_db())
    -select
    -    anno.id as id
    -

    Comments

    -

    GödelScript uses comment syntax similar to C-like languages.

    -
    // Single line comment
    -
    -/*
    -* 1. Multi-line comment
    -* 2. Multi-line comment
    -*/
    -

    The main Function

    -

    A GödelScript query script can include a main function, which has no return value. If the main function is not implemented and no query declarations are written, the program will not produce any output.

    -

    For more details, please refer to main function.

    -
    fn main() {
    -    output(query_1())
    -    output(query_2())
    -}
    -

    Basic Types and Built-in Compiler Functions

    -

    GödelScript includes basic types int, string, and bool. bool is a basic type but cannot be stored as a value.

    -

    int Type Native Functions

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FunctionTypeExplanation
    pow(int, int) -> intExponentiation. Arguments must be non-negative numbers.
    rem(int, int) -> intRemainder operation.
    bitand(int, int) -> intBitwise conjunction.
    bitor(int, int) -> intBitwise disjunction.
    bitxor(int, int) -> intBitwise exclusive disjunction.
    bitnot(int) -> intBitwise negation.
    neg(int) -> intArithmetic negation.
    to_string(int) -> stringConversion to a string.
    add(int, int) -> intAddition (+).
    sub(int, int) -> intSubtraction (-).
    mul(int, int) -> intMultiplication (*).
    div(int, int) -> intDivision (/).
    eq(int, int) -> boolEquality (=).
    ne(int, int) -> boolInequality (!=).
    gt(int, int) -> boolGreater than (>).
    ge(int, int) -> boolGreater than or equal to (>=).
    lt(int, int) -> boolLess than (<).
    le(int, int) -> boolLess than or equal to (<=).
    to_set(int) -> *intCast to a set type.
    -

    string Type Native Functions

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FunctionTypeExplanation
    len(string) -> intGets the length of a string.
    substr(string, int, int) -> stringSubstring extraction using initial index and length.
    contains(string, string) -> boolChecks if one string is contained within the current string.
    matches(string, string) -> boolChecks if a regular expression fully matches the current string.
    get_regex_match_result(string, string, int) -> stringGets a capture result from a full regex match on the current string, determined by the second parameter (int). For example, "abcdef".get_regex_match_result("a(.*)f", 1) yields "bcde".
    to_int(string) -> intConverts to an integer.
    add(string, string) -> stringString concatenation.
    eq(string, string) -> boolChecks string equality.
    ne(string, string) -> boolChecks string inequality.
    to_set(string) -> *stringCast to a set type.
    -

    bool Type Native Functions

    -

    While bool exists as a basic type, it cannot be used as data in intermediate calculations, only as a conditional result.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FunctionTypeExplanation
    not(bool) -> boolLogical negation.
    and(bool, bool) -> boolLogical conjunction.
    or(bool, bool) -> boolLogical disjunction.
    eq(bool, bool) -> boolEquality.
    ne(bool, bool) -> boolInequality.
    -

    Native Functions for Sets

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FunctionTypeExplanation
    len(*T) -> intGets the count of a data set.
    max(*int) -> intFinds the maximum value.
    min(*int) -> intFinds the minimum value.
    sum(*int) -> intSummation of the values.
    find(*T0) -> T1Finds a data entry from a set using a primary key.
    -

    Global Native Functions

    - - - - - - - - - - - - - - - -
    FunctionTypeExplanation
    output((…) -> bool) -> Outputs query content.
    -

    Database Native Functions

    - - - - - - - - - - - - - - - -
    FunctionTypeExplanation
    load(string) -> TLoads the database.
    -

    Schema Native Functions

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FunctionTypeExplanation
    to(self) -> TConverts to another schema type, using duck typing.
    is(self) -> boolDetermines if it can be another schema type, using duck typing. If the schema has a primary key, the underlying check will only use the primary key to determine compatibility.
    key_eq(self, T) -> boolChecks if the primary keys of two schema instances are equal.
    key_neq(self, T) -> boolChecks if the primary keys of two schema instances are not equal.
    -

    Schema native function example:

    -
    use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -fn example() -> bool {
    -    for(stmt in StatementParent(default_java_db())) {
    -        if (stmt.is<ElementParent>()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn convert() -> *ElementParent {
    -    for(stmt in StatementParent(default_java_db())) {
    -        yield stmt.to<ElementParent>()
    -    }
    -}
    -

    Functions

    -

    The main Function of GödelScript

    -

    The main function is the only function in GödelScript that does not declare a return type. The main function only allows the use of output, and other statements will result in a compilation error. Using output(…) multiple times can output multiple query results, which will be displayed in separate tables, with the table names corresponding to the names of the query functions called within output.

    -

    Query Functions

    -

    Query functions are recommended to have a bool return type and need to use output() to output query results.

    -

    The query functions called within output() are no longer invoked in the conventional manner of passing arguments to functions. At this point, the parameter list changes to represent the table schema of the output table. Here are two examples of how query functions are applied:

    -
      -
    1. -

      Single-table output

      -

      A single-table output specifically refers to using output only once within the main function to produce output.

      -
      fn example(a: int, b: string) -> bool {...}
      -
      -fn main() {
      -    output(example()) // At this point, the parameter list becomes the output table schema and requires no arguments
      -}
      -

      The corresponding output table schema would be:

      -
      [
      -    {"a": 0, "b": "xxx"},
      -    {"a": 1, "b": "xxx"}
      -]
      -
    2. -
    3. -

      Multi-table output

      -

      A multi-table output refers to using output multiple times within the main function to produce output. In this case, the output data will include corresponding table names.

      -
      fn example0(a: int, b: string) -> bool {...}
      -fn example1(a: string, b: int) -> bool {...}
      -
      -fn main() {
      -    output(example0())
      -    output(example1())
      -}
      -

      The corresponding output table schema would be:

      -
      {
      -    "example0":[
      -        {"a": 0, "b": "xxx"},
      -        {"a": 1, "b": "xxx"}
      -    ],
      -    "example1":[
      -        {"a": "xxx", "b": 0},
      -        {"a": "xxx", "b": 1}
      -    ]
      -}
      -
    4. -
    -

    Below is a more detailed example where we directly construct two sets of data for output. In the following code, note that:

    -
      -
    1. -

      In GödelScript, boolean values can be represented with the keywords true and false.

      -
    2. -
    3. -

      The = symbol in GödelScript is quite special and should not be interpreted in the same way as in conventional programming languages. GödelScript is a Datalog language. Here, the = symbol carries dual semantics: both assignment and equality comparison. Details can be found in = operator.

      -
    4. -
    5. -

      In the conditional statements of this example, both a and b use the assignment semantics of =, because the int and string type parameters are considered ungrounded (unassigned/unbound) within the function body and must be assigned before they can be used.

      -
    6. -
    7. -

      The return value of the = assignment statement is true.

      -
    8. -
    -
    fn example(a: int, b: string) -> bool {
    -    // The = symbol serves both assignment and comparison purposes, depending on whether the left-hand value has been "assigned"
    -    // Here, the = symbols for a and b are used with assignment semantics
    -    if (a = 1 && b = "1") {
    -        // GödelScript uses the keywords true and false to represent boolean values
    -        return true
    -    }
    -    if (a = 2 && b = "2") {
    -        return true
    -    }
    -}
    -
    -fn main() {
    -    output(example())
    -}
    -

    The expected output should be:

    -
    [
    -    {"a": 1, "b": "1"},
    -    {"a": 2, "b": "2"}
    -]
    -

    Regular Functions

    -

    Regular functions are used to encapsulate complex processes, and these functions must have a clear return type. -There are two possible return types:

    -
      -
    1. A single return value, followed by a declaration of the return type after the arrow.
    2. -
    -
    fn getFile(c: Class) -> File {
    -    return c.getRelativePath()
    -}
    -
      -
    1. A set of return values, the return type after the arrow needs to be prefixed with * to indicate it’s a set.
    2. -
    -
    fn getAllFiles(db: JavaDB) -> *File {
    -    for (f: File in File(db)) {
    -        yield f
    -    }
    -}
    -

    Generally, return is used for functions with a single return value, while yield is used for functions returning a set. -In practice, since GödelScript uses the Datalog engine underneath, all operations are based on sets; a single return value actually only means that the returned set may contain only one data item, but it could also contain multiple items.

    -

    Statements

    -

    for Statement: Declaring Variables from a Set

    -

    GödelScript uses the for keyword and syntax similar to loop statements to declare variables from a set:

    -
    for(f: File in getAllFiles()) {
    -    ...
    -}
    -

    The type after the colon for f: File can be omitted. -The for statement allows the direct definition of multiple variables, where subsequent variables can use all previously defined variables in the same statement during initialization:

    -
    for(a in XmlAttribute(db), b in XmlAttribute(db), c in XmlElement(db)) {
    -    ...
    -}
    -
    -for(a in getAllFiles(), b in a.getAllPaths()) {
    -    ...
    -}
    -

    let Statement: Declaring a Single Variable

    -

    GödelScript uses the let keyword to declare a single/intermediate variable:

    -
    let(f: File = c.getRelativePath()) {
    -    ...
    -}
    -

    The type after the colon for f: File can be omitted. -The let statement allows the direct definition of multiple variables, where subsequent variables can use all previously defined variables in the same statement during initialization:

    -
    let(a = 1, b = a + 1, c = b + 1) {
    -    ...
    -}
    -

    if Statement

    -

    Conditional statements in GödelScript are similar to many procedural programming languages:

    -
    if (f.getName().contains("util") || f.getName().contains("com")) {
    -    ...
    -}
    -

    Conditions can be connected using logical operators: ! for NOT, || for OR, and && for AND.

    -

    Comparative operators in conditions: > for greater than, < for less than, >= for greater than or equal to, <= for less than or equal to, = for equal to or assignment, != for not equal to.

    -

    Regular arithmetic operations can use the following operators: + for addition, - for subtraction/negation, * for multiplication, / for division.

    -
    Assignment and Equality Comparison Operator =
    -

    The = symbol in GödelScript carries two different semantics: assignment and equality comparison. The specific semantics need to be discussed based on the context:

    -
      -
    1. -

      Assignment

      -

      Assignment generally occurs with fundamental type variables such as int and string. These variables, when used as function parameters, are typically considered unassigned. When a function with such variables is called, the parameters passed in actually serve as filtering conditions.

      -
      fn example(a: int) -> bool {
      -    // This is somewhat counterintuitive; in procedural languages, this is usually taken to mean a == 1
      -    // However, in Datalog dialects, each function in Datalog is essentially calculating an intermediate table (view)
      -    // So this function is essentially generating a view with data [{"a": 1}]
      -    return a = 1 // assign a = 1
      -}
      -
      -fn test() -> bool {
      -    // Although it seems like we are passing a parameter to make a = 2, it's not really the case
      -    // example() itself returns the view: [{"a": 1}]
      -    // Then it is constrained by a = 2, and as you can see, we don't get any result here
      -    // So it returns false
      -    return example(2) // false
      -}
      -
    2. -
    3. -

      Equality Comparison

      -

      For schema types, since each schema type has a universe behind it, schema type parameters in the parameter list are generally considered to have been assigned. For variables that have already been assigned, = operates as an equality comparison.

      -
      // Declare schema
      -schema A {...}
      -
      -// Implement schema member functions
      -impl A {
      -    // Here we define the universe for schema A
      -    @data_constraint
      -    pub fn __all__() -> *A {...}
      -}
      -
      -fn example(a: A) -> bool {
      -    for(temp in A::__all__()) {
      -        if (a = temp) {
      -            return true
      -        }
      -    }
      -}
      -

      Similarly, for internally declared int or string with initial values, = also operates as an equality comparison.

      -
      fn example() -> bool {
      -    let (a = 1) { // assign a = 1
      -        if (a = 1) { // compare a = 1
      -            return true
      -        }
      -    }
      -}
      -
    4. -
    -

    match Statement

    -

    GödelScript allows writing match statements for int and string types. A match statement is similar to a switch statement with multiple conditional branches, and the conditions in the match must be literals:

    -
    match(a) {
    -    1 => return 0,
    -    2 => return 1,
    -    3 => if (a + 1 < 10) {
    -        return 10
    -    }
    -}
    -

    Return Statements

    -

    GödelScript uses return and yield. return is for functions with a single return value, and yield is for returning sets.

    -
    fn a() -> int {
    -    return 0
    -}
    -
    -fn b() -> *int {
    -	yield 1
    -	yield 2
    -	yield 3
    -}
    -

    Schema

    -

    Schema is a structure for complex data tables in GödelScript.

    -

    Structure Declaration

    -

    GödelScript uses the schema keyword to declare a table structure:

    -
    schema File {
    -    id: int,
    -    name: string
    -}
    -

    If a field exists as a primary key in the database, you can use the @primary annotation to indicate that it’s a primary key:

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -

    Table structures with a primary key significantly improve query speed, so try to bind a primary key, preferably of type int.

    -

    Method Implementation

    -

    GödelScript declares and implements methods related to schema as follows:

    -
    impl File {
    -    // Static method
    -    fn f1() -> ... {...}
    -	// Member method, the first argument must be self
    -	fn f2(self) -> ... {...}
    -	...
    -}
    -
    Static Methods
    -

    Static methods do not require self as the first argument and are straightforward to use: ClassName::MethodName(...).

    -
    impl File {
    -    fn getSchemaName() -> string {
    -        return "File"
    -    }
    -}
    -
    -fn out(t: string) -> bool {
    -    if (t = File::getSchemaName()) {
    -        return true
    -    }
    -}
    -
    Member Methods
    -

    The first argument for member methods must be self, without specifying its type. These functions are called using InstanceName.FunctionName(...).

    -
    impl File {
    -    fn getName(self) -> string {
    -        return self.name
    -    }
    -}
    -
    -fn out(path: string) -> bool {
    -    let (db = JavaDB::load("coref_java_src.db")) {
    -        for (f in File::__all__(db)) {
    -            if (path = f.getName()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    Data Loading Method fn __all__(db)
    -

    A schema can contain a special static method for loading its dataset from the database.

    -
    impl File {
    -    @data_constraint
    -    fn __all__(db: JavaDB) -> *File {
    -        ...
    -    }
    -}
    -

    This method must contain the special annotation @data_constraint, indicating that it is specialized for loading. Without this annotation, the method will return an empty set. The return type must be a set of itself.

    -

    A schema that includes this method can use syntactic sugar to get its full set:

    -
    fn out() -> bool {
    -    for(f in File(JavaDB::load("..."))) {
    -        ...
    -    }
    -    ...
    -}
    -// Equivalent to
    -fn out() -> bool {
    -    for(f in File::__all__(JavaDB::load("..."))) {
    -        ...
    -    }
    -    ...
    -}
    -
    Custom Full Set Method
    -

    A schema allows using static methods with different names than __all__ to indicate that some sets also exist within its full set. This method must also contain the special annotation @data_constraint. This method is generally used to manually add some data to the full set of that type.

    -
    impl File {
    -    @data_constraint
    -    fn extend_example() -> *File {
    -        yield File {id: 1234567}
    -    }
    -}
    -

    Constructing Anonymous Instances

    -

    GödelScript allows for the creation of anonymous instances with a specific syntax. The creation of anonymous instances is contingent on the instance existing within the full set of the schema, unless this usage appears within a @data_constraint method, in which case the result will be empty.

    -
    schema A {
    -    @primary id: int,
    -    name: string
    -}
    -

    The corresponding syntax to create an anonymous instance is as follows:

    -
    A {id: 1, name: "first"}
    -

    Schema Inheritance

    -

    Schema inheritance in GödelScript is very straightforward, exemplified as follows:

    -
    schema MyFile extends File {}
    -
    Parent Field Inheritance
    -

    The subclass will inherit all fields from the parent class by default, so there is no need to manually rewrite them.

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -
    -schema MyFile extends File {}
    -
    Parent Method Inheritance
    -

    The subclass will inherit all methods from the parent class by default, except for those marked with @data_constraint. There is no need to manually rewrite them. However, the __all__ method is special and will not be inherited, so you need to rewrite the __all__ method to determine the full set of the inherited schema.

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -
    -impl File {
    -    @data_constraint
    -    fn __all__() -> *File {...}
    -	fn getId(self) -> int {...}
    -    fn staticMethod() -> string {return "File"}
    -}
    -
    -schema MyFile extends File {}
    -
    Method Override
    -

    If the subclass implementation contains a method with the same name as the parent class, the parent method will be overridden by the subclass method.

    -
    schema File {
    -    @primary id: int,
    -    name: string
    -}
    -
    -impl File {
    -    fn staticMethod() -> string {return "File"}
    -}
    -
    -schema MyFile extends File {}
    -
    -impl MyFile {
    -    fn staticMethod() -> string {return "MyFile"}
    -}
    -

    In this case, File::staticMethod is overridden by MyFile::staticMethod, so when calling the subclass method, the result obtained will be "MyFile".

    -

    Database

    -

    Database Declaration

    -

    The declaration format for databases is as follows:

    -
    database DatabaseName {
    -    // table_name corresponds to the real table name in the db
    -    // GodelSchemaType corresponds to the schema in which the table data is stored after reading into godel
    -    table_name : *GodelSchemaType
    -}
    -

    Before the colon is the real table name in the loaded database; after the colon is the data table format, which must be a schema type. For example, if a table called annotation exists in the db and corresponds to the Annotation schema, the declaration would be:

    -
    database JavaDB {
    -    // Reads data from the db's annotation table and stores it in Annotation
    -    annotation : *Annotation
    -}
    -

    Additionally, it is necessary to ensure that the Annotation structure matches the table structure. For example:

    -
    schema Annotation {
    -    @primary id: int, // The primary annotation indicates that this field is the primary key; a table can also have no primary key
    -    content: string
    -}
    -

    The annotation table must contain id and content fields with corresponding storage types.

    -

    Database Loading

    -

    Database types have a static method (database)::load(filename: string)

    -
    fn loadDatabaseExample() -> bool {
    -    // The string passed to load is the db's filename, not the path
    -    // The db's path will be passed as a command-line argument when executing godel
    -    let (db: JavaDB = JavaDB::load("...")) {
    -        ...
    -    }
    -}
    -

    Data Table Access

    -

    In the example above, to access the annotation table:

    -
    fn getAnnotation() -> Annotation {
    -    // The string passed to load is the db's filename, not the path
    -    // The db's path will be passed as a command-line argument when executing godel
    -    let (db: JavaDB = JavaDB::load("...")) {
    -        // Directly use db.field to access the table data
    -        for (anno: Annotation in db.annotation) {
    -            ...
    -        }
    -    }
    -}
    -

    Trait

    -

    Trait Declaration

    -

    The syntax for declaring a trait is as follows:

    -
    trait Example {
    -    fn getId(self) -> int;
    -    fn getName(self) -> string;
    -    fn getValueByName(self, name: string) -> string;
    -}
    -

    Impl Trait

    -

    The syntax is similar to impl, but you must implement all the functions declared in the trait to pass compilation.

    -
    impl Example for XmlElement {
    -    fn getId(self) -> int {return self.id}
    -    fn getName(self) -> string {return self.name}
    -    fn getValueByName(self, name: string) -> string {
    -        for(attr in XmlAttribute(XmlDB::load("..."))) {
    -            if (attr.getName() = name && attr.id = self.getAttribute().id) {
    -                return attr.getValue()
    -            }
    -        }
    -    }
    -}
    -

    Import

    -

    GödelScript uses the use keyword to import symbols from other files:

    -
    use coref::java::* // Import all symbols
    -use coref::xml::Location // Import a single symbol
    -use coref::xml::{XmlDB, XmlElement} // Import multiple symbols
    -

    Module Import Rules

    -

    The GödelScript package manager is enabled when the input parameters include -p {package dir path}.

    -

    The package manager will parse the folder structure, traversing all .gdl files. After obtaining the relative path of the files, it will map the path to the corresponding package path. If the relative path contains -, or if a folder name or filename starts with a digit, the path will not be accepted by the package manager, but it will not issue an error and will simply ignore it.

    -

    If you want to know which paths were ignored, you can use the -v parameter. With this parameter, the package manager will report the ignored paths as warnings. If there are path conflicts in the mapped paths, the package manager will report them as errors and exit the compilation process.

    -
    packages:
    -  coref::cfamily    -> /.../Library/coref.cfamily.gdl
    -  coref::go         -> /.../Library/coref.go.gdl
    -  coref::java       -> /.../Library/coref.java.gdl
    -  coref::javascript -> /.../Library/coref.javascript.gdl
    -  coref::properties -> /.../Library/coref.properties.gdl
    -  coref::python     -> /.../Library/coref.python.gdl
    -  coref::sql        -> /.../Library/coref.sql.gdl
    -  coref::xml        -> /.../Library/coref.xml.gdl
    -modules
    -  +--coref -> coref
    -     |--xml -> coref::xml
    -     |--properties -> coref::properties
    -     |--cfamily -> coref::cfamily
    -     |--java -> coref::java
    -     |--javascript -> coref::javascript
    -     |--go -> coref::go
    -     |--sql -> coref::sql
    -     +--python -> coref::python
    -

    Path Mapping Example

    -
    Library
    -|-- coref.java.gdl
    -|-- coref.xml.gdl
    -+-- coref
    -    |-- go.gdl
    -    +-- a
    -        +-- b.gdl
    -=>
    -coref::java
    -coref::xml
    -coref::go
    -coref::a::b
    -

    In this example, there is a path conflict:

    -
    Library
    -|-- coref
    -|   |-- java.gdl
    -|   +-- python.gdl
    -+-- coref.python.gdl
    -=>
    -coref::java
    -coref::python -- \
    -                  > Conflict
    -coref::python -- /
    -

    In this example, there are invalid characters in the path:

    -
    Library
    -|-- 0123.gdl
    -|-- my-godel-lib
    -|   +-- js.gdl
    -+-- lib-file.123.gdl
    -=>
    -0123
    -^ The first character is a digit
    -my-godel-lib::js
    -  ^     ^ Uses the `-` character
    -lib-file::123
    -   ^      ^ First character after `.` is a digit, and the path contains `-`
    -

    Symbol Conflict

    -

    In use, it’s possible to encounter situations with symbol conflicts. In such cases, direct use of File will result in a symbol conflict, and you need to specify one of the symbols.

    -
    use coref::java::Location
    -use coref::xml::Location
    -schema MyLoc extends Location {}
    -                     ^^^^^^^^
    -Error: "Location" is ambiguous, with multiple symbols
    -       "coref::java::Location, coref::xml::Location".
    -

    Like other languages, GödelScript allows specifying a symbol directly through its full path, provided the symbol has been imported.

    -
    use coref::java::Location
    -use coref::xml::Location
    -schema MyLoc extends coref::xml::Location {}
    -

    Full path symbols can be used in the following situations:

    -
      -
    • Schema inheritance
    • -
    -
    schema JavaLocation extends coref::java::Location {}
    -
      -
    • Function parameters and return values
    • -
    -
    fn return_java_file(f: coref::java::File) -> coref::java::File {
    -    ...
    -}
    -
      -
    • Database declarations
    • -
    -
    database MyDB {
    -    java_file: coref::java::File,
    -    xml_file: coref::xml::File,
    -    java_loc: coref::java::Location,
    -    xml_loc: coref::xml::Location
    -}
    -
      -
    • Query list type declarations
    • -
    -
    query example from
    -	coref::java::Location loc in coref::java::Location(coref::java::JavaDB::load("..."))
    -where
    -	...
    -select
    -	...
    -
      -
    • Schema static method calls
    • -
    -
    for(loc in coref::java::Location(coref::java::JavaDB::load("..."))) {
    -    ...
    -}
    -
    -stmt.to<coref::java::ElementParent>()
    -stmt.is<coref::java::ElementParent>()
    -

    Query

    -

    Query is used for simple queries, and its results are guaranteed to be output even without declaring a main function. The syntax format for query is as follows:

    -
    query name from
    -	variable in initial value,
    -    variable in initial value,
    -    variable in initial value
    -where condition
    -select value as output column name
    -	value as output column name,
    -    value as output column name,
    -    value as output column name
    -

    Variable declarations in the from list do not need type annotations, as the compiler will automatically infer them. Additionally, the from list does not use = but the in keyword. Also, in the select list, the output column name cannot conflict with the calculation variables, but the column name can be omitted. Omitted column names will take random names in the output results, so it’s best not to omit them.

    -

    Here is a hello world written in query syntax:

    -
    query hello_world from
    -	info in "hello world"
    -select info as greeting
    -

    The code above is equivalent to the following code:

    -
    fn hello_world(greeting: string) -> bool {
    -    let (info = "hello world") {
    -        if (greeting = info) {
    -            return true
    -        }
    -    }
    -}
    -fn main() {
    -    output(hello_world())
    -}
    -

    Example and Composition Structure

    -

    Query includes a query name, a from list, a where filter condition, and a select list.

    -
    // script
    -use coref::java::{Callable, Class, Interface, JavaDB}
    -
    -fn db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -query class_method from
    -    Callable m in Callable(db()),
    -    Class c in Class(db())
    -where
    -    c.id = m.getBelongedClass().id
    -select
    -    c.getQualifiedName() as className,
    -    m.getName() as methodName,
    -    m.getSignature() as methodSignature
    -

    Equivalent Code

    -

    The example above is equivalent to the following code:

    -
    // script
    -use coref::java::{Callable, Class, Interface, JavaDB}
    -
    -fn db() -> JavaDB {
    -  return JavaDB::load("coref_java_src.db")
    -}
    -
    -fn main() {
    -  output(class_method())
    -}
    -
    -fn class_method(className: string, methodName: string, methodSignature: string) -> bool {
    -  for (m in Callable(db()), c in Class(db())) {
    -    if (c.id = m.getBelongedClass().id) {
    -      if (className = c.getQualifiedName() &&
    -          methodName = m.getName() &&
    -          methodSignature = m.getSignature()) {
    -        return true
    -      }
    -    }
    -  }
    -}
    -

    Ungrounded Error

    -

    GödelScript will determine symbols that are not bound to a set as ungrounded. The basic rule of judgment is:

    -
      -
    • Uninitialized/unused/unbound symbols -
        -
      • Unbound int, string arguments
      • -
      • Unused database type arguments
      • -
      • Function body has statements, but no return statements
      • -
      -
    • -
    • Symbols bound within negation blocks -
        -
      • For example, !(__tmp = 1), __tmp is considered unbound
      • -
      • Calling inline functions or data constructors in negation blocks
      • -
      -
    • -
    -

    1. Unused Database/Basic Type Parameters

    -

    In the function block, if there is a branch that does not use database or basic type parameters, it will inevitably lead to ungrounded:

    -
    fn test(db: JavaDB, a: int, b: string) -> bool {}
    -        ^^          ^       ^                  ^^
    -Error: ungrounded parameter "db, a, b" in this branch.
    -

    The compiler will indicate in which branch there is an unused parameter. Check the corresponding execution path and complete the parameter constraints based on the prompt.

    -

    If some functions have basic type parameters but always use literals when called, and if ungrounded is incorrectly reported, you can add an @inline annotation to the function to avoid incorrect constraint checks.

    -
    impl XXX {
    -    @inline
    -    fn getValueByAttributeNameByDefaultValue(self, attributeName: string) -> string {
    -        if (self.hasAttribute(attributeName)) {
    -            return self.getValueByAttributeName(attributeName)
    -        }
    -        if (!self.hasAttribute(attributeName)) {
    -            return "null"
    -        }
    -    }
    -}
    -
    -fn xxx() -> xx {
    -    ..
    -    attr.getValueByAttributeNameByDefaultValue("pattern")
    -                                               ^^^^^^^^^ Use literals, add @inline to pass the check
    -}
    -

    2. No Return Statement in Non-Empty Function Body

    -

    GödelScript allows an empty function body without any statements. However, if there are other statements in the function body, GödelScript requires at least one return statement, otherwise an ungrounded error will occur.

    -
    fn test() -> int {}
    -                  ^^ No statements, passes compilation
    -
    -fn test() -> int {
    -    let (a = 1) {}
    -    ^^^^^^^^^^^^^^ Statements present, no return statement, ungrounded
    -}
    -

    3. Using Inline Functions or Data Constructors in Negation Blocks

    -

    As mentioned above, @inline annotation can be used to circumvent ungrounded errors. However, if inline functions are used in negation blocks, it will inevitably result in ungrounded errors.

    -

    Similarly, data constructors are used to bind temporary intermediate variables, but this will directly result in ungrounded errors. -Therefore, using inline functions or data constructors in negation blocks will inevitably lead to ungrounded errors, and the compiler will report errors for all such cases.

    -
    if (!check(method.to<ElementParent>())) {
    -           ^^^^^^^^^^^^^^^^^^^^^^^^^^ ungrounded
    -}
    -if (!check(ElementParent {id: 0})) {
    -           ^^^^^^^^^^^^^^ ungrounded
    -}
    -
    -@inline
    -fn for_test() -> ElementParent {
    -    ...
    -}
    -if (!check(for_test())) {
    -           ^^^^^^^^^^ Negation block contains inline function, ungrounded
    -}
    -

    4. Negation of Chained Calls

    -

    GödelScript does not perform ungrounded checks for negation of chained calls, but this pattern will cause an ungrounded error in Soufflé:

    -
    use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -fn get_field() -> *Field {
    -    for (field in Field(default_java_db())) {
    -        if (!field.getLocation().getFile().getRelativePath().contains("/test/")) {
    -            yield field
    -        }
    -    }
    -}
    -

    Where:

    -
    !field.getLocation().getFile().getRelativePath().contains("/test/")
    -

    It will be translated to a Soufflé code fragment like this:

    -
    !(__tmp = field, Field_getLocation(__tmp, __tmp_1), ..., contains("/test/", __tmp_4))
    -  ^^^^^                                   ^^^^^^^
    -

    The variables used for intermediate storage are bound inside !(...), but due to the negation operator, this binding is considered hypothetical. However, __tmp, __tmp_1 are then considered to be variables declared for the entire statement scope, leading to ungrounded.

    -

    This can be avoided by declaring intermediate variables to catch intermediate results in a negation operation:

    -
    fn get_field() -> *Field {
    -    for (field in Field(default_java_db())) {
    -        let (path = field.getLocation().getFile().getRelativePath()) {
    -            if (!path.contains("/test/")) {
    -                yield field
    -            }
    -        }
    -    }
    -}
    -

    Query Examples

    -

    Java

    -

    Unused Methods

    -
    // script
    -use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -// find unused methods
    -fn unused_method(unused: string) -> bool {
    -    for(c in Callable(default_java_db()), method in Callable(default_java_db()), caller in method.getCaller()) {
    -        if (c != caller && unused = method.getSignature()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(unused_method())
    -}
    -

    Class Inheritance Relationship

    -
    // script
    -use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -	return JavaDB::load("coref_java_src.db")
    -}
    -
    -/**
    - * Find all class and the inheritances
    - * including parent class inheritance and ancestor class inheritance
    - */
    -fn class_hierarchy(className : string, superClassName : string) -> bool {
    -    for (c in Class(default_java_db()), ancestor in c.getAnAncestorClass()) {
    -        if (className = c.getQualifiedName() &&
    -            superClassName = ancestor.getQualifiedName()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() { 
    -	output(class_hierarchy())
    -}
    -

    Querying All Methods in a Class

    -
    // script
    -use coref::java::*
    -
    -fn default_java_db() -> JavaDB {
    -	return JavaDB::load("coref_java_src.db")
    -}
    -
    -// Find all methods of the class
    -fn methods(className : string, methodName : string) -> bool {
    -    for (c in Class(default_java_db()), m in c.getAllMethods()) {
    -        if (className = c.getQualifiedName() &&
    -            methodName = m.getName()){
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() { 
    -	output(methods())
    -}
    -

    Python

    -

    Cyclomatic Complexity

    -
    // script
    -use coref::python::*
    -
    -fn default_db() -> PythonDB {
    -    return PythonDB::load("coref_python_src.db")
    -}
    -
    -/**
    - * Get cyclomatic complexity of functions
    - *
    - * @param name   function name
    - * @param value  cyclomatic complexity of function
    - * @param path   path of file including this function
    - * @param sline  function start line
    - * @param eline  function end line
    - */
    -fn getCyclomaticComplexity(
    -    name: string,
    -    value: int,
    -    path: string,
    -    sline: int,
    -    eline: int) -> bool {
    -    // get metric function
    -    for (c in MetricFunction(default_db())) {
    -        if (path = c.getLocation().getFile().getRelativePath() &&
    -            name = c.getQualifiedName() &&
    -            value = c.getCyclomaticComplexity() &&
    -            sline = c.getLocation().getStartLineNumber() &&
    -            eline = c.getLocation().getEndLineNumber()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(getCyclomaticComplexity())
    -}
    -

    Comment Rate

    -
    // script
    -use coref::python::*
    -
    -schema PublicVisitedElement extends CombineElement {}
    -
    -impl PublicVisitedElement {
    -    @data_constraint
    -    pub fn __all__(db: PythonDB) -> *PublicVisitedElement {
    -        for (tmp in Class(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -        for (tmp in Function(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -    }
    -}
    -
    -fn default_db() -> PythonDB {
    -    return PythonDB::load("coref_python_src.db")
    -}
    -
    -
    -// count number of total public element
    -fn countTotalPublicElement() -> int {
    -    return PublicVisitedElement(default_db()).len()
    -}
    -
    -// get public elements with Docstring comment
    -fn withDocstringCommentElement() -> *PublicVisitedElement {
    -    let (db = default_db()) {
    -        for (e in PublicVisitedElement(db), j in DocstringComment(db)) {
    -            if (e.key_eq(j.getDocumentableElement())) {
    -                yield e
    -            }
    -        }
    -    }
    -}
    -
    -// count number of public elements with Docstring comment
    -fn countTotalPublicDocumentedElement() -> int {
    -    return withDocstringCommentElement().len()
    -}
    -
    -fn withPublicDocumentedBelowElement() -> *PublicVisitedElement {
    -    let (db = default_db()) {
    -        for (e in PublicVisitedElement(db), j in Comment(db)) {
    -            if (e.key_eq(j.getDocumentedClassOrFunctionElement())) {
    -                yield e
    -            }
    -        }
    -    }
    -}
    -
    -// count number of public element with single line comment
    -fn countTotalPublicDocumentedBelowElement() -> int {
    -    return withPublicDocumentedBelowElement().len()
    -}
    -
    -
    -// calculate documented percentage
    -fn getDocumentedPercentage(documentedPercentage: int) -> bool {
    -    let (i = countTotalPublicElement(),
    -        j = countTotalPublicDocumentedElement(),
    -        k = countTotalPublicDocumentedBelowElement()) {
    -        if (i = 0) {
    -            if (documentedPercentage = -1) {
    -                return true
    -            }
    -        }
    -        if (i != 0) {
    -            if (documentedPercentage = (j + k) * 1000 / i) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(getDocumentedPercentage())
    -}
    -

    Comments in a Method

    -
    // script
    -use coref::python::*
    -
    -schema PublicVisitedElement extends CombineElement {}
    -
    -impl PublicVisitedElement {
    -    @data_constraint
    -    pub fn __all__(db: PythonDB) -> *PublicVisitedElement {
    -        for (tmp in Class(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -        for (tmp in Function(db)) {
    -            yield PublicVisitedElement {id: tmp.element_oid}
    -        }
    -    }
    -
    -    pub fn getName(self) -> string {
    -        let (tmp = Class(__all_data__).find(self)) {
    -            return tmp.getQualifiedName() 
    -        }
    -        let (tmp = Function(__all_data__).find(self)) {
    -            return tmp.getQualifiedName() 
    -        }
    -    }
    -}
    -
    -fn default_db() -> PythonDB {
    -    return PythonDB::load("coref_python_src.db")
    -}
    -
    -fn hasComment(e: PublicVisitedElement) -> bool {
    -    let (db = default_db()) {
    -        for (j in DocstringComment(db)) {
    -            if (e.key_eq(j.getDocumentableElement())) {
    -                return true
    -            }
    -        }
    -        for (j in Comment(db)) {
    -            if (e.key_eq(j.getDocumentedClassOrFunctionElement())) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -/**
    - * Get comment of each public element
    - *
    - * @param type          public visited element type
    - * @param name          public visited element name
    - * @param filePath      file path
    - * @param sline         element start line
    - * @param eline         element end line
    - * @param isCommented   if is commented
    - */
    -fn output_result(
    -    type: string,
    -    name: string,
    -    filePath: string,
    -    sline: int,
    -    eline: int,
    -    isCommented: int) -> bool {
    -    for (e in PublicVisitedElement(default_db())) {
    -        if (type = e.getType() && 
    -            name = e.getName() &&
    -            filePath = e.getLocation().getFile().getRelativePath() &&
    -            sline = e.getLocation().getStartLineNumber() &&
    -            eline = e.getLocation().getEndLineNumber()) {
    -            if (hasComment(e)) {
    -                if (isCommented = 1) {
    -                    return true
    -                }
    -            }
    -            if (!hasComment(e)) {
    -                if (isCommented = 0) {
    -                    return true
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(output_result())
    -}
    -

    JavaScript

    -

    AST Print

    -
    // script
    -use coref::javascript::*
    -
    -/**
    - * print AST
    - *
    - * @param filePath          file path
    - * @param parentId          parent node ID
    - * @param parentKind        parent node kind
    - * @param parentStartLine   parent node start line
    - * @param parentEndLine     parent node end line
    - * @param childId           child node ID
    - * @param childKind         child node kind
    - * @param childStartLine    child node start line
    - * @param childEndLine      child node end line
    - * @param index             child node index
    - */
    -fn out(
    -    filePath: string,
    -    parentId: int,
    -    parentKind: string,
    -    parentStartLine: int,
    -    parentEndLine: int,
    -    childId: int,
    -    childKind: string,
    -    childStartLine: int,
    -    childEndLine: int,
    -    index: int
    -) -> bool {
    -    let (db = JavascriptDB::load("coref_javascript_src.db")) {
    -        for (parent in Node(db),
    -            child in Node(db),
    -            parentSyntaxKind in SyntaxKind(),
    -            childSyntaxKind in SyntaxKind(),
    -            parentLocation in Location(db),
    -            childLocation in Location(db),
    -            file in File(db)) {
    -            if (parent.key_eq(child.getParent()) &&
    -                parentId = parent.id &&
    -                childId = child.id &&
    -                parentSyntaxKind.id = parent.getKind() &&
    -                childSyntaxKind.id = child.getKind() &&
    -                parentKind = parentSyntaxKind.getName() &&
    -                childKind = childSyntaxKind.getName() &&
    -                index = child.getIndex() &&
    -                parentLocation = parent.getLocation() &&
    -                childLocation = child.getLocation() &&
    -                file = parentLocation.getFile() &&
    -                filePath = file.getRelativePath() &&
    -                parentStartLine = parentLocation.getStartLineNumber() &&
    -                parentEndLine = parentLocation.getEndLineNumber() &&
    -                childStartLine = childLocation.getStartLineNumber() &&
    -                childEndLine = childLocation.getEndLineNumber()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Cyclomatic complexity

    -
    // script
    -use coref::javascript::*
    -
    -fn default_db() -> JavascriptDB {
    -    return JavascriptDB::load("coref_javascript_src.db")
    -}
    -
    -/**
    - * Output the cyclomatic complexity of each function
    - *
    - * @param filePath      file path
    - * @param functionName  function name
    - * @param complexity    cyclomatic complexity
    - * @param startLine     function start line
    - * @param endLine       function end line
    - */
    -fn out(filePath: string, functionName: string, complexity: int, startLine: int, endLine: int) -> bool {
    -    let (db = default_db()) {
    -        for (func in FunctionLikeDeclaration(db), file in File(db)) {
    -            if (complexity = func.getCyclomaticComplexity() &&
    -                functionName = func.getName() &&
    -                file = func.getLocation().getFile() &&
    -                filePath = file.getRelativePath() &&
    -                startLine = func.getLocation().getStartLineNumber() &&
    -                endLine = func.getLocation().getEndLineNumber()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Change Effect

    -
    // script
    -use coref::javascript::*
    -
    -fn default_db() -> JavascriptDB {
    -    return JavascriptDB::load("coref_javascript_src.db")
    -}
    -
    -fn getACallerFunction(function: FunctionLikeDeclaration, callerFunction: FunctionLikeDeclaration) -> bool {
    -    for (mayInvokeExpression in MayInvokeExpression(default_db())) {
    -        if (mayInvokeExpression = function.getACallSite() &&
    -            callerFunction = mayInvokeExpression.getEnclosingFunction()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn getAnEffectedFunction(function: FunctionLikeDeclaration, effectedFunction: FunctionLikeDeclaration) -> bool {
    -    if (getACallerFunction(function, effectedFunction)) {
    -        return true
    -    }
    -    for (callerFunction in FunctionLikeDeclaration(default_db())) {
    -        if (getACallerFunction(function, callerFunction) &&
    -            getAnEffectedFunction(callerFunction, effectedFunction)) {
    -            return true
    -        }
    -    }
    -}
    -
    -/**
    - * Query the effected functions according to the changed lines.
    - *
    - * @param function              the changed function id
    - * @param signature             the changed function signature
    - * @param functionPath          the changed function file path
    - * @param startLine             the changed function start line
    - * @param endLine               the changed function end line
    - * @param effectedFunction      the effected function id
    - * @param effectedSignature     the effected function signature
    - * @param effectedFunctionPath  the effected function file path
    - * @param effectedStartLine     the effected function start line
    - * @param effectedEndLine       the effected function end line
    - */
    -fn out(
    -    function: FunctionLikeDeclaration,
    -    signature: string,
    -    functionPath: string,
    -    startLine: int,
    -    endLine: int,
    -    effectedFunction: FunctionLikeDeclaration,
    -    effectedSignature: string,
    -    effectedFunctionPath: string,
    -    effectedStartLine: int,
    -    effectedEndLine: int
    -) -> bool {
    -    if (getAnEffectedFunction(function, effectedFunction)) {
    -        let (symbol = function.getSymbol(),
    -            effectedSymbol = effectedFunction.getSymbol(),
    -            location = function.getLocation(),
    -            effectedLocation = effectedFunction.getLocation()) {
    -            if (signature = symbol.getDescription() &&
    -                effectedSignature = effectedSymbol.getDescription() &&
    -                functionPath = location.getRelativePath() &&
    -                startLine = location.getStartLineNumber() &&
    -                endLine = location.getEndLineNumber() &&
    -                effectedFunctionPath = effectedLocation.getRelativePath() &&
    -                effectedStartLine = effectedLocation.getStartLineNumber() &&
    -                effectedEndLine = effectedLocation.getEndLineNumber()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    XML

    -

    Getting bean

    -
    // script
    -use coref::xml::*
    -
    -schema BeanXmlElement extends XmlElement {}
    -
    -impl BeanXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *BeanXmlElement {
    -        for (e in XmlElement(db)) {
    -            let (path = e.getLocation().getFile().getRelativePath()) {
    -                if (!path.contains("target") && e.getName() = "bean") {
    -                    yield BeanXmlElement {
    -                        id: e.id,
    -                        location_id: e.location_id,
    -                        parent_id: e.parent_id,
    -                        index_order: e.index_order
    -                    }
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema EntryXmlElement extends XmlElement {}
    -
    -impl EntryXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *EntryXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getName() = "entry") {
    -                yield EntryXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema PropertyXmlElement extends XmlElement {}
    -
    -impl PropertyXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *PropertyXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getName() = "property") {
    -                yield PropertyXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -fn default_db() -> XmlDB {
    -    return XmlDB::load("coref_xml_src.db")
    -}
    -
    -// get class name
    -fn getClassName(bean: BeanXmlElement) -> string {
    -    for (attr in bean.getAttribute()) {
    -        if (attr.getName() = "class") {
    -            return attr.getValue()
    -        }
    -    }
    -}
    -
    -// get key
    -fn getKey(e: EntryXmlElement) -> string {
    -    for (attr in e.getAttribute()) {
    -        if (attr.getName() = "key") {
    -            return attr.getValue()
    -        }
    -    }
    -}
    -
    -// output value and class info of the bean
    -fn output1(className: string, pName: string, kName: string) -> bool {
    -    let (db = default_db()) {
    -        for (bean in BeanXmlElement(db), p in PropertyXmlElement(db), e in EntryXmlElement(db)) {
    -            if (className = getClassName(bean) &&
    -                bean.key_eq(p.getParent()) &&
    -                p.key_eq(e.getParent().getParent()) &&
    -                pName = p.getName() &&
    -                kName = getKey(e)) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(output1())
    -}
    -

    POM

    -
    // script
    -use coref::xml::*
    -
    -schema DependencyElement extends XmlElement {}
    -
    -impl DependencyElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *DependencyElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "dependency") {
    -                yield DependencyElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema GroupElement extends XmlElement {}
    -
    -impl GroupElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *GroupElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "groupId") {
    -                yield GroupElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema VersionElement extends XmlElement {}
    -
    -impl VersionElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *VersionElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "version") {
    -                yield VersionElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema ArtifactElement extends XmlElement {}
    -
    -impl ArtifactElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *ArtifactElement {
    -        for(e in XmlElement(db)) {
    -            if (e.getElementName() = "artifactId") {
    -                yield ArtifactElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -schema PomFile extends XmlFile {}
    -
    -impl PomFile {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *PomFile {
    -        for(f in XmlFile(db)) {
    -            if (f.getFileName() = "pom.xml") {
    -                yield PomFile {
    -                    id: f.id,
    -                    file_name: f.file_name,
    -                    relative_path: f.relative_path
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -// output relative path of the file, referenced jar name and version
    -fn out(fileName: string, m1: string, m2: string, m3: string) -> bool {
    -    let (db = XmlDB::load("coref_xml_src.db")) {
    -        for (f in PomFile(db),
    -            e1 in GroupElement(db),
    -            e2 in VersionElement(db),
    -            e3 in ArtifactElement(db),
    -            c1 in XmlCharacter(db),
    -            c2 in XmlCharacter(db),
    -            c3 in XmlCharacter(db),
    -            p in DependencyElement(db)) {
    -            if (f.key_eq(p.getLocation().getFile()) &&
    -                fileName = f.getRelativePath() &&
    -                p.key_eq(e1.getParent()) &&
    -                e1.key_eq(c1.getBelongedElement()) &&
    -                m1 = c1.getText() &&
    -                p.key_eq(e2.getParent()) &&
    -                e2.key_eq(c2.getBelongedElement()) &&
    -                m2 = c2.getText() &&
    -                p.key_eq(e3.getParent()) &&
    -                e3.key_eq(c3.getBelongedElement()) &&
    -                m3 = c3.getText()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    RPC

    -
    // script
    -use coref::xml::*
    -
    -// select XmlElement containing "mobileService"
    -schema MobileServiceXmlElement extends XmlElement{}
    -
    -impl MobileServiceXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *MobileServiceXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getElementName() = "mobileService") {
    -                yield MobileServiceXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -
    -    pub fn getServiceBeanValue(self) -> string {
    -        for (a in self.getAttribute()) {
    -            if (a.getName() = "serviceBean") {
    -                return a.getValue()
    -            }
    -        }
    -    }
    -}
    -
    -// select XmlElement containing "sofa:extension"
    -schema SofaExtensionXmlElement extends XmlElement{}
    -impl SofaExtensionXmlElement {
    -    @data_constraint
    -    pub fn __all__(db: XmlDB) -> *SofaExtensionXmlElement {
    -        for (e in XmlElement(db)) {
    -            if (e.getName() = "sofa:extension") {
    -                yield SofaExtensionXmlElement {
    -                    id: e.id,
    -                    location_id: e.location_id,
    -                    parent_id: e.parent_id,
    -                    index_order: e.index_order
    -                }
    -            }
    -        }
    -    }
    -}
    -
    -fn out(value: string) -> bool {
    -    let (db = XmlDB::load("coref_xml_src.db")) {
    -        for (m in MobileServiceXmlElement(db), s in SofaExtensionXmlElement(db), ancestor in m.getAnAncestor()) {
    -            if (s.key_eq(ancestor) && value = m.getServiceBeanValue()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Go

    -

    Message of All Files

    -
    // script
    -use coref::go::*
    -
    -fn default_db() -> GoDB {
    -    return GoDB::load("coref_go_src.db")
    -}
    -/**
    - * @param name          file name
    - * @param funcCount     function/method quantity
    - * @param totallines    total lines of file
    - * @param codelines     code line of file
    - * @param commentlines  comment line of file
    - * @param md5           md5 of this file
    - * @param sha256        sha256 of this file
    - */
    -fn out(
    -    name: string,
    -    funcCount: int,
    -    totallines: int,
    -    codelines: int,
    -    commentlines: int,
    -    md5: string,
    -    sha256: string) -> bool {
    -    for(f in File(default_db())) {
    -        if (name = f.getName() &&
    -            funcCount = f.getFunctionCount() &&
    -            md5 = f.getMd5Sum() &&
    -            sha256 = f.getSha256Sum() &&
    -            totallines = f.getLineInfo().getNumberOfTotalLines() &&
    -            codelines = f.getLineInfo().getNumberOfCodeLines() &&
    -            commentlines = f.getLineInfo().getNumberOfCommentLines()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Methods and Corresponding Comments

    -
    // script
    -use coref::go::*
    -
    -fn default_db() -> GoDB {
    -    return GoDB::load("coref_go_src.db")
    -}
    -
    -// Define a predicate called 'out' with parameters fileName, funcName, funcComment, and signature
    -fn out(fileName: string, funcName: string, funcComment: string, signature: string) -> bool {
    -    // Check if there exists a Function object 'func'
    -    for(func in Function(default_db())) {
    -        if (
    -            // Get the name of the file the function belongs to and assign it to the variable 'fileName'
    -            fileName = func.getBelongsFile().getName() &&
    -            // Get the name of the function and assign it to the variable 'funcName'
    -            funcName = func.getName() &&
    -            // Get the associated comment string for the function and assign it to the variable 'funcComment'
    -            funcComment = func.getAssociatedCommentString() &&
    -            // Get the function type signature and assign it to the variable 'signature'
    -            signature = func.getFunctionTypeSignature()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Cyclomatic complexity

    -
    // script
    -use coref::go::*
    -
    -fn default_db() -> GoDB {
    -    return GoDB::load("coref_go_src.db")
    -}
    -
    -/**
    - * @param name: file name
    - * @param func: function name
    - * @param cmplx: function cyclomatic complexity
    - * @param sl,el: function location info (start line, end line)
    - */
    -fn out(name: string, func: string, cmplx: int, sl: int, el: int) -> bool {
    -    for(f in GoFile(default_db()), function in Function(default_db())) {
    -        if ((!f.isAutoGenereatedFile()) &&
    -            f.key_eq(function.getBelongsFile()) &&
    -            name = f.getName() &&
    -            func = function.getName() &&
    -            cmplx = function.getCyclomaticComplexity() &&
    -            sl = function.getLocation().getStartLineNumber() &&
    -            el = function.getLocation().getEndLineNumber()) {
    -            return true
    -        }
    -    }
    -}
    -
    -fn main() {
    -    output(out())
    -}
    -

    Query Debugging and Optimization Techniques

    -

    When running GödelScript scripts, it is common to encounter issues with excessively long run times. Here, we provide some basic methods for diagnosis and solutions.

    -

    Schema Parameters Causing Excessive Cartesian Products

    -

    By default, function parameters without the @inline annotation are considered “qualification” conditions, not true input values.

    -

    For example, in the following case, get receives a Class type parameter, but the actual final compilation result will resemble the code below:

    -
    fn check(class: Class) -> bool {
    -    if (class.getName().contains("io")) {
    -        return true
    -    }
    -}
    -
    -// Actual compilation result
    -fn check(class: Class) -> bool {
    -    // Actually, it needs to fetch the entire Class set first
    -    for(__temp_class in Class::__all__(__all_data__)) {
    -        if (class = __temp_class) {
    -            if (class.getName().contains("io")) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -

    Therefore, when passing multiple schema types as parameters, there will be Cartesian products of multiple full schema sets, leading to a significant increase in space and time costs. -The solution is simple: just add an @inline annotation:

    -
    @inline
    -fn check(class: Class) -> bool {
    -    if (class.getName().contains("io")) {
    -        return true
    -    }
    -}
    -
    -fn example() -> bool {
    -    for(class in Class(default_java_db())) {
    -        if (check(class)) {
    -            return true
    -        }
    -    }
    -}
    -
    -// The inline annotation will forcibly inline the function into the statement during the code generation stage, avoiding multiple table loads
    -// The actual compilation result is similar to
    -fn example() -> bool {
    -    for(class in Class(default_java_db())) {
    -        if (class.getName().contains("io")) {
    -            return true
    -        }
    -    }
    -}
    -

    Multiple for Loops Causing Excessive Cartesian Products

    -

    In some cases, it is unavoidable to use multiple layers of for loops to load multiple tables for joint queries, causing severe inflation of Cartesian products. The number of Cartesian product results can be reduced by decreasing (filtering) the size of the sets in advance, as shown in the example:

    -
    fn getByIndex(self) -> Expression {
    -    let (db = default_java_db()) {
    -        for(e in Expression(db), p in Parameter(db)) {
    -            let (i = p.getIndex()) {
    -                if (e.key_eq(self.getValueByIndex(i))) {
    -                    return e
    -                }
    -            }
    -        }
    -    }
    -}
    -

    In this example, e and p form a Cartesian product, causing the intermediate process to take too long. -The set i is actually obtained from a method of p, and in actual use, this set is very small, much smaller than the full set of Parameter. Therefore, the retrieval of the i set can be extracted as a separate function to produce a small set, avoiding Cartesian product computations between large sets while ensuring result equivalence:

    -
    fn getAllParameterIndex() -> *int {
    -    let (db = default_java_db()) {
    -        for (p in Parameter(db)) {
    -            yield p.getIndex()
    -        }
    -    }
    -}
    -
    -fn getByIndex(self) -> Expression {
    -    let (db = default_java_db()) {
    -        for(e in Expression(db), i in getAllParameterIndex()) {
    -            if (e.key_eq(self.getValueByIndex(i))) {
    -                return e
    -            }
    -        }
    -	}
    -}
    -

    The Cartesian product of e and p becomes e and i. Operationally, the cost of the Cartesian product is reduced, and the getIndex operation is advanced, rather than taking place after the Cartesian product, significantly improving performance.

    -

    Do Not Overuse @inline / Must Use @inline Optimization Strategy

    -

    The underlying mechanism of inline functions is to expand at the call site. If the function does not have a large number of schema parameters and is called in many places, inline may lead to code bloat and an exponential increase in the number of redundant calculations, which may sometimes be counterproductive in reducing runtime. -If you must use inline, such as to avoid ungrounded, but find that using inline slows down the execution speed, you can split the embedded statements into predicates to prevent code bloat caused by expansion.

    -

    In the following example, getValueByAttributeNameByDefaultValue is marked with inline to prevent attributeName from being identified as ungrounded. Subsequently, a conditional statement was added in the if branch, causing the execution time to increase from 3 seconds to 35 seconds:

    -
    impl XmlElementBase {
    -  @inline
    -  fn getValueByAttributeNameByDefaultValue(self, attributeName: string) -> string {
    -    if (self.hasAttribute(attributeName)) {
    -      // return self.getValueByAttributeName(attributeName)
    -      // Changed to the following statement:
    -      let(value = self.getValueByAttributeName(attributeName)) {
    -        if (value = "n/a") {
    -          return ""
    -        }
    -        if (value != "n/a") {
    -          return value
    -        }
    -      }
    -    }
    -    if (!self.hasAttribute(attributeName)) {
    -      return "null"
    -    }
    -  }
    -}
    -

    As you can see, adding a level of assignment and a conditional statement, where this function is called nearly 20 times in the subsequent context, resulted in the code being expanded nearly 20 times. This also caused a magnitude difference in performance. At this point, you can extract the changed statement into a separate function. Since the extracted function does not use complex types as parameters, performance is not lost without inline, and after extraction, the result is as follows:

    -
    impl XmlElementBase {
    -  fn getTransValueByAttributeName(self, attributeName: string) -> string {
    -    let (value = self.getValueByAttributeName(attributeName)) {
    -      if (value = "n/a") {
    -        return ""
    -      }
    -      if (value != "n/a") {
    -        return value
    -      }
    -    }
    -  }
    -  @inline
    -  fn getValueByAttributeNameByDefaultValue(self, attributeName: string) -> string {
    -    if (self.hasAttribute(attributeName)) {
    -      return self.getTransValueByAttributeName(attributeName)
    -    }
    -    if (!self.hasAttribute(attributeName)) {
    -      return "null"
    -    }
    -  }
    -}
    -

    This way, the execution time is reduced from 35 seconds back to 3 seconds, meeting expectations.

    -

    Using Query Scripts Locally

    -

    For instructions on using query scripts on your machine, see Installation, Configuration, and Running.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-introduction-zh/index.html b/docs/docs/codefuse-query-introduction-zh/index.html deleted file mode 100644 index e5bdef7..0000000 --- a/docs/docs/codefuse-query-introduction-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/codefuse-query-introduction-zh/ - - - - - - diff --git a/docs/docs/codefuse-query-introduction/index.html b/docs/docs/codefuse-query-introduction/index.html deleted file mode 100644 index dc204bc..0000000 --- a/docs/docs/codefuse-query-introduction/index.html +++ /dev/null @@ -1,800 +0,0 @@ - - - - - - - - -Introduction · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Introduction

    -
    -
    - - -

    Introduction

    -

    CodeFuse-Query is a code data platform that supports structured analysis of various programming languages. The core idea is to transform all code into data using various language parsers and to store this data in a structured format within a code database. Data analysis is then performed according to business needs using a custom query language, as shown in the diagram below: -image.png

    -

    2.1 Architecture of CodeFuse-Query

    -

    Overall, the CodeFuse-Query code data platform is divided into three main parts: the code data model, the code query DSL (Domain-Specific Language), and platform productization services. The main workflow is illustrated in the following diagram: -image.png

    -

    Code Datafication and Standardization: COREF

    -

    We have defined a model for code datafication and standardization called COREF, which requires all code to be converted to this model through various language extractors. -COREF mainly includes the following information: -COREF = AST (Abstract Syntax Tree) + ASG (Abstract Semantic Graph) + CFG (Control Flow Graph) + PDG (Program Dependency Graph) + Call Graph + Class Hierarchy + Documentation (Documentation/Commentary Information) -Note: As the computational complexity of each type of information varies, not all languages’ COREF information includes all of the above. The basic information mainly includes AST, ASG, Call Graph, Class Hierarchy, and Documentation, while other information (CFG and PDG) is still under development and will be gradually supported.

    -

    Code Query DSL

    -

    Based on the generated COREF code data, CodeFuse-Query uses a custom DSL language called Gödel for querying, thereby fulfilling code analysis requirements. -Gödel is a logic-based reasoning language, whose underlying implementation is based on the logical reasoning language Datalog. By describing “facts” and “rules,” the program can continuously derive new facts. Gödel is also a declarative language, focusing more on describing “what is needed” and leaving the implementation to the computational engine. -Since code has already been converted to relational data (COREF data stored in the form of relational tables), one might wonder why not use SQL directly, or use an SDK instead of learning a new DSL language. Because Datalog’s computation is monotonic and terminating. Simply put, Datalog sacrifices expressiveness to achieve higher performance, and Gödel inherits this feature.

    -
      -
    • Compared to SDKs, Gödel’s main advantage is its ease of learning and use. As a declarative language, users do not need to focus on intermediate computations and can simply describe their needs as they would with SQL.
    • -
    • Compared to SQL, Gödel’s advantages are stronger descriptive capabilities and faster computation speed, for example, describing recursive algorithms and multi-table joint queries, which are difficult for SQL.
    • -
    -

    Platformization and Productization

    -

    CodeFuse-Query includes the Sparrow CLI and the online service Query Centre. Sparrow CLI contains all components and dependencies, such as extractors, data models, compilers, etc., and users can completely generate and query code data locally using Sparrow CLI (for how to use Sparrow CLI, please see Section 3: Installation, Configuration, Running). If users have online query needs, they can use the Query Centre to experiment.

    -

    2.2 Languages Supported by CodeFuse-Query for Analysis

    -

    As of October 31, 2023, CodeFuse-Query supports data analysis for 11 programming languages. Among these, support for 5 languages (Java, JavaScript, TypeScript, XML, Go) is very mature, while support for the remaining 6 languages (Objective-C, C++, Python3, Swift, SQL, Properties) is in beta and has room for further improvement. The specific support status is shown in the table below:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    LanguageStatusNumber of Nodes in the COREF Model
    JavaMature162
    XMLMature12
    TS/JSMature392
    GoMature40
    OC/C++Beta53/397
    Python3Beta93
    SwiftBeta248
    SQLBeta750
    PropertiesBeta9
    -

    Note: The maturity level of the language status above is determined based on the types of information included in COREF and the actual implementation. Except for OC/C++, all languages support complete AST information and Documentation. For example, COREF for Java also supports ASG, Call Graph, Class Hierarchy, and some CFG information.

    -

    2.3 Use Cases of CodeFuse-Query

    -

    Querying Code Features

    -

    A developer wants to know which String type variables are used in Repo A, so they write a Gödel script as follows and submit it to the CodeFuse-Query system for results.

    -
    // script
    -use coref::java::*
    -
    -fn out(var: string) -> bool {
    -  for(v in Variable(JavaDB::load("coref_java_src.db"))) {
    -    if (v.getType().getName() = "String" && var = v.getName()) {
    -      return true
    -    }
    -  }
    -}
    -
    -fn main() {
    -  output(out())
    -}
    -

    Similar needs: Queries for classes, functions, variables, return values, call graphs, class inheritance, etc.

    -

    Outputting Static Analysis Capabilities

    -

    A security team member sets up a system to cross-verify that log data and code data are consistent. To complete a certain analysis task, they plan to derive static data D1 through Gödel queries, merge with dynamic data D2, and combine analysis to reach conclusion C. After verifying the technical feasibility on CodeFuse-Query, they integrate the system using the standard API provided by CodeFuse-Query. -Similar needs: Using static analysis as a system checkpoint, improving testing efficiency, merging the analyzed data into documentation.

    -

    Code Rule Checker

    -

    A team lead finds that the team often introduces similar bugs, Bug A, and decides to establish a code rule and its checker to be applied during CodeReview. After writing an analysis query on the CodeFuse-Query platform and testing that it meets requirements, they codify the query as a code rule and roll it out to the CodeReview/CI phase. Since then, this bug has never occurred again. -Similar needs: Writing static defect scanning rules to intercept code risks.

    -

    Analyzing Code Characteristics

    -

    A developer from the R&D department wants to know the current proportion of Spring and Spring Boot projects in the code repository to quantify the promotion of the new framework. By writing a Gödel Query to describe different project analysis features, they queried 110,000 code repositories at once and obtained all the code data after a few dozen minutes, happily moving on to their KPIs. -Similar needs: Application profiling, code profiling, architectural analysis.

    -

    Getting Statistical Data

    -

    A researcher finds that traditional code complexity metrics struggle to accurately measure the complexity of the code. Inspired by international advanced experiences and a moment of insight, they design a set of complexity metrics and algorithms. After implementing it with Gödel and finding it already highly performant with little optimization, they quickly apply it to over 10 languages and more than 110,000 repositories. They now have an in-depth understanding of the overall complexity of the code repositories, unlike before when they had to parse the code and analyze the syntax tree themselves, which is so much more convenient. -Similar needs: Code statistics, code metrics, algorithm design, academic research.

    -

    Architectural Analysis

    -

    An architect recently promoted a new message middleware based on txt files, and existing analysis platforms couldn’t support analyzing dependencies in such systems. By quickly modeling the message format with Gödel, they soon obtain the dependency relationships between different components in the system. -Similar needs: System overview, architecture governance, lineage analysis.

    -

    Model Validation

    -

    A developer designs a system that requires users to play games before claiming coupons. They describe the model’s validation logic with Gödel, then use the CodeFuse-Query system to ensure that both current and future system implementations fully comply with the model. No longer worried about potential financial losses from the game! -Similar needs: System verification, network validation, permission verification.

    -

    2.4 Application Areas of CodeFuse-Query

    -

    Currently, CodeFuse-Query at Ant Group already supports CodeFuse large language model data cleaning, code metrics evaluation, R&D risk control, privacy security analysis, code intelligence, terminal package size management, and other scenarios with implemented applications, serving over a million monthly calls. -image.png

    -

    High-Quality Code Data Cleaning - CodeFuse Large Code Model

    -

    The CodeFuse Large Code Model is a model by Ant Group for handling code-related issues and has been open-sourced. For the CodeFuse large language model, the quality of the training data directly affects the model’s inference results. Low-quality code data can directly contaminate the language model’s output, for example: the model might learn incorrect code patterns, generating erroneous code; if the data only contains code in a single programming language, the model might not adapt well to code in other languages. -To control the quality of code data entering the model and thereby improve the model’s inferencing capabilities, we have drawn upon the Ant Group program analysis team’s years of practical experience coupled with industry consensus to clarify the definition of high-quality code. We have also implemented automated, large-scale code data cleaning using existing program analysis technologies. -CodeFuse-Query provides the following data cleaning capabilities for the CodeFuse Large Code Model:

    -
      -
    • High-quality code data cleaning: Clean code data, including vulnerability scanning for 7 languages (Python, Java, JavaScript, TypeScript, Go, C, C++), filtering by language type/star number, filtering out data with 0 valid lines of code, etc. We have currently accumulated about 2TB of cleaned code data from GitHub and internally at Ant Group.
    • -
    • Code Profiling: Implements high-performance, multi-dimensional automatic tagging for large-scale code, supporting 10 languages (Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go), 77 common tags, 40 Ant-specific tags, totaling 117 tags. The current auto-tagging performance can reach 40MB/s.
    • -
    • Other Atomic Abilities -
        -
      • Advanced code feature extraction, including extraction of AST (Abstract Syntax Tree), DFG (Data Flow Graph), etc. The AST information has been used for SFT training with about 97% accuracy.
      • -
      • Code snippet identification, used for extracting code from text data, convenient for formatting or adding Markdown: -
          -
        • Text extraction of code: Extracting code block information from text, parsing main languages, function and class definitions, only verifying a binary problem, that is, verifying whether the text contains code blocks with about 83% accuracy.
        • -
        • Identifying the programming language of a code snippet: Identifying the programming language of any code snippet, supporting 30+ languages, with about 80% accuracy.
        • -
        -
      • -
      • Code comment pair extraction: Supports extracting method-level comment-code pair information, covering 15 most popular languages on GitHub, used for Text To Code/Code To Text SFT training.
      • -
      -
    • -
    -

    Code Data Metrics - Guangmu

    -

    Guangmu is an internal product at Ant Group aimed at different R&D personnel and team managers, providing objective data and analysis results to assess code capabilities. -Guangmu offers individual code capability assessment reports, daily code capability metric data analysis, team code capability management, and code excellence award displays, all aimed at helping Ant Group’s R&D engineers continuously improve code quality, reduce code debt, and enhance R&D efficiency in the long run. -CodeFuse-Query provides Guangmu with two types of capabilities:

    -
      -
    • Code Evaluation Metrics: Code complexity, code annotation rate, standard development volume, etc.
    • -
    • Code Excellence Metrics: Code reuse degree.
    • -
    -

    Change Analysis - Youku Server-Side R&D Efficiency

    -

    The Youku Quality Assurance team started exploring server-side precision testing in 2023. After six months of technical accumulation and system building, they established a precision testing system capable of change content identification, change impact analysis, testing capability recommendation, and test coverage assessment. -In this process, CodeFuse-Query can provide capabilities including:

    -
      -
    • Analyzing the impacted objects based on code change content (file + line number): methods, entry points (HTTP entry, HSF entry), call routes (all call routes from the entry to the changed method), database operations (tables, types of operations).
    • -
    • Enhancing the effectiveness and accuracy of change impact analysis by combining the precise analysis capabilities of online dynamic call routes (method routes) and CodeFuse-Query static analysis call routes.
    • -
    -

    To date, Youku has integrated all core applications through CodeFuse-Query and based on static analysis data collection, has built a complete server-side code and traffic knowledge base.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-quickstart-zh/index.html b/docs/docs/codefuse-query-quickstart-zh/index.html deleted file mode 100644 index dabe532..0000000 --- a/docs/docs/codefuse-query-quickstart-zh/index.html +++ /dev/null @@ -1,805 +0,0 @@ - - - - - - - - -快速开始 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速开始

    -
    -
    - - -

    安装、配置、运行

    -

    硬件和软件要求

    -
      -
    • -

      硬件:4C8G

      -
    • -
    • -

      环境要求:java 1.8 和 python3.8 以上执行环境, 请保证 java python 可执行环境

      -
    • -
    -

    Sparrow 安装步骤和指导

    -
      -
    • CodeFuse-Query 下载包是一个 zip 存档,其中包含工具、脚本和各种特定于 CodeFuse-Query 的文件。如果您没有 CodeFuse-Query 许可证,那么下载此存档即表示您同意 CodeFuse-Query 条款和条件
    • -
    • 目前仅支持 mac,linux 系统下使用 CodeFuse-Query,下载地址为:(目前仅给出示例,开源后给出正式下载地址) - -
    • -
    • 您应该始终使用 CodeFuse-Query 捆绑包,确保版本兼容性
    • -
    -

    Tips:

    -
      -
    • mac系统下直接下载软件包会提示需要验证开发者
    • -
    -

    image.png

    -
      -
    • 可在安全性设置中进行修改验证
    • -
    -

    image.png

    -
      -
    • -

      点击仍然允许

      -
    • -
    • -

      详细步骤可参照:Mac 官方文档: 如何在 Mac 上安全地打开 App

      -
    • -
    • -

      或使用xattr -d com.apple.quarantine命令,删除 CodeFuse-Query 被 macOS 赋予的外部属性

      -
    • -
    • -

      xattr -d com.apple.quarantine是一个命令行指令,用于删除文件的 com.apple.quarantine 扩展属性。该扩展属性是 macOS 系统用来标记从外部来源下载的文件或应用程序的属性,以确保安全性。

      -
    • -
    -
    xattr -d com.apple.quarantine path/to/file
    -

    配置和初始化 CodeFuse-Query 开发环境

    -
      -
    • -

      解压缩:命令行解压或者直接点一下解压缩即可

      -
    • -
    • -

      需要具备 java8 和 python3.8 以上执行环境

      -
    • -
    • -

      CodeFuse-Query 解压后,您可以通过以下几种方式运行可执行文件来运行 sparrow 进程:

      -
    • -
    • -

      通过执行 <extraction-root>/sparrow-cli/sparrow,其中 <extraction-root> 是提取CodeFuse-Query包的文件夹。

      -
    • -
    • -

      通过添加 <extraction-root>/sparrow-cli 到您的 PATH,以便您可以直接运行可执行文件 sparrow。

      -
    • -
    -

    此时,您可以执行 sparrow 命令。

    -

    运行

    -

    执行步骤

    -
      -
    • -

      确认需要执行查询的源代码目录

      -
    • -
    • -

      抽取源代码的代码数据

      -
    • -
    • -

      基于代码数据编写 godel 脚本,获取自己想要的代码数据

      -
    • -
    • -

      godel 脚本如何编写参照 GödelScript 查询语言

      -
    • -
    -

    执行样例

    -

    数据抽取

    -
    <extraction-root>/sparrow-cli/sparrow database create -s <src> -lang <language> -o <output>
    -
      -
    • -

      <output> 代码库抽取出的代码数据的输出目录,后文数据库位置:<database>

      -
    • -
    • -

      <language> 需要进行代码抽取的语言,分析 java 则填写 java

      -
    • -
    • -

      <src> 需要扫描的源代码目录

      -
    • -
    • -

      在数据抽取步骤,获得脚本执行需要的数据库 <database>

      -
    • -
    -

    编写godel脚本

    -
      -
    • -

      假设具备如下 godel 脚本, 获取指定仓库的所有 java 方法名

      -
    • -
    • -

      godel 脚本具体编写可参照 GödelScript 查询语言

      -
    • -
    -
    // script
    -use coref::java::*
    -
    -// 定义全局java数据库
    -fn default_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -// 遍历所有方法,获取方法名,输出限制
    -fn getFunctionName(name: string) -> bool {
    -    let (db = default_db()) {
    -        for (method in Method(db)) {
    -            if (name = method.getName()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -
    -fn main() {
    -    output(getFunctionName())
    -}
    -

    脚本执行

    -
    <extraction-root>/sparrow-cli/sparrow query run -d <database> -gdl <gdl_path> -o <output>
    -
      -
    • -

      <database> 需要扫描的代码库抽取出的代码数据,与上文的 <output> 一致

      -
    • -
    • -

      <gdl_path> godel 脚本所在路径,可填写所在目录,会依次执行所在目录下所有以.gdl结尾的文件

      -
    • -
    • -

      <output> 输出路径目录,xxx.gdl 的执行结果会以 json 格式存入 <output>/xxx.json

      -
    • -
    • -

      可通过查看数据文件确认脚本执行是否正确

      -
    • -
    -

    例子

    -

    若存在以下java代码

    -
    public class HelloWorld {
    -    public static void main(String[] args) {
    -        HelloWorld tmp = new HelloWorld();
    -        String hello = tmp.getHello();
    -        String world = tmp.getWorld();
    -        System.out.println(hello + " " + world);
    -    }
    -
    -    public String getHello() {
    -        return "Hello";
    -    }
    -
    -    public String getWorld() {
    -        return "World";
    -    }
    -}
    -
    sparrow database create -s <example> -lang java -o ./db/
    -sparrow query run -d ./db/ -gdl example.gdl -o ./
    -
      -
    • -

      <example> 为上述给出的 java 文件存储目录

      -
    • -
    • -

      example.gdl 为上述给出的 gdl 示例,存储到当前目录

      -
    • -
    • -

      执行完毕后可在当前目录下找到 example.json 文件

      -
    • -
    -

    对应的脚本输出 json 文件内容如下

    -
    [{"name": "getHello"},
    -{"name": "getWorld"},
    -{"name": "main"}]
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-quickstart/index.html b/docs/docs/codefuse-query-quickstart/index.html deleted file mode 100644 index 30d75f5..0000000 --- a/docs/docs/codefuse-query-quickstart/index.html +++ /dev/null @@ -1,820 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    Installation, Configuration, and Running

    -

    Hardware and Software Requirements

    -
      -
    • -

      Hardware: 4C8G

      -
    • -
    • -

      Environment Requirements: Java 1.8 and Python 3.8 or above runtime environments. Please ensure Java and Python executables are available.

      -
    • -
    -

    Sparrow Installation Steps and Guidance

    -
      -
    • The CodeFuse-Query download package is a zip archive that contains tools, scripts, and various files specific to CodeFuse-Query. If you do not have a CodeFuse-Query license, downloading this archive indicates your agreement with the CodeFuse-Query Terms and Conditions.
    • -
    • CodeFuse-Query is currently only supported on Mac and Linux systems. The download links are: (currently, only a sample is given, the official download link will be provided after open-source release) - -
    • -
    • You should always use the CodeFuse-Query bundle to ensure version compatibility.
    • -
    -

    Tips:

    -
      -
    • On Mac systems, directly downloading the package may prompt a verification for the developer.
    • -
    -

    image.png

    -
      -
    • You can modify the verification in the security settings.
    • -
    -

    image.png

    -
      -
    • -

      Click “Allow Anyway.”

      -
    • -
    • -

      For detailed steps, please refer to the Mac Official Documentation: How to safely open an app on your Mac

      -
    • -
    • -

      Or use the xattr -d com.apple.quarantine command to remove the external attribute assigned to CodeFuse-Query by macOS.

      -
    • -
    • -

      xattr -d com.apple.quarantine is a command-line instruction used to delete a file’s com.apple.quarantine extended attribute. This attribute is used by the macOS system to mark files or applications downloaded from external sources to ensure security.

      -
    • -
    -
    xattr -d com.apple.quarantine path/to/file
    -

    Configuring and Initializing the CodeFuse-Query Development Environment

    -
      -
    • -

      Unzip using the command line or by simply clicking to unzip.

      -
    • -
    • -

      You need to have Java 8 and Python 3.8 or higher runtime environments.

      -
    • -
    • -

      After unzipping CodeFuse-Query, you can run the Sparrow process by running the executable in the following ways:

      -
    • -
    • -

      By executing <extraction-root>/sparrow-cli/sparrow, where <extraction-root> is the folder where you extracted the CodeFuse-Query package.

      -
    • -
    • -

      By adding <extraction-root>/sparrow-cli to your PATH, so you can directly run the executable sparrow.

      -
    • -
    -

    At this point, you can execute the sparrow command.

    -

    Running

    -

    Execution Steps

    -
      -
    • -

      Confirm the source code directory you need to query.

      -
    • -
    • -

      Extract code data from the source code.

      -
    • -
    • -

      Write a Gödel script based on the code data to obtain the desired code data.

      -
    • -
    • -

      For how to write Gödel scripts, refer to GödelScript Query Language

      -
    • -
    -

    Execution Example

    -

    Data Extraction

    -
    <extraction-root>/sparrow-cli/sparrow database create -s <src> -lang <language> -o <output>
    -
      -
    • -

      <output>: The output directory for the code data extracted from the codebase, referred to as <database> later.

      -
    • -
    • -

      <language>: The language of the code to be extracted, fill in “java” for analyzing Java.

      -
    • -
    • -

      <src>: The source code directory to be scanned.

      -
    • -
    • -

      In the data extraction step, you obtain the database <database> required for executing the script.

      -
    • -
    -

    Writing Gödel Scripts

    -
      -
    • -

      Assuming you have the following Gödel script to get all Java method names from a specified repository:

      -
    • -
    • -

      For specific Gödel script writing, refer to GödelScript Query Language

      -
    • -
    -
    // script
    -use coref::java::*
    -
    -// Define the global Java database
    -fn default_db() -> JavaDB {
    -    return JavaDB::load("coref_java_src.db")
    -}
    -
    -// Iterate over all methods, get the method name, output limit
    -fn getFunctionName(name: string) -> bool {
    -    let (db = default_db()) {
    -        for (method in Method(db)) {
    -            if (name = method.getName()) {
    -                return true
    -            }
    -        }
    -    }
    -}
    -
    -
    -fn main() {
    -    output(getFunctionName())
    -}
    -

    Script Execution

    -
    <extraction-root>/sparrow-cli/sparrow query run -d <database> -gdl <gdl_path> -o <output>
    -
      -
    • -

      <database>: The code data extracted from the codebase to be scanned, consistent with <output> above.

      -
    • -
    • -

      <gdl_path>: The path where the Gödel script is located, fill in the directory path, and it will execute all files ending with .gdl in that directory in sequence.

      -
    • -
    • -

      <output>: The output directory path, the result of executing xxx.gdl will be stored in <output>/xxx.json in JSON format.

      -
    • -
    • -

      You can verify if the script executed correctly by checking the data file.

      -
    • -
    -

    Example

    -

    Suppose there is the following Java code:

    -
    public class HelloWorld {
    -    public static void main(String[] args) {
    -        HelloWorld tmp = new HelloWorld();
    -        String hello = tmp.getHello();
    -        String world = tmp.getWorld();
    -        System.out.println(hello + " " + world);
    -    }
    -    
    -    public String getHello() {
    -        return "Hello";
    -    }
    -    
    -    public String getWorld() {
    -        return "World";
    -    }
    -}
    -
    sparrow database create -s <example> -lang java -o ./db/
    -sparrow query run -d ./db/ -gdl example.gdl -o ./
    -
      -
    • -

      <example> is the directory where the given Java file is stored.

      -
    • -
    • -

      example.gdl is the given Gödel script sample, saved in the current directory.

      -
    • -
    • -

      After execution, you can find the example.json file in the current directory.

      -
    • -
    -

    The corresponding script output JSON file content is as follows:

    -
    [{"name": "getHello"},
    -{"name": "getWorld"},
    -{"name": "main"}]
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-toolchain-zh/index.html b/docs/docs/codefuse-query-toolchain-zh/index.html deleted file mode 100644 index dee2444..0000000 --- a/docs/docs/codefuse-query-toolchain-zh/index.html +++ /dev/null @@ -1,748 +0,0 @@ - - - - - - - - -VSCode插件 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    VSCode插件

    -
    -
    - - -

    开发插件(VSCode)

    -

    安装

    -

    从VSCode官方插件市场安装(推荐)

    -

    插件地址

    -

    使用VSIX安装包安装

    -
      -
    1. 下载插件
    2. -
    3. 手动从 vsix 安装: -image.png
    4. -
    5. 或者使用指令直接从终端安装:
    6. -
    -
    code --install-extension [扩展vsix文件路径]
    -

    环境准备

    -
      -
    • Sparrow CLI ,参照 3 安装、配置、运行
    • -
    -

    扩展特性

    -

    本扩展提供了以下功能模块:

    -
      -
    • COREF AST Viewer
    • -
    • Gödel Language Server
    • -
    • Gödel Language Runner
    • -
    -

    COREF AST Viewer

    -

    以下功能需要在扩展设置中设置相关项后启用。目前仅支持于Java语言

    -

    Java 文件转成树状的 COREF Node

    -

    -

    Node 与代码位置的相互定位

    -

    -

    在Lib API Viewer 查看 Node 的API,Node 复制

    -

    -

    Lib API Viewer:查询与复制使用

    -

    -

    Gödel Language Server Features

    -

    以下功能均需要在设置扩展后启用。不设置相关项的情况下,语法高亮仍然可用。

    -

    错误信息提示

    -

    错误信息会随着代码的更新而自动更新。 -

    -

    符号信息提示和补全

    -

    包含local变量和全局符号信息的补全提示,关键字等信息会提供对应的使用样例,全局符号信息会提供更详细的内部信息,如包含的成员变量、成员方法、静态方法。

    -

    -
      -
    • 关键字补全和使用样例提示
    • -
    • local 变量类型信息和符号补全
    • -
    • . 跟随的符号信息和补全
    • -
    • :: 跟随的符号信息和补全
    • -
    • 注解使用样例提示
    • -
    • 全局符号类型信息 (内部结构,成员方法,静态方法)
    • -
    -

    跳转到定义

    -

    可以通过右键跳转定义或者ctrl/command+left click直接跳转到准确的符号定义位置。

    -

    -

    代码片段 (Snippets)

    -

    扩展提供了一些代码片段补齐以供快速编写 Gödel 1.0/script 代码。

    -

    -

    GödelScript Runner

    -

    需要在扩展中设置 sparrow cli 路径后使用。运行脚本之前需要先加载数据库。关于如何生成数据库 参考 3.4.章节 运行 中的数据抽取部分。

    -

    运行脚本

    -

    panel.gif -提供了四种不同的脚本运行按钮:

    -
      -
    1. 在要运行的脚本处右键执行。
    2. -
    3. 在 extension GodelScript Runner 面板上选择 Run GödelScript
    4. -
    5. 在 extension GodelScript Runner Setting 面板上选择 Run
    6. -
    7. 在 extension GodelScript Runner Setting 面板右上角点击运行按钮。
    8. -
    -

    数据库文件夹加载

    -
      -
    1. 在要运行的脚本处右键选择包含数据库的文件夹进行加载。
    2. -
    3. 在 extension GodelScript Runner 面板上选择 Load Database Directory
    4. -
    5. 在 extension GodelScript Runner Setting 面板上选择 Database
    6. -
    7. 在 extension GodelScript Runner Setting 面板右上角点击数据库加载按钮。
    8. -
    -

    扩展设置

    -

    COREF AST Viewer 设置

    -
      -
    • corefASTViewer.sparrowCliRoot -
        -
      • 指定 Sparrow CLI 的根目录,参照第3章节的安装部分
      • -
      -
    • -
    -

    Gödel Language Server 设置

    -

    扩展启动时,以下两项中存在任意一项未被设置,则会弹出提示。点击configure按钮会跳转至相应配置页面。

    -
      -
    • godelScript.executablePath -
        -
      • 用于指定 GödelScript 的可执行文件路径,默认为空。需要时请替换为实际的 GödelScript 可执行文件的绝对路径。
      • -
      • 如果已经下载 Sparrow CLI ,则 GödelScript 可执行文件为 [sparrow cli root]/godel-script/usr/bin/godel
      • -
      -
    • -
    • godelScript.libraryDirectoryPath -
        -
      • 用于指定 GödelScript 的库文件夹路径,默认为空。需要时请替换为 GödelScript 库文件夹绝对路径。
      • -
      • 如果已经下载 Sparrow CLI ,则库文件夹路径为 [sparrow cli root]/lib-1.0
      • -
      -
    • -
    -

    智能助手

    -

    待开放,敬请期待!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-toolchain/index.html b/docs/docs/codefuse-query-toolchain/index.html deleted file mode 100644 index ae5b8be..0000000 --- a/docs/docs/codefuse-query-toolchain/index.html +++ /dev/null @@ -1,763 +0,0 @@ - - - - - - - - -Toolchain · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Toolchain

    -
    -
    - - -

    Developing Plugins (VSCode)

    -

    Installation

    -

    Install from VSCode marketplace (Recommended)

    -

    VSCode Extension

    -

    Install from local via VSIX pack

    -
      -
    1. Download the plugin.
    2. -
    3. Manually install from vsix: -image.png
    4. -
    5. Or use the command directly from the terminal to install:
    6. -
    -
    code --install-extension [extension vsix file path]
    -

    Environment Preparation

    -
      -
    • Sparrow CLI, refer to Section 3 Installation, Configuration, and Running.
    • -
    -

    Extension Features

    -

    This extension provides the following feature modules:

    -
      -
    • COREF AST Viewer
    • -
    • Gödel Language Server
    • -
    • Gödel Language Runner
    • -
    -

    COREF AST Viewer

    -

    The following features need to be enabled in the extension settings. Currently, it only supports the Java language.

    -

    Convert Java Files into Tree-Like COREF Nodes

    -

    -

    Locate COREF Nodes and Code Positions Interactively

    -

    -

    View Node APIs and Copy Nodes in Lib API Viewer

    -

    -

    Lib API Viewer: Querying and Copying Usage

    -

    -

    Gödel Language Server Features

    -

    The following features need to be enabled after setting up the extension. Syntax highlighting is still available without setting related items.

    -

    Error Information Tips

    -

    Error information automatically updates with code changes. -

    -

    Symbol Information Tips and Completion

    -

    Completion suggestions that include local variables and global symbols. Keywords provide corresponding usage examples; global symbol information offers more detailed internal information, such as member variables, member methods, and static methods.

    -

    -
      -
    • Keyword completion and usage example tips
    • -
    • Local variable type information and symbol completion
    • -
    • . followed by symbol information and completion
    • -
    • :: followed by symbol information and completion
    • -
    • Annotation usage example tips
    • -
    • Global symbol type information (internal structure, member methods, static methods)
    • -
    -

    Go to Definition

    -

    You can jump to definitions with a right-click or ctrl/command+left click to go directly to the exact symbol definition location.

    -

    -

    Code Snippets (Snippets)

    -

    The extension provides some code snippets to quickly write Gödel 1.0/script code.

    -

    -

    GödelScript Runner

    -

    Use after setting the Sparrow CLI path in the extension. The database needs to be loaded before running the script. For how to generate a database, refer to Section 3.4, Running, in the data extraction part.

    -

    Running Scripts

    -

    panel.gif -There are four different script running buttons provided:

    -
      -
    1. Right-click to execute at the script you want to run.
    2. -
    3. Choose Run GödelScript on the extension GodelScript Runner panel.
    4. -
    5. Choose Run on the extension GodelScript Runner Setting panel.
    6. -
    7. Click the run button at the top right of the extension GodelScript Runner Setting panel.
    8. -
    -

    Database Folder Loading

    -
      -
    1. Right-click at the script you want to run and choose the folder containing the database to load.
    2. -
    3. Choose Load Database Directory on the extension GodelScript Runner panel.
    4. -
    5. Choose Database on the extension GodelScript Runner Setting panel.
    6. -
    7. Click the database load button at the top right of the extension GodelScript Runner Setting panel.
    8. -
    -

    Extension Settings

    -

    COREF AST Viewer Settings

    -
      -
    • corefASTViewer.sparrowCliRoot -
        -
      • Specify the root directory of Sparrow CLI, referring to Section 3 of the installation part.
      • -
      -
    • -
    -

    Gödel Language Server Settings

    -

    When the extension starts, a prompt will pop up if any one of the following two items is not set. Clicking the configure button will redirect to the respective configuration page.

    -
      -
    • godelScript.executablePath -
        -
      • Used to specify the executable path of GödelScript, default is empty. Please replace with the actual absolute path of the GödelScript executable when needed.
      • -
      • If Sparrow CLI is already downloaded, then the GödelScript executable file is [sparrow cli root]/godel-script/usr/bin/godel.
      • -
      -
    • -
    • godelScript.libraryDirectoryPath -
        -
      • Used to specify the library folder path of GödelScript, default is empty. Please replace with the absolute path of the GödelScript library folder when needed.
      • -
      • If Sparrow CLI is already downloaded, then the library folder path is [sparrow cli root]/lib-1.0.
      • -
      -
    • -
    -

    Smart Assistant

    -

    Stay tuned for the opening!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-usercase-zh/index.html b/docs/docs/codefuse-query-usercase-zh/index.html deleted file mode 100644 index 188d5f2..0000000 --- a/docs/docs/codefuse-query-usercase-zh/index.html +++ /dev/null @@ -1,698 +0,0 @@ - - - - - - - - -用户案例 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    用户案例

    -
    -
    - - -

    使用场景

    -

    查询代码特征

    -

    小开发同学想知道 Repo A 里面使用了哪些 String 型的变量,所以他写了一个 Gödel 如下,交给 CodeFuse-Query 系统给他返回了结果。

    -
    // script
    -use coref::java::*
    -
    -fn out(var: string) -> bool {
    -  for(v in Variable(JavaDB::load("coref_java_src.db"))) {
    -    if (v.getType().getName() = "String" && var = v.getName()) {
    -      return true
    -    }
    -  }
    -}
    -
    -fn main() {
    -  output(out())
    -}
    -

    类似需求:查询:类,函数,变量,返回值,调用图,类继承等等。

    -

    代码规则检查器

    -

    小 TL 同学发现团队总是写出很多类似的 Bug A,他想针对 Bug A 制定一个代码规则和其检查器,并在 CodeReview 阶段做个卡点。小 TL 通过在 CodeFuse-Query 平台上面编写了一段分析 Query,在平台上面测试符合要求,把这段分析 Query 固化下来作为一个代码规则,并上线到了 CodeReview/CI 阶段。从此这个 Bug 再也没发生过了。 -类似需求:编写静态缺陷扫描规则进行代码风险拦截。

    -

    获取统计数据

    -

    小研究发现传统的代码复杂度指标很难准确地衡量代码的复杂情况,通过学习国际先进经验加上自我灵光一闪,设计了一套复杂度指标和算法。通过 Gödel 实现出来以后,发现不怎么优化就已经性能非常高了,很快就应用到了 10 几种语言,11+万个仓库当中去了。马上就对代码仓库整体的复杂度有了深入的了解。相比较以前需要自己解析代码,分析语法树,对接系统,不知道方便了多少。 -类似需求:代码统计,代码度量,算法设计,学术研究。

    -

    应用领域

    -

    目前,CodeFuse-Query在蚂蚁集团已经支持 CodeFuse大语言模型数据清洗代码度量评估研发风险控制隐私安全分析代码智能、**终端包大小治理 **等多个场景的落地应用,服务月均调用量超过百万。

    -

    高质量代码数据清洗 - CodeFuse代码大模型

    -

    CodeFuse代码大模型是蚂蚁集团对外开源的处理代码相关问题的模型,对于CodeFuse大语言模型而言,训练的数据质量直接影响模型的推理结果。低质量的代码数据会直接污染语言模型的输出,例如:模型可能会学习到错误的代码模式,从而生成错误的代码;数据中只包含某种编程语言的代码,模型可能无法很好地适应其他编程语言的代码。 -为了把控进入模型的代码数据质量,进而提升模型的推理能力。我们基于蚂蚁程序分析团队多年的实践积累结合业界共识,梳理了高质量代码的定义方式,并利用已有程序分析技术实现了自动化、大规模的代码数据清洗。 -CodeFuse-Query为CodeFuse代码大模型提供了以下数据清洗能力:

    -
      -
    • 高质量代码数据清洗:对代码数据进行清洗,包括对 Python,Java,JavaScript,TypeScript,Go,C,C++ 7 种语言进行漏洞扫描,对语言种类 / star 数进行筛选,过滤有效代码行数为 0 的数据等。目前已沉淀清洗后的 GitHub 和蚂蚁内部代码数据总共约 2TB
    • -
    • 代码画像:实现对大规模代码进行高性能多维度的自动标注,支持 Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go 等 10 种语言,77 种通用标签,40 种蚂蚁特有标签,共 117 种标签。目前自动标注性能能够达到 40MB/s
    • -
    • 其他原子能力 -
        -
      • 高级代码特征提取,包括提取 AST(抽象语法树),DFG(数据流图)数据等。目前 AST 信息已用于 SFT 训练,准确率 97% 左右。
      • -
      • 代码片段识别,用于针对文本数据中的代码进行提取,方便进行代码格式化或加上 Markdown 格式: -
          -
        • 文本提取代码:从文本中提取代码块信息,支持主流语言的解析,函数及类定义,仅验证二分类问题,就是说仅验证文本是否含有代码块准确率 83% 左右。
        • -
        • 识别代码片段的编程语言种类:识别任意代码片段的编程语言种类,支持 30+ 种语言,准确率80%左右。
        • -
        -
      • -
      • 代码注释对提取:支持提取方法级别的注释-代码对信息,覆盖 15 种 GitHub 最流行的语言,用于 Text To Code/Code To Text 的 SFT 训练。
      • -
      -
    • -
    -

    变更分析-优酷服务端研发效能

    -

    优酷质量保障团队从2023年开始针对服务端精准测试的探索,经过半年的技术沉淀和体系搭建,形成了具备变更内容识别、变更影响分析、测试能力推荐、测试覆盖评估的精准测试体系。 -在此过程中,CodeFuse-Query能提供的能力主要有:

    -
      -
    • 根据代码变更内容(文件+行号),分析出影响的对象:方法、入口(http入口、hsf入口)、调用链路(从入口到变更方法的所有调用链路)、数据库操作(表、操作类型)
    • -
    • 结合线上动态调用链路(方法链路)、CodeFuse-Query静态分析调用链路的影响面精准分析能力,提升变更分析影响面的有效性、准确率
    • -
    -

    到目前为止,优酷已通过CodeFuse-Query接入所有核心应用,并基于静态分析采集数据,构建了服务端完整的代码知识库和流量知识库。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-usercase/index.html b/docs/docs/codefuse-query-usercase/index.html deleted file mode 100644 index dc4a178..0000000 --- a/docs/docs/codefuse-query-usercase/index.html +++ /dev/null @@ -1,713 +0,0 @@ - - - - - - - - -User Case · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    User Case

    -
    -
    - - -

    Use Cases

    -

    Querying Code Features

    -

    A developer wants to know which String type variables are used in Repo A, so he writes a Gödel script as follows and submits it to the CodeFuse-Query system for results.

    -
    // script
    -use coref::java::*
    -
    -fn out(var: string) -> bool {
    -  for(v in Variable(JavaDB::load("coref_java_src.db"))) {
    -    if (v.getType().getName() = "String" && var = v.getName()) {
    -      return true
    -    }
    -  }
    -}
    -
    -fn main() {
    -  output(out())
    -}
    -

    Similar needs: querying for classes, functions, variables, return values, call graphs, class inheritance, etc.

    -

    Code Rule Checker

    -

    A team leader found that the team always wrote many bugs similar to Bug A. He wanted to establish a code rule for Bug A and its checker and do a check at the CodeReview stage. Through writing a query analysis on the CodeFuse-Query platform, and after testing it on the platform to meet the requirements, he solidified this analysis query as a code rule and launched it to the CodeReview/CI stage. Since then, this bug has never happened again. -Similar needs: writing static defect scanning rules for code risk interception.

    -

    Obtaining Statistical Data

    -

    A researcher found that traditional code complexity metrics are difficult to accurately measure code complexity. By learning from international advanced experience and a stroke of genius, he designed a set of complexity metrics and algorithms. After implementing it with Gödel, he found that without much optimization, the performance was already very high, and it was quickly applied to more than 10 languages and over 110,000 repositories. He immediately had an in-depth understanding of the overall complexity of code repositories. Compared to the past, when he had to parse code and analyze syntax trees himself, and interface with systems, it’s hard to know how much more convenient it has become. -Similar needs: code statistics, code metrics, algorithm design, academic research.

    -

    Application Fields

    -

    Currently, CodeFuse-Query at Ant Group has already supported the implementation of multiple scenarios such as CodeFuse large language model data cleaning, code metric assessment, R&D risk control, privacy security analysis, code intelligence, client app package size governance, etc., with a monthly service call volume exceeding one million.

    -

    High-Quality Code Data Cleaning - CodeFuse Code Large Model

    -

    The CodeFuse code large model is a model for dealing with code-related issues open-sourced by Ant Group. For the CodeFuse large language model, the quality of the training data directly affects the inference results of the model. Low-quality code data will directly pollute the output of the language model. For example, the model may learn incorrect code patterns, thereby generating incorrect code. If the data only contains code in a certain programming language, the model may not adapt well to the code of other programming languages. -To control the quality of code data entering the model and thereby improve the inferential capability of the model, we have sorted out the definition of high-quality code based on years of practical experience of the Ant code analysis team combined with industry consensus, and implemented automated, large-scale code data cleaning using existing program analysis technology. -CodeFuse-Query provides the following data cleaning capabilities for the CodeFuse code large model:

    -
      -
    • High-quality code data cleaning: clean code data, including vulnerability scanning for Python, Java, JavaScript, TypeScript, Go, C, C++ 7 languages, filtering by language type/star count, filtering out data with 0 effective code lines, etc. Currently, about 2TB of cleaned GitHub and Ant internal code data has been accumulated.
    • -
    • Code Portrait: Implement high-performance, multi-dimensional automatic annotation of large-scale code, supporting Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go, and other 10 languages, 77 common tags, 40 Ant-specific tags, a total of 117 tags. Current auto-annotation performance can reach 40MB/s.
    • -
    • Other atomic capabilities -
        -
      • Advanced code feature extraction, including AST (Abstract Syntax Tree), DFG (Data Flow Graph) data extraction, etc. Currently, AST information has been used for SFT training, with an accuracy of about 97%.
      • -
      • Code snippet identification, used for extracting code from text data, convenient for code formatting or adding Markdown format: -
          -
        • Text extraction code: extracts code block information from text, supporting parsing of mainstream languages as well as function and class definitions. Only the binary classification task — verifying whether the text contains code blocks — has been validated, with an accuracy of about 83%.
        • -
        • Identify the programming language type of code snippets: identify the programming language type of any code snippet, support 30+ languages, accuracy is about 80%.
        • -
        -
      • -
      • Code comment pair extraction: support extraction of method-level comment-code pair information, cover 15 kinds of GitHub’s most popular languages, used for Text To Code/Code To Text SFT training.
      • -
      -
    • -
    -

    Change Analysis - Youku Server-side R&D Efficiency

    -

    Since 2023, the Youku quality assurance team has been exploring precise testing for the server side. After half a year of technical accumulation and system building, a precise testing system with change content identification, change impact analysis, testing capability recommendation, and testing coverage assessment was formed. In this process, the capabilities that CodeFuse-Query can provide mainly include:

    -
      -
    • Analyze the affected objects based on the code changes (file + line number): methods, entry points (http entry, hsf entry), call routes (all call routes from entry to changed method), database operations (table, operation type)
    • -
    • Combining the online dynamic call routes (method routes) with CodeFuse-Query's static-analysis call-route impact analysis capability, improving the effectiveness and accuracy rate of change impact analysis
    • -
    -

    Up to now, Youku has integrated all core applications through CodeFuse-Query and has built a comprehensive server-side code knowledge base and network traffic knowledge base based on static analysis.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/codefuse-query-zh/index.html b/docs/docs/codefuse-query-zh/index.html deleted file mode 100644 index 7a68e51..0000000 --- a/docs/docs/codefuse-query-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/codefuse-query-zh/ - - - - - - diff --git a/docs/docs/codefuse-query/index.html b/docs/docs/codefuse-query/index.html deleted file mode 100644 index 582d7aa..0000000 --- a/docs/docs/codefuse-query/index.html +++ /dev/null @@ -1,550 +0,0 @@ - - - - - - - - -CodeFuse-Query · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-Query

    -
    -
    - - -

    CodeFuse-Query

    -

    CodeFuse-Query

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/configurations/index.html b/docs/docs/configurations/index.html deleted file mode 100644 index fb6c09d..0000000 --- a/docs/docs/configurations/index.html +++ /dev/null @@ -1,641 +0,0 @@ - - - - - - - - -Configurations · CodeFuse - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Configurations

    -
    -
    - - -

    本地私有化/大模型接口接入

    -

    依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。

    -

    📜 目录

    - -

    本地私有化模型接入

    -


    模型地址配置示例,model_config.py配置修改

    -
    # 建议:走huggingface接入,尽量使用chat模型,不要使用base,无法获取正确输出
    -# 注意:当llm_model_dict和VLLM_MODEL_DICT同时存在时,优先启动VLLM_MODEL_DICT中的模型配置
    -
    -# llm_model_dict 配置接入示例如下
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "THUDM/chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -# VLLM_MODEL_DICT 配置接入示例如下
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "THUDM/chatglm-6b",
    -}
    -


    模型路径填写示例

    -
    # 1、若把模型放到 ~/codefuse-chatbot/llm_models 路径下
    -# 若模型地址如下
    -model_dir: ~/codefuse-chatbot/llm_models/THUDM/chatglm-6b
    -
    -# 参考配置如下
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "THUDM/chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "THUDM/chatglm-6b",
    -}
    -
    -# or 若模型地址如下
    -model_dir: ~/codefuse-chatbot/llm_models/chatglm-6b
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "chatglm-6b",
    -}
    -
    -# 2、若不想移动相关模型到 ~/codefuse-chatbot/llm_models
    -# 同时删除 `模型路径重置` 以下的相关代码,具体见model_config.py
    -# 若模型地址如下
    -model_dir: ~/THUDM/chatglm-6b
    -# 参考配置如下
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "~/THUDM/chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "~/THUDM/chatglm-6b",
    -}
    -
    # 3、指定启动的模型服务,两者保持一致
    -LLM_MODEL = "gpt-3.5-turbo-16k"
    -LLM_MODELs = ["gpt-3.5-turbo-16k"]
    -
    # server_config.py配置修改, 若LLM_MODELS无多个模型配置不需要额外进行设置
    -# 修改server_config.py#FSCHAT_MODEL_WORKERS的配置
    -"model_name": {'host': DEFAULT_BIND_HOST, 'port': 20057}
    -


    量化模型接入

    -
    # 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁
    -cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -
    -# 若需要支撑qwen-72b-int4模型,需要给fastchat打一个补丁
    -cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# 量化需修改llm_api.py的配置
    -# dev_opsgpt/service/llm_api.py#559 取消注释 kwargs["gptq_wbits"] = 4
    -

    公开大模型接口接入

    -
    # model_config.py配置修改
    -# ONLINE_LLM_MODEL
    -# 其它接口开发来自于langchain-chatchat项目,缺少相关账号未经测试
    -
    -# 指定启动的模型服务,两者保持一致
    -LLM_MODEL = "gpt-3.5-turbo-16k"
    -LLM_MODELs = ["gpt-3.5-turbo-16k"]
    -

    外部大模型接口接入示例

    -
    # 1、实现新的模型接入类
    -# 参考  ~/dev_opsgpt/service/model_workers/openai.py#ExampleWorker
    -# 实现do_chat函数即可使用LLM的能力
    -
    -class XXWorker(ApiModelWorker):
    -    def __init__(
    -            self,
    -            *,
    -            controller_addr: str = None,
    -            worker_addr: str = None,
    -            model_names: List[str] = ["gpt-3.5-turbo"],
    -            version: str = "gpt-3.5",
    -            **kwargs,
    -    ):
    -        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
    -        kwargs.setdefault("context_len", 16384) #TODO 16K模型需要改成16384
    -        super().__init__(**kwargs)
    -        self.version = version
    -
    -    def do_chat(self, params: ApiChatParams) -> Dict:
    -        '''
    -        执行Chat的方法,默认使用模块里面的chat函数。
    -        :params.messages : [
    -            {"role": "user", "content": "hello"}, 
    -            {"role": "assistant", "content": "hello"}
    -            ]
    -        :params.xx: 详情见 ApiChatParams 
    -        要求返回形式:{"error_code": int, "text": str}
    -        '''
    -        return {"error_code": 500, "text": f"{self.model_names[0]}未实现chat功能"}
    -
    -
    -# 最后在 ~/dev_opsgpt/service/model_workers/__init__.py 中完成注册
    -# from .xx import XXWorker
    -
    -# 2、通过已有模型接入类完成接入
    -# 或者直接使用已有的相关大模型类进行使用(缺少相关账号测试,欢迎大家测试后提PR)
    -
    # model_config.py#ONLINE_LLM_MODEL 配置修改
    -# 填写专属模型的 version、api_base_url、api_key、provider(与上述类名一致)
    -ONLINE_LLM_MODEL = {
    -    # 线上模型。请在server_config中为每个在线API设置不同的端口
    -
    -    "openai-api": {
    -        "model_name": "gpt-3.5-turbo",
    -        "api_base_url": "https://api.openai.com/v1",
    -        "api_key": "",
    -        "openai_proxy": "",
    -    },
    -    "example": {
    -        "version": "gpt-3.5",  # 采用openai接口做示例
    -        "api_base_url": "https://api.openai.com/v1",
    -        "api_key": "",
    -        "provider": "ExampleWorker",
    -    },
    -}
    -

    启动大模型服务

    -
    # start llm-service(可选)  单独启动大模型服务
    -python dev_opsgpt/service/llm_api.py
    -
    # 启动测试
    -import openai
    -# openai.api_key = "EMPTY" # Not support yet
    -openai.api_base = "http://127.0.0.1:8888/v1"
    -
    -# 选择你启动的模型
    -model = "example"
    -
    -# create a chat completion
    -completion = openai.ChatCompletion.create(
    -    model=model,
    -    messages=[{"role": "user", "content": "Hello! What is your name? "}],
    -    max_tokens=100,
    -)
    -# print the completion
    -print(completion.choices[0].message.content)
    -
    -# 正确输出后则确认LLM可正常接入
    -

    or

    -
    # model_config.py#USE_FASTCHAT 判断是否进行fastchat接入本地模型
    -USE_FASTCHAT = "gpt" not in LLM_MODEL
    -python start.py #224 自动执行 python service/llm_api.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/data/index.html b/docs/docs/data/index.html deleted file mode 100644 index d23940b..0000000 --- a/docs/docs/data/index.html +++ /dev/null @@ -1,878 +0,0 @@ - - - - - - - - -Data · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Data

    -
    -
    - - -

    ⏬ Data

    -

    Download

    -
      -
    • Method 1: Download the zip file (you can also simply open the following link with the browser): -
      wget https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip
      -
      then unzip it and you may load the data with pandas: -
      import os
      -import pandas as pd
      -
      -File_Dir="devopseval-exam"
      -test_df=pd.read_csv(os.path.join(File_Dir,"test","UnitTesting.csv"))
      -
    • -
    • Method 2: Directly load the dataset using Hugging Face datasets: -
      from datasets import load_dataset
      -dataset=load_dataset(r"DevOps-Eval/devopseval-exam",name="UnitTesting")
      -
      -print(dataset['val'][0])
      -# {"id": 1, "question": "单元测试应该覆盖以下哪些方面?", "A": "正常路径", "B": "异常路径", "C": "边界值条件", "D": "所有以上", "answer": "D", "explanation": ""}  ```
      -
    • -
    -

    👀 Notes

    -

    To facilitate usage, we have organized the category name handlers and English/Chinese names corresponding to 55 subcategories. Please refer to category_mapping.json for details. The format is:

    -
    {
    -  "UnitTesting.csv": [
    -    "unit testing",
    -    "单元测试",
    -    {"dev": 5, "test": 32}
    -    "TEST"
    -  ],
    -  ...
    -  "file_name":[
    -  "English Name",
    -  "Chinese Name",
    -  "Sample Number",
    -  "Supercategory Label(PLAN,CODE,BUILD,TEST,RELEASE,DEPLOY,OPERATE,MONITOR choose 1 out of 8)"
    -  ]
    -}
    -

    Each subcategory consists of two splits: dev and test. The dev set per subcategory consists of five exemplars with explanations for few-shot evaluation. And the test set is for model evaluation. Labels on the test split are also released.

    -

    Below is a dev example from ‘version control’:

    -
    id: 4
    -question: 如何找到Git特定提交中已更改的文件列表?
    -A: 使用命令 `git diff --name-only SHA`
    -B: 使用命令 `git log --name-only SHA`
    -C: 使用命令 `git commit --name-only SHA`
    -D: 使用命令 `git clone --name-only SHA`
    -answer: A
    -explanation: 
    -分析原因:
    -git diff --name-only SHA命令会显示与SHA参数对应的提交中已修改的文件列表。参数--name-only让命令只输出文件名,而忽略其他信息。其它选项中的命令并不能实现此功能。
    -

    🔥 AIOps Sample Example

    -

    👀 👀 Taking log parsing and time series anomaly detection as examples, here is a brief showcase of the AIOps samples:

    -

    LogParsing

    -
    id: 0
    -question:
    -Here are some running logs
    - 0 04:21:15,429 WARN Cannot open channel to 2 at election address /10.10.34.12:3888
    - 1 19:18:56,377 WARN ******* GOODBYE /10.10.34.11:52703 ********
    - 2 19:13:46,128 WARN ******* GOODBYE /10.10.34.11:52308 ********
    - 3 19:16:26,268 WARN ******* GOODBYE /10.10.34.11:52502 ********
    - 4 09:11:16,012 WARN Cannot open channel to 3 at election address /10.10.34.13:3888
    - 5 16:37:13,837 WARN Cannot open channel to 2 at election address /10.10.34.12:3888
    - 6 09:09:16,008 WARN Cannot open channel to 3 at election address /10.10.34.13:3888
    - 7 15:27:03,681 WARN Cannot open channel to 3 at election address /10.10.34.13:3888
    -The first three parts of the log are index, timestamp, and log level. Without considering these three parts, Here we assume that the variables in the logs are represented as '<*>', separated by spaces between tokens. What is the specific log template for the above logs? 
    -A: Notification time out: <*> 和 Connection broken for id <*>, my id = <*>, error =
    -B: Send worker leaving thread 和 Connection broken for id <*>, my id = <*>, error =
    -C: Received connection request /<*>:<*> 和 Interrupting SendWorker
    -D: Cannot open channel to <*> at election address /<*>:<*> 和 ******* GOODBYE /<*>:<*> ********
    -answer: D
    -explanation: The log includes the fixed template fragments "Cannot open channel to <> at election address /<>:<>" and "****** GOODBYE /<>:<> ********," both of which appear in option D. Meanwhile, the template fragments in the other options do not match the content in the log. Therefore, option D is the most consistent with the log template.
    -

    TimeSeriesAnomalyDetection

    -
    id: 0
    -question:
    -Analyze the following time series
    -[50,62,74,84,92,97,99,98,94,87,77,65,265,40,28,17,8,3,0,0,4,10,20,31,43,56,68,79,89,95,99,99,96,91,82,71,59,46,34,22,12,5,1,0,2,7,15,25,37,49]
    -Please identify the indices of obvious outlier points. Outlier points generally refer to points that significantly deviate from the overall trend of the data.
    -A: 46
    -B: 0
    -C: 37
    -D: 12
    -answer: D
    -explanation: According to the analysis, the value 265 at index 12 of the given time series is significantly larger than the surrounding data, indicating a sudden increase phenomenon. Therefore, selecting option D is correct.
    -

    🔧 ToolLearning Sample Example

    -

    👀 👀 The data format of ToolLearning samples is compatible with OpenAI’s Function Calling.

    -

    Please refer to tool_learning_info.md for details. -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/content/en/docs/b1.codefusechatbot.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/codefusechatbot.en-US.md similarity index 86% rename from content/en/docs/b1.codefusechatbot.md rename to docs/docs/developer-docs/CodeFuse-ChatBot/master/codefusechatbot.en-US.md index 198f850..39fcc63 100644 --- a/content/en/docs/b1.codefusechatbot.md +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/codefusechatbot.en-US.md @@ -1,21 +1,19 @@ --- -title: Codefuse-ChatBot Development by Private Knowledge Augmentation -slug: codefuse-chatbot -language: en -description: 介绍主要功能 -aliases: -- "/docs/codefuse-chatbot" +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + index: true + order: -1 +title: CodeFuse-ChatBot +toc: content --- -

    - 中文  |  English  -

    - - This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface. - ## 📜 Contents + - [🤝 Introduction](#-introduction) - [🧭 Technical Route](#-technical-route) @@ -23,19 +21,18 @@ This project is an open-source AI intelligent assistant, specifically designed f 💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. It transitions gradually from the traditional development and operations mode of querying information from various sources and operating on standalone, disparate platforms to an intelligent development and operations mode based on large-model Q&A, changing people's development and operations habits. -- **🧠 Intelligent Scheduling Core:** Constructed a well-integrated scheduling core system that supports multi-mode one-click configuration, simplifying the operational process.[Use Introduction](/docs/multi-agent) +- **🧠 Intelligent Scheduling Core:** Constructed a well-integrated scheduling core system that supports multi-mode one-click configuration, simplifying the operational process.[Use Introduction](/docs/api-docs/MuAgent/overview/multi-agent) - **💻 Comprehensive Code Repository Analysis:** Achieved in-depth understanding at the repository level and coding and generation at the project file level, enhancing development efficiency. 
- **📄 Enhanced Document Analysis:** Integrated document knowledge bases with knowledge graphs, providing deeper support for document analysis through enhanced retrieval and reasoning. - **🔧 Industry-Specific Knowledge:** Tailored a specialized knowledge base for the DevOps domain, supporting the self-service one-click construction of industry-specific knowledge bases for convenience and practicality. - **🤖 Compatible Models for Specific Verticals:** Designed small models specifically for the DevOps field, ensuring compatibility with related DevOps platforms and promoting the integration of the technological ecosystem. -🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API.[Access Demo](/docs/fastchat) +🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API.[Access Demo](/docs/developer-docs/CodeFuse-ChatBot/master/fastchat) 👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the CodefuseGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of "Making Development Seamless for Everyone." -
    - Image + Image
    🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API. @@ -43,8 +40,9 @@ This project is an open-source AI intelligent assistant, specifically designed f 👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the DevOpsGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of "Making Development Seamless for Everyone." ## 🧭 Technical Route +
    - Image + Image
    - 🧠 **Multi-Agent Schedule Core:** Easily configurable to create interactive intelligent agents. @@ -57,4 +55,4 @@ This project is an open-source AI intelligent assistant, specifically designed f - 💬 **LLM:**:Supports various open-source models and LLM interfaces. - 🛠️ **API Management::** Enables rapid integration of open-source components and operational platforms. -For implementation details, see: [Technical Route Details](sources/readme_docs/roadmap.md) +For implementation details, see: [Technical Route Details](/docs/developer-docs/CodeFuse-ChatBot/master/roadmap) diff --git a/content/zh/docs/b1.codefusechatbot.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/codefusechatbot.zh-CN.md similarity index 61% rename from content/zh/docs/b1.codefusechatbot.md rename to docs/docs/developer-docs/CodeFuse-ChatBot/master/codefusechatbot.zh-CN.md index 9997cf0..381352a 100644 --- a/content/zh/docs/b1.codefusechatbot.md +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/codefusechatbot.zh-CN.md @@ -1,55 +1,54 @@ --- -title: CodeFuse-ChatBot Development by Private Knowledge Augmentation -slug: CodeFuse-ChatBot-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-chatbot-zh" +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + index: true + order: -1 +title: CodeFuse-ChatBot +toc: content --- -

    - 中文  |  English  -

    - -DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。 - +DevOps-ChatBot 是由蚂蚁 CodeFuse 团队开发的开源 AI 智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了 Multi-Agent 的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得 LLM 模型能够在 DevOps 领域内有效执行和处理复杂任务。 ## 📜 目录 + - [🤝 介绍](#-介绍) - [🎥 演示视频](#-演示视频) - [🧭 技术路线](#-技术路线) ## 🤝 介绍 -💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。 +💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的 AI 智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。 本项目核心差异技术、功能点: -- **🧠 智能调度核心:** 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 [使用说明](/docs/multi-agent-zh) + +- **🧠 智能调度核心:** 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 [使用说明](/zh-CN/docs/api-docs/MuAgent/overview/multi-agent) - **💻 代码整库分析:** 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。 - **📄 文档分析增强:** 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。 -- **🔧 垂类专属知识:** 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。 -- **🤖 垂类模型兼容:** 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。 +- **🔧 垂类专属知识:** 为 DevOps 领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。 +- **🤖 垂类模型兼容:** 针对 DevOps 领域的小型模型,保证了与 DevOps 相关平台的兼容性,促进了技术生态的整合。 -🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。[接入Demo](/docs/fastchat-zh) +🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。[接入 Demo](/zh-CN/docs/developer-docs/CodeFuse-ChatBot/master/fastchat) 👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。
    - 图片 + 图片
    - ## 🎥 演示视频 为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。 - - 知识库导入和问答:[演示视频](https://www.youtube.com/watch?v=UGJdTGaVnNY&t=2s&ab_channel=HaotianZhu) - 本地代码库导入和问答:[演示视频](https://www.youtube.com/watch?v=ex5sbwGs3Kg) - ## 🧭 技术路线 +
    - Image + Image
    - 🧠 **Multi-Agent Schedule Core:** 多智能体调度核心,简易配置即可打造交互式智能体。 @@ -62,5 +61,4 @@ DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力 - 💬 **LLM:**:智能体大脑,支持多种开源模型和 LLM 接口。 - 🛠️ **API Management::** API 管理工具,实现对开源组件和运维平台的快速集成。 -具体实现明细见:[技术路线明细](/docs/chatbot-roadmap) - +具体实现明细见:[技术路线明细](/zh-CN/docs/developer-docs/CodeFuse-ChatBot/master/roadmap) diff --git a/content/en/docs/chatbot/c3.fastchat.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/fastchat.en-US.md similarity index 95% rename from content/en/docs/chatbot/c3.fastchat.md rename to docs/docs/developer-docs/CodeFuse-ChatBot/master/fastchat.en-US.md index c90619a..17153ff 100644 --- a/content/en/docs/chatbot/c3.fastchat.md +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/fastchat.en-US.md @@ -1,20 +1,18 @@ --- +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: -1 title: LLM-Configuration -slug: LLM-Configuration -url: "docs/LLM-Configuration" -aliases: -- "/docs/fastchat" -- "/docs/llm-configuration" +order: 1 +toc: content --- -

    - 中文  |  English  -

    - - # Local Privatization/Large Model Interface Access -Leveraging open-source LLMs (Large Language Models) and Embedding models, this project enables offline private deployment based on open-source models. +Leveraging open-source LLMs (Large Language Models) and Embedding models, this project enables offline private deployment based on open-source models. In addition, the project supports the invocation of OpenAI API. @@ -88,8 +86,6 @@ LLM_MODELs = ["chatglm-6b"] "model_name": {'host': DEFAULT_BIND_HOST, 'port': 20057} ``` - -
    量化模型接入 ```bash @@ -139,10 +135,10 @@ class XXWorker(ApiModelWorker): ''' 执行Chat的方法,默认使用模块里面的chat函数。 :params.messages : [ - {"role": "user", "content": "hello"}, + {"role": "user", "content": "hello"}, {"role": "assistant", "content": "hello"} ] - :params.xx: 详情见 ApiChatParams + :params.xx: 详情见 ApiChatParams 要求返回形式:{"error_code": int, "text": str} ''' return {"error_code": 500, "text": f"{self.model_names[0]}未实现chat功能"} @@ -155,7 +151,6 @@ class XXWorker(ApiModelWorker): # Or directly use the existing relevant large model class for use (lacking relevant account testing, community contributions after testing are welcome) ``` - ```bash # Modification of model_config.py#ONLINE_LLM_MODEL configuration # Enter exclusive model details: version, api_base_url, api_key, provider (consistent with the class name above) @@ -177,6 +172,7 @@ ONLINE_LLM_MODEL = { ``` ## Launching Large Model Services + ```bash # start llm-service (optional) - Launch the large model service separately python examples/llm_api.py @@ -200,12 +196,10 @@ print(completion.choices[0].message.content) # Once the correct output is confirmed, LLM can be accessed normally. 
``` - - or ```bash # model_config.py#USE_FASTCHAT - Determine whether to integrate local models via fastchat USE_FASTCHAT = "gpt" not in LLM_MODEL python start.py #221 Automatically executes python llm_api.py -``` \ No newline at end of file +``` diff --git a/content/zh/docs/chatbot/c3.fastchat.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/fastchat.zh-CN.md similarity index 93% rename from content/zh/docs/chatbot/c3.fastchat.md rename to docs/docs/developer-docs/CodeFuse-ChatBot/master/fastchat.zh-CN.md index 04a4779..b49328d 100644 --- a/content/zh/docs/chatbot/c3.fastchat.md +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/fastchat.zh-CN.md @@ -1,25 +1,22 @@ --- +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: -1 title: 本地私有化&大模型接口接入 -slug: 本地私有化&大模型接口接入 -url: "docs/本地私有化大模型接口接入" -aliases: -- "/docs/本地私有化大模型接口接入" -- "/docs/fastchat-zh" +order: 1 +toc: content --- - -

    - 中文  |  English  -

    - - # 本地私有化/大模型接口接入 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。 ## 本地私有化模型接入 -
    模型地址配置示例,model_config.py配置修改 +
    模型地址配置示例,model_config.py 配置修改 ```bash # 建议:走huggingface接入,尽量使用chat模型,不要使用base,无法获取正确输出 @@ -88,8 +85,6 @@ LLM_MODELs = ["chatglm-6b"] "model_name": {'host': DEFAULT_BIND_HOST, 'port': 20057} ``` - -
    量化模型接入 ```bash @@ -140,10 +135,10 @@ class XXWorker(ApiModelWorker): ''' 执行Chat的方法,默认使用模块里面的chat函数。 :params.messages : [ - {"role": "user", "content": "hello"}, + {"role": "user", "content": "hello"}, {"role": "assistant", "content": "hello"} ] - :params.xx: 详情见 ApiChatParams + :params.xx: 详情见 ApiChatParams 要求返回形式:{"error_code": int, "text": str} ''' return {"error_code": 500, "text": f"{self.model_names[0]}未实现chat功能"} @@ -156,7 +151,6 @@ class XXWorker(ApiModelWorker): # 或者直接使用已有的相关大模型类进行使用(缺少相关账号测试,欢迎大家测试后提PR) ``` - ```bash # model_config.py#ONLINE_LLM_MODEL 配置修改 # 填写专属模型的 version、api_base_url、api_key、provider(与上述类名一致) @@ -179,6 +173,7 @@ ONLINE_LLM_MODEL = { ``` ## 启动大模型服务 + ```bash # start llm-service(可选) 单独启动大模型服务 python examples/llm_api.py @@ -205,12 +200,10 @@ print(completion.choices[0].message.content) # 正确输出后则确认LLM可正常接入 ``` - - or ```bash # model_config.py#USE_FASTCHAT 判断是否进行fastchat接入本地模型 USE_FASTCHAT = "gpt" not in LLM_MODEL python start.py #221 自动执行 python llm_api.py -``` \ No newline at end of file +``` diff --git a/docs/docs/developer-docs/CodeFuse-ChatBot/master/quickstart.en-US.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/quickstart.en-US.md new file mode 100644 index 0000000..a7fab54 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/quickstart.en-US.md @@ -0,0 +1,58 @@ +--- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: -1 +title: QuickStart +order: -1 +toc: content +--- + +## 🚀 Quick Start + +To deploy private models, please install the NVIDIA driver by yourself. +This project has been tested on Python 3.9.18 and CUDA 11.7 environments, as well as on Windows and macOS systems with x86 architecture. +For Docker installation, private LLM access, and related startup issues, see: [Start-detail...](/docs/developer-docs/CodeFuse-ChatBot/master/start-detail) + +1. 
Preparation of Python environment + +- It is recommended to use conda to manage the python environment (optional) + +```bash +# Prepare conda environment +conda create --name Codefusegpt python=3.9 +conda activate Codefusegpt +``` + +- Install related dependencies + +```bash +cd Codefuse-ChatBot +# python=3.9,use notebook-latest,python=3.8 use notebook==6.5.5 +pip install -r requirements.txt +``` + +2. Start the Service + +```bash +# After configuring server_config.py, you can start with just one click. +cd examples +bash start.sh +# you can config your llm model and embedding model, then choose the "启动对话服务" +``` + +
    + 图片 +
    + +Or `python start.py` by [old version to start](/docs/developer-docs/CodeFuse-ChatBot/master/start-detail) +More details about accessing LLM Moldes[More Details...](/docs/developer-docs/CodeFuse-ChatBot/master/fastchat) +
    diff --git a/docs/docs/developer-docs/CodeFuse-ChatBot/master/quickstart.zh-CN.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/quickstart.zh-CN.md new file mode 100644 index 0000000..2da1fdc --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/quickstart.zh-CN.md @@ -0,0 +1,58 @@ +--- +nav: + title: 文档 + order: -1 + second: + title: 开发者文档 + order: -1 +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: -1 +title: 快速开始 +order: -1 +toc: content +--- + +## 🚀 快速使用 + +如需使用私有化模型部署,请自行安装 nvidia 驱动程序,本项目已在 Python 3.9.18,CUDA 11.7 环境下,Windows、X86 架构的 macOS 系统中完成测试。 + +Docker 安装、私有化 LLM 接入及相关启动问题见:[快速使用明细](/zh-CN/docs/developer-docs/CodeFuse-ChatBot/master/start-detail) + +1、python 环境准备 + +- 推荐采用 conda 对 python 环境进行管理(可选) + +```bash +# 准备 conda 环境 +conda create --name devopsgpt python=3.9 +conda activate devopsgpt +``` + +- 安装相关依赖 + +```bash +cd codefuse-chatbot +# python=3.9,notebook用最新即可,python=3.8用notebook=6.5.6 +pip install -r requirements.txt +``` + +2、启动服务 + +```bash +# 完成server_config.py配置后,可一键启动 +cd examples +bash start.sh +# 开始在页面进行相关配置,然后打开`启动对话服务`即可 +``` + +
    + 图片 +
    + +或者通过`start.py`进行启动[老版启动方式](/zh-CN/docs/developer-docs/CodeFuse-ChatBot/master/start-detail) +更多 LLM 接入方法见[更多细节...](/zh-CN/docs/developer-docs/CodeFuse-ChatBot/master/fastchat) +
    diff --git a/docs/docs/developer-docs/CodeFuse-ChatBot/master/roadmap.en-US.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/roadmap.en-US.md new file mode 100644 index 0000000..38cdb96 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/roadmap.en-US.md @@ -0,0 +1,96 @@ +--- +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: -1 +title: ChatBot-RoadMap +order: 2 +toc: content +--- + +## RoadMap + +
    + 图片 +
    +
    + +Roadmap Overview + +- [x] Sandbox Environment + - [x] Isolated sandbox environment for code execution + - [x] File upload and download + - [ ] Support for Java execution environment +- [x] Vector Database & Retrieval + - [x] Task retrieval + - [x] Tool retrieval +- [x] Prompt Management +- [x] Memory Management +- [x] Multi Agent Framework + - [ ] PRD (Product Requirement Document), system analysis, interface design + - [ ] Generate code based on requirement documents, system analysis, and interface design + - [ ] Automated testing, automated debugger + - [ ] Operations process integration (ToolLearning) + - [ ] Fully automated end-to-end process +- [x] Integration with LLM based on fastchat +- [x] Integration with Text Embedding based on sentencebert +- [x] Improved vector loading speed +- [x] Connector + - [x] React Mode based on langchain + - [x] Tool retrieval completed with langchain +- [ ] General Capability for Web Crawl + - [x] Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. + - [ ] Issue document + - [ ] SDK Library Document + +v0.0 + +- [x] Sandbox Environment + - [x] Isolated sandbox environment for code execution +- [x] Integration with LLM based on fastchat +- [x] Integration with Text Embedding based on sentencebert +- [x] General Capability for Web Crawl: Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. + +Done +
    + +v0.1 + +- [x] Sandbox Environment: File upload and download +- [x] Vector Database & Retrieval + - [x] Task retrieval + - [x] Tool retrieval +- [x] Connector + - [x] React Mode based on langchain +- [x] Integration with Text Embedding based on sentencebert: Improved vector loading speed + +Done +
    + +v0.2 + +- [x] Prompt Management +- [x] Memory Management +- [x] Vector Database & Retrieval + +Done +
    + +v0.3 + +- [x] Sandbox Environment + - [ ] Support for Java execution environment +- [x] Multi Agent + - [ ] PRD (Product Requirement Document), system analysis, interface design + - [ ] Generate code based on requirement documents, system analysis, and interface design + - [ ] Automated testing, automated debugger + - [ ] Operations process integration (ToolLearning) + - [ ] Fully automated end-to-end process +- [x] General Capability for Web Crawl + - [ ] Issue document + - [ ] SDK Library Document + +DDL: 2024.12.31 +
    diff --git a/docs/docs/developer-docs/CodeFuse-ChatBot/master/roadmap.zh-CN.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/roadmap.zh-CN.md new file mode 100644 index 0000000..2d3c5b5 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/roadmap.zh-CN.md @@ -0,0 +1,91 @@ +--- +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: -1 +title: ChatBot 技术路线 +order: 2 +toc: content +--- + +## RoadMap + +
    + 图片 +
    +
    + +完整路线 + +- [x] Sandbox 环境 + - [x] 环境隔离的 sandbox 环境与代码执行 + - [x] 上传、下载文件 + - [ ] 支持 java 执行环境 +- [ ] Vector Database & Retrieval + - [x] task retrieval + - [x] tool retrieval +- [x] Prompt Management +- [x] memory Management +- [x] Multi Agent + - [ ] PRD 需求文档、系分、接口设计 + - [ ] 根据需求文档、系分、接口设计生产代码 + - [ ] 自动测试、自动 debugger + - [ ] 运维流程接入(ToolLearning + - [ ] 全流程自动 +- [x] 基于 fastchat 接入 LLM +- [x] 基于 sentencebert 接入 Text Embedding + - [x] 向量加载速度提升 +- [x] Connector + - [x] 基于 langchain 的 react 模式 + - [x] 基于 langchain 完成 tool 检索 +- [x] Web Crawl 通用能力 + - [x] 技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 + - [ ] issue document + - [ ] SDK Library Document + +

    + +- v0.0 +- [x] Sandbox 环境 + - [x] 环境隔离的 sandbox 环境与代码执行 +- [x] 基于 fastchat 接入 LLM +- [x] 基于 sentencebert 接入 Text Embedding +- [x] Web Crawl 通用能力:技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 +
    +- v0.1 +- [x] Sandbox 环境: 上传、下载文件 +- [x] Vector Database & Retrieval + - [x] task retrieval + - [x] tool retrieval +- [x] Connector + - [x] 基于 langchain 的 react 模式 +- [x] 基于 sentencebert 接入 Text Embedding: 向量加载速度提升 + +Done +
    + +- v0.2 +- [x] Prompt Management +- [x] memory Management +- [x] Vector Database & Retrieval + +DDL: 2024.01.31 +
    + +- v0.3 +- [x] Sandbox 环境 + - [ ] 支持 java 执行环境 +- [x] Multi Agent Framework + - [ ] PRD 需求文档、系分、接口设计 + - [ ] 根据需求文档、系分、接口设计生产代码 + - [ ] 自动测试、自动 debugger + - [ ] 运维流程接入(ToolLearning) + - [ ] 全流程自动 +- [x] Web Crawl 通用能力 + - [ ] issue document + - [ ] SDK Library Document + +DDL: 2024.12.31 +
    diff --git a/content/en/docs/chatbot/c2.start-detail.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/start-detail.en-US.md similarity index 80% rename from content/en/docs/chatbot/c2.start-detail.md rename to docs/docs/developer-docs/CodeFuse-ChatBot/master/start-detail.en-US.md index 25ac27c..fb1736b 100644 --- a/content/en/docs/chatbot/c2.start-detail.md +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/start-detail.en-US.md @@ -1,19 +1,21 @@ --- +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: 0 title: Start-Detail -slug: Start-Detail -aliases: -- "/docs/start-detail" +order: 0 +toc: content --- -

    - 中文  |  English  -

    - - If you need to deploy a privatized model, please install the NVIDIA driver yourself. ### Preparation of Python environment + - It is recommended to use conda to manage the python environment (optional) + ```bash # Prepare conda environment conda create --name Codefusegpt python=3.9 @@ -21,21 +23,24 @@ conda activate Codefusegpt ``` - Install related dependencies + ```bash cd Codefuse-ChatBot pip install -r requirements.txt ``` ### Sandbox Environment Preparation + - Windows Docker installation: -[Docker Desktop for Windows](https://docs.docker.com/desktop/install/windows-install/) supports 64-bit versions of Windows 10 Pro with Hyper-V enabled (Hyper-V is not required for versions v1903 and above), or 64-bit versions of Windows 10 Home v1903 and above. - - [【全面详细】Windows10 Docker安装详细教程](https://zhuanlan.zhihu.com/p/441965046) + [Docker Desktop for Windows](https://docs.docker.com/desktop/install/windows-install/) supports 64-bit versions of Windows 10 Pro with Hyper-V enabled (Hyper-V is not required for versions v1903 and above), or 64-bit versions of Windows 10 Home v1903 and above. + + - [【全面详细】Windows10 Docker 安装详细教程](https://zhuanlan.zhihu.com/p/441965046) - [Docker 从入门到实践](https://yeasy.gitbook.io/docker_practice/install/windows) - [Handling 'Docker Desktop requires the Server service to be enabled'](https://blog.csdn.net/sunhy_csdn/article/details/106526991) - - [安装wsl或者等报错提示](https://learn.microsoft.com/zh-cn/windows/wsl/install) + - [安装 wsl 或者等报错提示](https://learn.microsoft.com/zh-cn/windows/wsl/install) - Linux Docker installation: -Linux installation is relatively simple, please search Baidu/Google for installation guides. + Linux installation is relatively simple, please search Baidu/Google for installation guides. 
- Mac Docker installation - [Docker 从入门到实践](https://yeasy.gitbook.io/docker_practice/install/mac) @@ -63,8 +68,6 @@ git lfs clone https://huggingface.co/shibing624/text2vec-base-chinese cp ~/shibing624/text2vec-base-chinese ~/codefuse-chatbot/embedding_models/ ``` - - ### Basic Configuration ```bash @@ -99,8 +102,6 @@ DOCKER_SERVICE = True SANDBOX_DO_REMOTE = True ``` - - ### Starting the Service By default, only the webui-related services are started, and fastchat is not started (optional). @@ -113,11 +114,12 @@ By default, only the webui-related services are started, and fastchat is not sta # start llm-service (optional) python examples/llm_api.py ``` -For more LLM integration methods, see[more details...](./fastchat.md) + +For more LLM integration methods, see[more details...](/docs/developer-docs/CodeFuse-ChatBot/master/fastchat)
    ```bash # After completing the server_config.py configuration, you can start with one click cd examples python start.py -``` \ No newline at end of file +``` diff --git a/content/zh/docs/chatbot/c2.start-detail.md b/docs/docs/developer-docs/CodeFuse-ChatBot/master/start-detail.zh-CN.md similarity index 75% rename from content/zh/docs/chatbot/c2.start-detail.md rename to docs/docs/developer-docs/CodeFuse-ChatBot/master/start-detail.zh-CN.md index 43cd795..0f1646d 100644 --- a/content/zh/docs/chatbot/c2.start-detail.md +++ b/docs/docs/developer-docs/CodeFuse-ChatBot/master/start-detail.zh-CN.md @@ -1,23 +1,21 @@ --- +store: + title: CodeFuse-ChatBot + version: master +group: + title: 🌱 CodeFuse-ChatBot + order: -1 title: 启动明细 -slug: 启动明细 -description: 介绍主要功能 -url: "docs/启动明细" -aliases: -- "/docs/启动明细" -- "/docs/start-detail-zh" +order: 0 +toc: content --- -

    - 中文  |  English  -

    - - -如需使用私有化模型部署,请自行安装 nvidia 驱动程序。。 +如需使用私有化模型部署,请自行安装 nvidia 驱动程序。 ### python 环境准备 - 推荐采用 conda 对 python 环境进行管理(可选) + ```bash # 准备 conda 环境 conda create --name devopsgpt python=3.9 @@ -25,6 +23,7 @@ conda activate devopsgpt ``` - 安装相关依赖 + ```bash cd codefuse-chatbot # python=3.9,notebook用最新即可,python=3.8用notebook=6.5.6 @@ -32,16 +31,17 @@ pip install -r requirements.txt ``` ### 沙盒环境准备 + - windows Docker 安装: -[Docker Desktop for Windows](https://docs.docker.com/desktop/install/windows-install/) 支持 64 位版本的 Windows 10 Pro,且必须开启 Hyper-V(若版本为 v1903 及以上则无需开启 Hyper-V),或者 64 位版本的 Windows 10 Home v1903 及以上版本。 + [Docker Desktop for Windows](https://docs.docker.com/desktop/install/windows-install/) 支持 64 位版本的 Windows 10 Pro,且必须开启 Hyper-V(若版本为 v1903 及以上则无需开启 Hyper-V),或者 64 位版本的 Windows 10 Home v1903 及以上版本。 - - [【全面详细】Windows10 Docker安装详细教程](https://zhuanlan.zhihu.com/p/441965046) + - [【全面详细】Windows10 Docker 安装详细教程](https://zhuanlan.zhihu.com/p/441965046) - [Docker 从入门到实践](https://yeasy.gitbook.io/docker_practice/install/windows) - [Docker Desktop requires the Server service to be enabled 处理](https://blog.csdn.net/sunhy_csdn/article/details/106526991) - - [安装wsl或者等报错提示](https://learn.microsoft.com/zh-cn/windows/wsl/install) + - [安装 wsl 或者等报错提示](https://learn.microsoft.com/zh-cn/windows/wsl/install) - Linux Docker 安装: -Linux 安装相对比较简单,请自行 baidu/google 相关安装 + Linux 安装相对比较简单,请自行 baidu/google 相关安装 - Mac Docker 安装 - [Docker 从入门到实践](https://yeasy.gitbook.io/docker_practice/install/mac) @@ -69,7 +69,6 @@ git lfs clone https://huggingface.co/shibing624/text2vec-base-chinese cp ~/shibing624/text2vec-base-chinese ~/codefuse-chatbot/embedding_models/ ``` - ### 基础配置 ```bash @@ -105,7 +104,8 @@ SANDBOX_DO_REMOTE = True ### 启动服务 -默认只启动webui相关服务,未启动fastchat(可选)。 +默认只启动 webui 相关服务,未启动 fastchat(可选)。 + ```bash # 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁 # cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py @@ -114,11 +114,12 @@ SANDBOX_DO_REMOTE = True # start llm-service(可选) 
python examples/llm_api.py ``` -更多LLM接入方法见[详情...](/docs/fastchat-zh) + +更多 LLM 接入方法见[详情...](/docs/developer-docs/CodeFuse-ChatBot/master/fastchat)
    ```bash # 完成server_config.py配置后,可一键启动 cd examples python start.py -``` \ No newline at end of file +``` diff --git a/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/categroy_mapping.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/categroy_mapping.md new file mode 100644 index 0000000..7394beb --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/categroy_mapping.md @@ -0,0 +1,492 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +resource: true +toc: content +--- + +```json +{ + "Visualization.csv": [ + "visualization", + "可视化", + { + "dev": 5, + "test": 44 + }, + "Visualization.csv" + ], + "Logging.csv": [ + "logging", + "日志", + { + "dev": 5, + "test": 100 + }, + "Logging.csv" + ], + "Storage.csv": [ + "storage", + "存储", + { + "dev": 5, + "test": 36 + }, + "Storage.csv" + ], + "DataAcquisition.csv": [ + "data acquisition", + "数据采集", + { + "dev": 5, + "test": 36 + }, + "DataAcquisition.csv" + ], + "IntegrationTesting.csv": [ + "integration testing", + "集成测试", + { + "dev": 5, + "test": 31 + }, + "IntegrationTesting.csv" + ], + "UserAcceptanceTesting.csv": [ + "user acceptance testing", + "用户验收测试", + { + "dev": 5, + "test": 39 + }, + "UserAcceptanceTesting.csv" + ], + "SecurityTesting.csv": [ + "security testing", + "安全测试", + { + "dev": 5, + "test": 38 + }, + "SecurityTesting.csv" + ], + "UnitTesting.csv": [ + "unit testing", + "单元测试", + { + "dev": 5, + "test": 32 + }, + "UnitTesting.csv" + ], + "PerformanceTesting.csv": [ + "performance testing", + "性能测试", + { + "dev": 5, + "test": 36 + }, + "PerformanceTesting.csv" + ], + "SystemTesting.csv": [ + "system testing", + "系统测试", + { + "dev": 5, + "test": 52 + }, + "SystemTesting.csv" + ], + "ProgM.csv": [ + "programme management", + "进度管理", + { + "dev": 5, + "test": 21 + }, + "ProgM.csv" + ], + "REQM.csv": [ + "requirements management", + "需求管理", + { + "dev": 5, + "test": 24 + }, + "REQM.csv" + ], + "RiskMgmt.csv": [ 
+ "risk management", + "风险管理", + { + "dev": 5, + "test": 21 + }, + "RiskMgmt.csv" + ], + "InfrastructureAsCode.csv": [ + "infrastructure as code", + "基础设施即代码", + { + "dev": 5, + "test": 34 + }, + "InfrastructureAsCode.csv" + ], + "Provisioning.csv": [ + "provisioning", + "置备", + { + "dev": 5, + "test": 19 + }, + "Provisioning.csv" + ], + "ConfigMgmt.csv": [ + "config management", + "配置管理", + { + "dev": 5, + "test": 100 + }, + "ConfigMgmt.csv" + ], + "Azure.csv": [ + "microsoft azure", + "微软云服务", + { + "dev": 5, + "test": 27 + }, + "Azure.csv" + ], + "GoogleCloud.csv": [ + "google cloud", + "谷歌云服务", + { + "dev": 5, + "test": 31 + }, + "GoogleCloud.csv" + ], + "AWS.csv": [ + "amazon web services", + "亚马逊云服务", + { + "dev": 5, + "test": 44 + }, + "AWS.csv" + ], + "LogDesign.csv": [ + "log design", + "日志设计", + { + "dev": 5, + "test": 33 + }, + "LogDesign.csv" + ], + "ServiceDesign.csv": [ + "service design", + "服务设计", + { + "dev": 5, + "test": 44 + }, + "ServiceDesign.csv" + ], + "CapabilityDesign.csv": [ + "capability design", + "容量设计", + { + "dev": 5, + "test": 33 + }, + "CapabilityDesign.csv" + ], + "CloudNativeDesign.csv": [ + "cloud native design", + "云原生设计", + { + "dev": 5, + "test": 44 + }, + "CloudNativeDesign.csv" + ], + "CacheDesign.csv": [ + "cache design", + "缓存设计", + { + "dev": 5, + "test": 28 + }, + "CacheDesign.csv" + ], + "DBDesign.csv": [ + "database design", + "数据库设计", + { + "dev": 5, + "test": 38 + }, + "DBDesign.csv" + ], + "ArtificialIntelligence.csv": [ + "artificial intelligence", + "人工智能", + { + "dev": 5, + "test": 45 + }, + "ArtificialIntelligence.csv" + ], + "ComputerBasics.csv": [ + "computer basics", + "计算机基础", + { + "dev": 5, + "test": 100 + }, + "ComputerBasics.csv" + ], + "DataBase.csv": [ + "database", + "数据库", + { + "dev": 5, + "test": 75 + }, + "DataBase.csv" + ], + "ComputerNetwork.csv": [ + "computer network", + "计算机网络", + { + "dev": 5, + "test": 88 + }, + "ComputerNetwork.csv" + ], + "OperatingSystem.csv": [ + "operating system", + 
"操作系统", + { + "dev": 5, + "test": 36 + }, + "OperatingSystem.csv" + ], + "Go.csv": [ + "go", + "go语言", + { + "dev": 5, + "test": 100 + }, + "Go.csv" + ], + "Java.csv": [ + "java", + "java语言", + { + "dev": 5, + "test": 100 + }, + "Java.csv" + ], + "C:C++.csv": [ + "c/c++", + "c/c++语言", + { + "dev": 5, + "test": 100 + }, + "C:C++.csv" + ], + "Python.csv": [ + "python", + "python语言", + { + "dev": 5, + "test": 73 + }, + "Python.csv" + ], + "BigData.csv": [ + "big data", + "大数据", + { + "dev": 5, + "test": 15 + }, + "BigData.csv" + ], + "Front-end.csv": [ + "front-end", + "前端", + { + "dev": 5, + "test": 100 + }, + "Front-end.csv" + ], + "MobileApp.csv": [ + "mobile app", + "移动应用", + { + "dev": 5, + "test": 100 + }, + "MobileApp.csv" + ], + "MachineLearning.csv": [ + "machine learning", + "机器学习", + { + "dev": 5, + "test": 69 + }, + "MachineLearning.csv" + ], + "Back-end.csv": [ + "back-end", + "后端", + { + "dev": 5, + "test": 100 + }, + "Back-end.csv" + ], + "ArtifactMgmt.csv": [ + "artifact management", + "产出物管理", + { + "dev": 5, + "test": 12 + }, + "ArtifactMgmt.csv" + ], + "CI:CD.csv": [ + "cd/cd", + "持续集成/持续部署", + { + "dev": 5, + "test": 100 + }, + "CI:CD.csv" + ], + "Linux.csv": [ + "linux", + "linux操作系统", + { + "dev": 5, + "test": 100 + }, + "Linux.csv" + ], + "ContainerOrchestration.csv": [ + "container orchestration", + "容器编排", + { + "dev": 5, + "test": 100 + }, + "ContainerOrchestration.csv" + ], + "Virtualization.csv": [ + "virtualization", + "虚拟化技术", + { + "dev": 5, + "test": 34 + }, + "Virtualization.csv" + ], + "TimeSeriesAnomalyDetection.csv": [ + "time series anomaly detection", + "时序异常检测", + { + "dev": 5, + "test": 300 + }, + "TimeSeriesAnomalyDetection.csv" + ], + "TimeSeriesClassification.csv": [ + "time series classification", + "时序分类", + { + "dev": 5, + "test": 200 + }, + "TimeSeriesClassification.csv" + ], + "RootCauseAnalysis.csv": [ + "root cause analysis", + "根因分析", + { + "dev": 5, + "test": 250 + }, + "RootCauseAnalysis.csv" + ], + "LogParser.csv": 
[ + "log parser", + "日志解析", + { + "dev": 5, + "test": 350 + }, + "LogParser.csv" + ], + "VersionControl.csv": [ + "version control", + "版本控制", + { + "dev": 5, + "test": 100 + }, + "VersionControl.csv" + ], + "DBMgnt.csv": [ + "database management", + "数据库管理", + { + "dev": 5, + "test": 19 + }, + "DBMgnt.csv" + ], + "Dependency.csv": [ + "dependency", + "依赖管理", + { + "dev": 5, + "test": 44 + }, + "Dependency.csv" + ], + "Compile.csv": [ + "compile", + "编译", + { + "dev": 5, + "test": 31 + }, + "Compile.csv" + ], + "Package.csv": [ + "package", + "包管理", + { + "dev": 5, + "test": 24 + }, + "Package.csv" + ] +} +``` diff --git a/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/codefuseDevopsEval.en-US.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/codefuseDevopsEval.en-US.md new file mode 100644 index 0000000..2700a8b --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/codefuseDevopsEval.en-US.md @@ -0,0 +1,140 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + index: true + order: -1 +title: CodeFuse-DevOps-Eval +order: 0 +toc: content +--- + +

    + + + +DevOps-Eval is a comprehensive evaluation suite specifically designed for foundation models in the DevOps field. We hope DevOps-Eval could help developers, especially in the DevOps field, track the progress and analyze the important strengths/shortcomings of their models. + +📚 This repo contains questions and exercises related to DevOps, including the AIOps, ToolLearning; + +💥️ There are currently **7486** multiple-choice questions spanning 8 diverse general categories, as shown [below](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*eXnGSreQ_NQAAAAAAAAAAAAADlHYAQ/original). + +🔥 There are a total of **2840** samples in the AIOps subcategory, covering scenarios such as **log parsing**, **time series anomaly detection**, **time series classification**, **time series forecasting**, and **root cause analysis**. + +🔧 There are a total of **1509** samples in the ToolLearning subcategory, covering 239 tool scenes across 59 fields. + +

    + +## 🏆 Leaderboard + +Below are zero-shot and five-shot accuracies from the models that we evaluate in the initial release. We note that five-shot performance is better than zero-shot for many instruction-tuned models. + +### 👀 DevOps + +#### Zero Shot + +| **ModelName** | plan | code | build | test | release | deploy | operate | monitor | **AVG** | +| :----------------: | :---: | :---: | :---: | :---: | :-----: | :----: | :-----: | :-----: | :-----: | +| DevOpsPal-14B-Chat | 60.61 | 78.35 | 84.86 | 84.65 | 87.26 | 82.75 | 69.89 | 79.17 | 78.23 | +| DevOpsPal-14B-Base | 54.55 | 77.82 | 83.49 | 85.96 | 86.32 | 81.96 | 71.18 | 82.41 | 78.23 | +| Qwen-14B-Chat | 60.61 | 75.4 | 85.32 | 84.21 | 89.62 | 82.75 | 69.57 | 80.56 | 77.18 | +| Qwen-14B-Base | 57.58 | 73.81 | 84.4 | 85.53 | 86.32 | 81.18 | 70.05 | 80.09 | 76.19 | +| Baichuan2-13B-Base | 60.61 | 69.42 | 79.82 | 79.82 | 82.55 | 81.18 | 70.37 | 83.8 | 73.73 | +| Baichuan2-13B-Chat | 60.61 | 68.43 | 77.98 | 80.7 | 81.6 | 83.53 | 67.63 | 84.72 | 72.9 | +| DevOpsPal-7B-Chat | 54.55 | 69.11 | 83.94 | 82.02 | 76.89 | 80 | 64.73 | 77.78 | 71.92 | +| DevOpsPal-7B-Base | 54.55 | 68.96 | 82.11 | 78.95 | 80.66 | 76.47 | 65.54 | 78.7 | 71.69 | +| Qwen-7B-Base | 53.03 | 68.13 | 78.9 | 75.44 | 80.19 | 80 | 65.06 | 80.09 | 71.09 | +| Qwen-7B-Chat | 57.58 | 66.01 | 80.28 | 79.82 | 76.89 | 77.65 | 62.64 | 79.17 | 69.75 | +| Baichuan2-7B-Chat | 54.55 | 63.66 | 77.98 | 76.32 | 71.7 | 73.33 | 59.42 | 79.63 | 66.97 | +| Internlm-7B-Chat | 60.61 | 62.15 | 77.06 | 76.32 | 66.98 | 74.51 | 60.39 | 78.24 | 66.27 | +| Baichuan2-7B-Base | 56.06 | 62.45 | 75.69 | 70.61 | 74.06 | 69.8 | 61.67 | 75.93 | 66.21 | +| Internlm-7B-Base | 54.55 | 58.29 | 79.36 | 78.95 | 77.83 | 70.59 | 65.86 | 75.93 | 65.99 | + +#### Five Shot + +| **ModelName** | plan | code | build | test | release | deploy | operate | monitor | **AVG** | +| :----------------: | :---: | :---: | :---: | :---: | :-----: | :----: | :-----: | :-----: | :-----: | +| 
DevOpsPal-14B-Chat | 63.64 | 79.49 | 81.65 | 85.96 | 86.79 | 86.67 | 72.95 | 81.48 | 79.69 | +| DevOpsPal-14B-Base | 62.12 | 80.55 | 82.57 | 85.53 | 85.85 | 84.71 | 71.98 | 80.09 | 79.63 | +| Qwen-14B-Chat | 65.15 | 76 | 82.57 | 85.53 | 84.91 | 84.31 | 70.85 | 81.48 | 77.81 | +| Qwen-14B-Base | 66.67 | 76.15 | 84.4 | 85.53 | 86.32 | 80.39 | 72.46 | 80.56 | 77.56 | +| Baichuan2-13B-Base | 63.64 | 71.39 | 80.73 | 82.46 | 81.13 | 84.31 | 73.75 | 85.19 | 75.8 | +| Qwen-7B-Base | 75.76 | 72.52 | 78.9 | 81.14 | 83.96 | 81.18 | 70.37 | 81.94 | 75.36 | +| Baichuan2-13B-Chat | 62.12 | 69.95 | 76.61 | 84.21 | 83.49 | 79.61 | 71.98 | 80.56 | 74.12 | +| DevOpsPal-7B-Chat | 66.67 | 69.95 | 83.94 | 81.14 | 80.19 | 82.75 | 68.6 | 76.85 | 73.61 | +| DevOpsPal-7B-Base | 69.7 | 69.49 | 82.11 | 81.14 | 82.55 | 82.35 | 67.15 | 79.17 | 73.35 | +| Qwen-7B-Chat | 65.15 | 66.54 | 82.57 | 81.58 | 81.6 | 81.18 | 65.38 | 81.02 | 71.69 | +| Baichuan2-7B-Base | 60.61 | 67.22 | 76.61 | 75 | 77.83 | 78.43 | 67.31 | 79.63 | 70.8 | +| Internlm-7B-Chat | 60.61 | 63.06 | 79.82 | 80.26 | 67.92 | 75.69 | 60.06 | 77.31 | 69.21 | +| Baichuan2-7B-Chat | 60.61 | 64.95 | 81.19 | 75.88 | 71.23 | 75.69 | 64.9 | 79.17 | 69.05 | +| Internlm-7B-Base | 62.12 | 65.25 | 77.52 | 80.7 | 74.06 | 78.82 | 63.45 | 75.46 | 67.17 | + +### 🔥 AIOps + +
    + +#### Zero Shot + +| **ModelName** | LogParsing | RootCauseAnalysis | TimeSeriesAnomalyDetection | TimeSeriesClassification | TimeSeriesForecasting | **AVG** | +| :----------------: | :--------: | :---------------: | :------------------------: | :----------------------: | :-------------------: | :-----: | +| Qwen-14B-Base | 66.29 | 58.8 | 25.33 | 43.5 | 62.5 | 52.25 | +| DevOpsPal-14B—Base | 63.14 | 53.6 | 23.33 | 43.5 | 64.06 | 50.49 | +| Qwen-14B-Chat | 64.57 | 51.6 | 22.67 | 36 | 62.5 | 48.94 | +| DevOpsPal-14B—Chat | 60 | 56 | 24 | 43 | 57.81 | 48.8 | +| Qwen-7B-Base | 50 | 39.2 | 22.67 | 54 | 43.75 | 41.48 | +| DevOpsPal-7B—Chat | 56.57 | 30.4 | 25.33 | 45 | 44.06 | 40.92 | +| Baichuan2-13B-Chat | 64 | 18 | 21.33 | 37.5 | 46.88 | 39.3 | +| Qwen-7B-Chat | 57.43 | 38.8 | 22.33 | 39.5 | 25.31 | 36.97 | +| Internlm-7B—Chat | 58.86 | 8.8 | 22.33 | 28.5 | 51.25 | 36.34 | +| Baichuan2-7B-Chat | 60.86 | 10 | 28 | 34.5 | 39.06 | 36.34 | +| Baichuan2-7B-Base | 53.43 | 12.8 | 27.67 | 36.5 | 40.31 | 35.49 | +| Baichuan2-13B-Base | 54 | 12.4 | 23 | 34.5 | 42.81 | 34.86 | +| DevOpsPal-7B—Base | 46.57 | 20.8 | 25 | 34 | 38.75 | 33.94 | +| Internlm-7B—Base | 48.57 | 18.8 | 23.33 | 37.5 | 33.75 | 33.1 | + +#### One Shot + +| **ModelName** | LogParsing | RootCauseAnalysis | TimeSeriesAnomalyDetection | TimeSeriesClassification | TimeSeriesForecasting | **AVG** | +| :----------------: | :--------: | :---------------: | :------------------------: | :----------------------: | :-------------------: | :-----: | +| DevOpsPal-14B—Chat | 66.29 | 80.8 | 23.33 | 44.5 | 56.25 | 54.44 | +| DevOpsPal-14B—Base | 60 | 74 | 25.33 | 43.5 | 52.5 | 51.13 | +| Qwen-14B-Base | 64.29 | 74.4 | 28 | 48.5 | 40.31 | 50.77 | +| Qwen-7B-Base | 56 | 60.8 | 27.67 | 44 | 57.19 | 49.44 | +| Qwen-14B-Chat | 49.71 | 65.6 | 28.67 | 48 | 42.19 | 46.13 | +| Baichuan2-13B-Base | 56 | 43.2 | 24.33 | 41 | 46.88 | 42.89 | +| Baichuan2-7B-Chat | 58.57 | 31.6 | 27 | 31.5 | 51.88 | 41.83 | +| DevOpsPal-7B—Base | 
52.86 | 44.4 | 28 | 44.5 | 36.25 | 41.2 | +| Baichuan2-7B-Base | 48.29 | 40.4 | 27 | 42 | 40.94 | 39.86 | +| Qwen-7B-Chat | 54.57 | 52 | 29.67 | 26.5 | 27.19 | 38.73 | +| Baichuan2-13B-Chat | 57.43 | 44.4 | 25 | 25.5 | 30.63 | 37.75 | +| DevOpsPal-7B—Chat | 56.57 | 27.2 | 25.33 | 41.5 | 33.44 | 37.46 | +| Internlm-7B—Chat | 62.57 | 12.8 | 22.33 | 21 | 50.31 | 36.69 | +| Internlm-7B—Base | 48 | 33.2 | 29 | 35 | 31.56 | 35.85 | + +
    + +### 🔧 ToolLearning + +
    + +| **FuncCall-Filler** | dataset_name | fccr | 1-fcffr | 1-fcfnr | 1-fcfpr | 1-fcfnir | aar | +| :-----------------: | :----------: | :---: | :-----: | :-----: | :-----: | :------: | :---: | +| Qwen-14b-chat | luban | 61 | 100 | 97.68 | 63.32 | 100 | 69.46 | +| Qwen-7b-chat | luban | 50.58 | 100 | 98.07 | 52.51 | 100 | 63.59 | +| Baichuan-7b-chat | luban | 60.23 | 100 | 97.3 | 62.93 | 99.61 | 61.12 | +| Internlm-chat-7b | luban | 47.88 | 100 | 96.14 | 51.74 | 99.61 | 61.85 | +| Qwen-14b-chat | fc_data | 98.37 | 99.73 | 99.86 | 98.78 | 100 | 81.58 | +| Qwen-7b-chat | fc_data | 99.46 | 99.86 | 100 | 99.59 | 100 | 79.25 | +| Baichuan-7b-chat | fc_data | 97.96 | 99.32 | 100 | 98.64 | 100 | 89.53 | +| Internlm-chat-7b | fc_data | 94.29 | 95.78 | 100 | 98.5 | 100 | 88.19 | +| CodeLLaMa-7b | fc_data | 98.78 | 99.73 | 100 | 99.05 | 100 | 94.7 | +| CodeLLaMa-7b-16 | fc_data | 98.1 | 99.87 | 99.73 | 98.5 | 100 | 93.14 | +| CodeFuse-7b-4k | fc_data | 98.91 | 99.87 | 99.87 | 99.18 | 100 | 89.5 | + +
    diff --git a/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/codefuseDevopsEval.zh-CN.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/codefuseDevopsEval.zh-CN.md new file mode 100644 index 0000000..cfc3588 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/codefuseDevopsEval.zh-CN.md @@ -0,0 +1,139 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + index: true + order: -1 +title: CodeFuse-DevOps-Eval +toc: content +--- + +

    + + + +DevOps-Eval 是一个专门为 DevOps 领域大模型设计的综合评估数据集。我们希望 DevOps-Eval 能够帮助开发者,尤其是 DevOps 领域的开发者,追踪进展并分析他们拥有的 DevOps 大模型的优势和不足之处。 + +📚 该仓库包含与 DevOps 和 AIOps 相关的问题和练习,还添加了 ToolLearning 相关的样本。 + +💥 目前有 **7486** 个多项选择题,根据 DevOps 的通用流程将其归纳为 8 个模块,如[下图](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*eXnGSreQ_NQAAAAAAAAAAAAADlHYAQ/original)所示。 + +🔥 AIOps 样本总计 **2840** 个,覆盖的场景包括**日志解析**、**时序异常检测**、**时序分类**、**时序预测**和**根因分析**。 + +🔧 ToolLearning 样本 **1509** 个,涵盖 59 个领域,总计 239 种工具类别。 + +

    + +## 🏆 排行榜 + +以下是我们获得的初版评测结果,包括多个开源模型的 zero-shot 和 five-shot 准确率。我们注意到,对于大多数指令模型来说,five-shot 的准确率要优于 zero-shot。 + +### 👀 DevOps + +#### Zero Shot + +| **模型** | plan | code | build | test | release | deploy | operate | monitor | **平均分** | +| :----------------: | :---: | :---: | :---: | :---: | :-----: | :----: | :-----: | :-----: | :--------: | +| DevOpsPal-14B-Chat | 60.61 | 78.35 | 84.86 | 84.65 | 87.26 | 82.75 | 69.89 | 79.17 | 78.23 | +| DevOpsPal-14B-Base | 54.55 | 77.82 | 83.49 | 85.96 | 86.32 | 81.96 | 71.18 | 82.41 | 78.23 | +| Qwen-14B-Chat | 60.61 | 75.4 | 85.32 | 84.21 | 89.62 | 82.75 | 69.57 | 80.56 | 77.18 | +| Qwen-14B-Base | 57.58 | 73.81 | 84.4 | 85.53 | 86.32 | 81.18 | 70.05 | 80.09 | 76.19 | +| Baichuan2-13B-Base | 60.61 | 69.42 | 79.82 | 79.82 | 82.55 | 81.18 | 70.37 | 83.8 | 73.73 | +| Baichuan2-13B-Chat | 60.61 | 68.43 | 77.98 | 80.7 | 81.6 | 83.53 | 67.63 | 84.72 | 72.9 | +| DevOpsPal-7B-Chat | 54.55 | 69.11 | 83.94 | 82.02 | 76.89 | 80 | 64.73 | 77.78 | 71.92 | +| DevOpsPal-7B-Base | 54.55 | 68.96 | 82.11 | 78.95 | 80.66 | 76.47 | 65.54 | 78.7 | 71.69 | +| Qwen-7B-Base | 53.03 | 68.13 | 78.9 | 75.44 | 80.19 | 80 | 65.06 | 80.09 | 71.09 | +| Qwen-7B-Chat | 57.58 | 66.01 | 80.28 | 79.82 | 76.89 | 77.65 | 62.64 | 79.17 | 69.75 | +| Baichuan2-7B-Chat | 54.55 | 63.66 | 77.98 | 76.32 | 71.7 | 73.33 | 59.42 | 79.63 | 66.97 | +| Internlm-7B-Chat | 60.61 | 62.15 | 77.06 | 76.32 | 66.98 | 74.51 | 60.39 | 78.24 | 66.27 | +| Baichuan2-7B-Base | 56.06 | 62.45 | 75.69 | 70.61 | 74.06 | 69.8 | 61.67 | 75.93 | 66.21 | +| Internlm-7B-Base | 54.55 | 58.29 | 79.36 | 78.95 | 77.83 | 70.59 | 65.86 | 75.93 | 65.99 | + +#### Five Shot + +| **模型** | plan | code | build | test | release | deploy | operate | monitor | **平均分** | +| :----------------: | :---: | :---: | :---: | :---: | :-----: | :----: | :-----: | :-----: | :--------: | +| DevOpsPal-14B-Chat | 63.64 | 79.49 | 81.65 | 85.96 | 86.79 | 86.67 | 72.95 | 81.48 | 79.69 | +| DevOpsPal-14B-Base | 62.12 | 
80.55 | 82.57 | 85.53 | 85.85 | 84.71 | 71.98 | 80.09 | 79.63 | +| Qwen-14B-Chat | 65.15 | 76 | 82.57 | 85.53 | 84.91 | 84.31 | 70.85 | 81.48 | 77.81 | +| Qwen-14B-Base | 66.67 | 76.15 | 84.4 | 85.53 | 86.32 | 80.39 | 72.46 | 80.56 | 77.56 | +| Baichuan2-13B-Base | 63.64 | 71.39 | 80.73 | 82.46 | 81.13 | 84.31 | 73.75 | 85.19 | 75.8 | +| Qwen-7B-Base | 75.76 | 72.52 | 78.9 | 81.14 | 83.96 | 81.18 | 70.37 | 81.94 | 75.36 | +| Baichuan2-13B-Chat | 62.12 | 69.95 | 76.61 | 84.21 | 83.49 | 79.61 | 71.98 | 80.56 | 74.12 | +| DevOpsPal-7B-Chat | 66.67 | 69.95 | 83.94 | 81.14 | 80.19 | 82.75 | 68.6 | 76.85 | 73.61 | +| DevOpsPal-7B-Base | 69.7 | 69.49 | 82.11 | 81.14 | 82.55 | 82.35 | 67.15 | 79.17 | 73.35 | +| Qwen-7B-Chat | 65.15 | 66.54 | 82.57 | 81.58 | 81.6 | 81.18 | 65.38 | 81.02 | 71.69 | +| Baichuan2-7B-Base | 60.61 | 67.22 | 76.61 | 75 | 77.83 | 78.43 | 67.31 | 79.63 | 70.8 | +| Internlm-7B-Chat | 60.61 | 63.06 | 79.82 | 80.26 | 67.92 | 75.69 | 60.06 | 77.31 | 69.21 | +| Baichuan2-7B-Chat | 60.61 | 64.95 | 81.19 | 75.88 | 71.23 | 75.69 | 64.9 | 79.17 | 69.05 | +| Internlm-7B-Base | 62.12 | 65.25 | 77.52 | 80.7 | 74.06 | 78.82 | 63.45 | 75.46 | 67.17 | + +### 🔥 AIOps + +
    + +#### Zero Shot + +| **模型** | 日志解析 | 根因分析 | 时序异常检测 | 时序分类 | 时序预测 | **平均分** | +| :----------------: | :------: | :------: | :----------: | :------: | :------: | :--------: | +| Qwen-14B-Base | 66.29 | 58.8 | 25.33 | 43.5 | 62.5 | 52.25 | +| DevOpsPal-14B—Base | 63.14 | 53.6 | 23.33 | 43.5 | 64.06 | 50.49 | +| Qwen-14B-Chat | 64.57 | 51.6 | 22.67 | 36 | 62.5 | 48.94 | +| DevOpsPal-14B—Chat | 60 | 56 | 24 | 43 | 57.81 | 48.8 | +| Qwen-7B-Base | 50 | 39.2 | 22.67 | 54 | 43.75 | 41.48 | +| DevOpsPal-7B—Chat | 56.57 | 30.4 | 25.33 | 45 | 44.06 | 40.92 | +| Baichuan2-13B-Chat | 64 | 18 | 21.33 | 37.5 | 46.88 | 39.3 | +| Qwen-7B-Chat | 57.43 | 38.8 | 22.33 | 39.5 | 25.31 | 36.97 | +| Internlm-7B—Chat | 58.86 | 8.8 | 22.33 | 28.5 | 51.25 | 36.34 | +| Baichuan2-7B-Chat | 60.86 | 10 | 28 | 34.5 | 39.06 | 36.34 | +| Baichuan2-7B-Base | 53.43 | 12.8 | 27.67 | 36.5 | 40.31 | 35.49 | +| Baichuan2-13B-Base | 54 | 12.4 | 23 | 34.5 | 42.81 | 34.86 | +| DevOpsPal-7B—Base | 46.57 | 20.8 | 25 | 34 | 38.75 | 33.94 | +| Internlm-7B—Base | 48.57 | 18.8 | 23.33 | 37.5 | 33.75 | 33.1 | + +#### One Shot + +| **模型** | 日志解析 | 根因分析 | 时序异常检测 | 时序分类 | 时序预测 | **平均分** | +| :----------------: | :------: | :------: | :----------: | :------: | :------: | :--------: | +| DevOpsPal-14B—Chat | 66.29 | 80.8 | 23.33 | 44.5 | 56.25 | 54.44 | +| DevOpsPal-14B—Base | 60 | 74 | 25.33 | 43.5 | 52.5 | 51.13 | +| Qwen-14B-Base | 64.29 | 74.4 | 28 | 48.5 | 40.31 | 50.77 | +| Qwen-7B-Base | 56 | 60.8 | 27.67 | 44 | 57.19 | 49.44 | +| Qwen-14B-Chat | 49.71 | 65.6 | 28.67 | 48 | 42.19 | 46.13 | +| Baichuan2-13B-Base | 56 | 43.2 | 24.33 | 41 | 46.88 | 42.89 | +| Baichuan2-7B-Chat | 58.57 | 31.6 | 27 | 31.5 | 51.88 | 41.83 | +| DevOpsPal-7B—Base | 52.86 | 44.4 | 28 | 44.5 | 36.25 | 41.2 | +| Baichuan2-7B-Base | 48.29 | 40.4 | 27 | 42 | 40.94 | 39.86 | +| Qwen-7B-Chat | 54.57 | 52 | 29.67 | 26.5 | 27.19 | 38.73 | +| Baichuan2-13B-Chat | 57.43 | 44.4 | 25 | 25.5 | 30.63 | 37.75 | +| DevOpsPal-7B—Chat | 56.57 | 27.2 
| 25.33 | 41.5 | 33.44 | 37.46 | +| Internlm-7B—Chat | 62.57 | 12.8 | 22.33 | 21 | 50.31 | 36.69 | +| Internlm-7B—Base | 48 | 33.2 | 29 | 35 | 31.56 | 35.85 | + +
    + +### 🔧 ToolLearning + +
    + +| **FuncCall-Filler** | dataset_name | fccr | 1-fcffr | 1-fcfnr | 1-fcfpr | 1-fcfnir | aar | +| :-----------------: | :----------: | :---: | :-----: | :-----: | :-----: | :------: | :---: | +| Qwen-14b-chat | luban | 61 | 100 | 97.68 | 63.32 | 100 | 69.46 | +| Qwen-7b-chat | luban | 50.58 | 100 | 98.07 | 52.51 | 100 | 63.59 | +| Baichuan-7b-chat | luban | 60.23 | 100 | 97.3 | 62.93 | 99.61 | 61.12 | +| Internlm-chat-7b | luban | 47.88 | 100 | 96.14 | 51.74 | 99.61 | 61.85 | +| Qwen-14b-chat | fc_data | 98.37 | 99.73 | 99.86 | 98.78 | 100 | 81.58 | +| Qwen-7b-chat | fc_data | 99.46 | 99.86 | 100 | 99.59 | 100 | 79.25 | +| Baichuan-7b-chat | fc_data | 97.96 | 99.32 | 100 | 98.64 | 100 | 89.53 | +| Internlm-chat-7b | fc_data | 94.29 | 95.78 | 100 | 98.5 | 100 | 88.19 | +| CodeLLaMa-7b | fc_data | 98.78 | 99.73 | 100 | 99.05 | 100 | 94.7 | +| CodeLLaMa-7b-16 | fc_data | 98.1 | 99.87 | 99.73 | 98.5 | 100 | 93.14 | +| CodeFuse-7b-4k | fc_data | 98.91 | 99.87 | 99.87 | 99.18 | 100 | 89.5 | + +
    diff --git a/content/en/docs/devops_eval/c1.data.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/data.en-US.md similarity index 76% rename from content/en/docs/devops_eval/c1.data.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/data.en-US.md index 0175452..ec0e747 100644 --- a/content/en/docs/devops_eval/c1.data.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/data.en-US.md @@ -1,36 +1,61 @@ --- +nav: + title: Docs + order: -1 + second: + title: 开发者文档 + order: -1 +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 title: Data -slug: Data -description: 介绍主要功能 -url: "docs/data" -aliases: -- "/docs/data" +order: -1 +toc: content --- ## ⏬ Data + #### Download -* Method 1: Download the zip file (you can also simply open the following link with the browser): + +- Method 1: Download the zip file (you can also simply open the following link with the browser): + ``` wget https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip ``` + then unzip it and you may load the data with pandas: + ``` import os import pandas as pd - + File_Dir="devopseval-exam" test_df=pd.read_csv(os.path.join(File_Dir,"test","UnitTesting.csv")) ``` -* Method 2: Directly load the dataset using [Hugging Face datasets](https://huggingface.co/datasets/codefuse-admin/devopseval-exam): - ```python + +- Method 2: Directly load the dataset using [Hugging Face datasets](https://huggingface.co/datasets/codefuse-admin/devopseval-exam): + + ````python from datasets import load_dataset dataset=load_dataset(r"DevOps-Eval/devopseval-exam",name="UnitTesting") - + print(dataset['val'][0]) # {"id": 1, "question": "单元测试应该覆盖以下哪些方面?", "A": "正常路径", "B": "异常路径", "C": "边界值条件","D": 所有以上,"answer": "D", "explanation": ""} ``` + ```` + +- Method 3: Use [modelscope](https://modelscope.cn/datasets/codefuse-ai/devopseval-exam/files) download all datas。Examples: + ````python + from 
modelscope.msdatasets import MsDataset + MsDataset.clone_meta(dataset_work_dir='./xxx', dataset_id='codefuse-ai/devopseval-exam')``` + ```` + #### 👀 Notes -To facilitate usage, we have organized the category name handlers and English/Chinese names corresponding to 55 subcategories. Please refer to [category_mapping.json](/images/devops_eval/categroy_mapping.json) for details. The format is: + +To facilitate usage, we have organized the category name handlers and English/Chinese names corresponding to 55 subcategories. Please refer to [category_mapping.json](./categroy_mapping) for details. The format is: ``` { @@ -49,7 +74,8 @@ To facilitate usage, we have organized the category name handlers and English/Ch ] } ``` -Each subcategory consists of two splits: dev and test. The dev set per subcategory consists of five exemplars with explanations for few-shot evaluation. And the test set is for model evaluation. Labels on the test split are also released. + +Each subcategory consists of two splits: dev and test. The dev set per subcategory consists of five exemplars with explanations for few-shot evaluation. And the test set is for model evaluation. Labels on the test split are also released. 
Below is a dev example from 'version control': @@ -61,14 +87,17 @@ B: 使用命令 `git log --name-only SHA` C: 使用命令 `git commit --name-only SHA` D: 使用命令 `git clone --name-only SHA` answer: A -explanation: +explanation: 分析原因: git diff --name-only SHA命令会显示与SHA参数对应的提交中已修改的文件列表。参数--name-only让命令只输出文件名,而忽略其他信息。其它选项中的命令并不能实现此功能。 ``` + #### 🔥 AIOps Sample Example + 👀 👀 Taking **log parsing** and **time series anomaly detection** as examples, here is a brief showcase of the AIOps samples: LogParsing + ``` id: 0 question: @@ -81,7 +110,7 @@ Here are some running logs 5 16:37:13,837 WARN Cannot open channel to 2 at election address /10.10.34.12:3888 6 09:09:16,008 WARN Cannot open channel to 3 at election address /10.10.34.13:3888 7 15:27:03,681 WARN Cannot open channel to 3 at election address /10.10.34.13:3888 -The first three parts of the log are index, timestamp, and log level. Without considering these three parts, Here we assume that the variables in the logs are represented as '<*>', separated by spaces between tokens. What is the specific log template for the above logs? +The first three parts of the log are index, timestamp, and log level. Without considering these three parts, Here we assume that the variables in the logs are represented as '<*>', separated by spaces between tokens. What is the specific log template for the above logs? A: Notification time out: <*> 和 Connection broken for id <*>, my id = <*>, error = B: Send worker leaving thread 和 Connection broken for id <*>, my id = <*>, error = C: Received connection request /<*>:<*> 和 Interrupting SendWorker @@ -89,7 +118,9 @@ D: Cannot open channel to <*> at election address /<*>:<*> 和 ******* GOODBYE / answer: D explanation: The log includes the fixed template fragments "Cannot open channel to <> at election address /<>:<>" and "****** GOODBYE /<>:<> ********," both of which appear in option D. Meanwhile, the template fragments in the other options do not match the content in the log. 
Therefore, option D is the most consistent with the log template. ``` + +TimeSeriesAnomalyDetection + ``` id: 0 question: @@ -103,9 +134,11 @@ D: 12 answer: D explanation: According to the analysis, the value 265 in the given time series at 12 o'clock is significantly larger than the surrounding data, indicating a sudden increase phenomenon. Therefore, selecting option D is correct. ``` + +#### 🔧 ToolLearning Sample Example + -👀 👀The data format of ToolLearning samples is compatible with OpenAI's Function Calling. +👀 👀 The data format of ToolLearning samples is compatible with OpenAI's Function Calling. + -Please refer to [tool_learning_info.md](/docs/devops_eval/tool_learning_info.md) for details. +Please refer to [tool_learning_info.md](/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh) for details. +For Tool Learning data evaluation, see [tool_learning_evalution.md](/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution).
    diff --git a/content/zh/docs/devops_eval/c1.data.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/data.zh-CN.md similarity index 79% rename from content/zh/docs/devops_eval/c1.data.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/data.zh-CN.md index f9e1a14..7638a6b 100644 --- a/content/zh/docs/devops_eval/c1.data.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/data.zh-CN.md @@ -1,42 +1,63 @@ --- -title: 数据 -slug: 数据 -description: 介绍主要功能 -url: "docs/数据介绍" -aliases: -- "/docs/数据介绍" +nav: + title: 文档 + order: -1 + second: + title: 开发者文档 + order: -1 +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +title: 数据介绍 +order: -1 +toc: content --- ## ⏬ 数据 + #### 下载 -* 方法一:下载zip压缩文件(你也可以直接用浏览器打开下面的链接): + +- 方法一:下载 zip 压缩文件(你也可以直接用浏览器打开下面的链接): + ``` wget https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip ``` - 然后可以使用 pandas加载数据: + + 然后可以使用 pandas 加载数据: ``` import os import pandas as pd - + File_Dir="devopseval-exam" test_df=pd.read_csv(os.path.join(File_Dir,"test","UnitTesting.csv")) ``` -* 方法二:使用[Hugging Face datasets](https://huggingface.co/datasets/codefuse-admin/devopseval-exam)直接加载数据集。示例如下: - ```python + +- 方法二:使用[Hugging Face datasets](https://huggingface.co/datasets/codefuse-admin/devopseval-exam)直接加载数据集。示例如下: + + ````python from datasets import load_dataset dataset=load_dataset(r"DevOps-Eval/devopseval-exam",name="UnitTesting") - + print(dataset['val'][0]) # {"id": 1, "question": "单元测试应该覆盖以下哪些方面?", "A": "正常路径", "B": "异常路径", "C": "边界值条件","D": 所有以上,"answer": "D", "explanation": ""} ``` - -* 方法三:使用modelscope下载相关所有数据。示例如下: - ```python + + ```` + +- 方法三:使用[modelscope](https://modelscope.cn/datasets/codefuse-ai/devopseval-exam/files)下载相关所有数据。示例如下: + + ````python from modelscope.msdatasets import MsDataset MsDataset.clone_meta(dataset_work_dir='./xxx', dataset_id='codefuse-ai/devopseval-exam')``` - + + ```` + #### 👀 说明 
-为了方便使用,我们已经整理出了 55 个细分类别以及它们的中英文名称。具体细节请查看 [category_mapping.json](resources/categroy_mapping.json) 。格式如下: + +为了方便使用,我们已经整理出了 55 个细分类别以及它们的中英文名称。具体细节请查看 [category_mapping.json](./categroy_mapping) 。格式如下: ``` { @@ -55,9 +76,11 @@ aliases: ] } ``` -每个细分类别由两个部分组成:dev 和 test。每个细分类别的 dev 集包含五个示范实例以及为 few-shot 评估提供的解释。而 test 集则用于模型评估,并且test数据已包含准确标签。 + +每个细分类别由两个部分组成:dev 和 test。每个细分类别的 dev 集包含五个示范实例以及为 few-shot 评估提供的解释。而 test 集则用于模型评估,并且 test 数据已包含准确标签。 下面是 dev 数据的示例,来自"版本控制"细分类别: + ``` id: 4 question: 如何找到Git特定提交中已更改的文件列表? @@ -66,14 +89,17 @@ B: 使用命令 `git log --name-only SHA` C: 使用命令 `git commit --name-only SHA` D: 使用命令 `git clone --name-only SHA` answer: A -explanation: +explanation: 分析原因: git diff --name-only SHA命令会显示与SHA参数对应的提交中已修改的文件列表。参数--name-only让命令只输出文件名,而忽略其他信息。其它选项中的命令并不能实现此功能。 ``` -#### 🔥 AIOps样本示例 -👀 👀 此处以日志解析和时序异常检测为例,对AIOps样本做一些简要的展示: + +#### 🔥 AIOps 样本示例 + +👀 👀 此处以日志解析和时序异常检测为例,对 AIOps 样本做一些简要的展示: 日志解析 + ``` id: 0 question: @@ -94,7 +120,9 @@ D: Cannot open channel to <*> at election address /<*>:<*> 和 ******* GOODBYE / answer: D explanation: 根据日志中的内容,选项D是最符合日志模板的。日志中包含了"Cannot open channel to <*> at election address /<*>:<*>"和"******* GOODBYE /<*>:<*> ********"这两个固定的模板片段,它们都在选项D中出现了。同时,其他选项中的模板片段与日志中的内容不匹配。因此,选项D是最符合日志模板的。 ``` + 时序异常检测 + ``` id: 0 question: @@ -108,8 +136,10 @@ D: 12 answer: D explanation: 根据分析,题目中的时间序列在12点出的值265要明显大于周围数据,存在着突增现象,因此选择D是正确的。 ``` -#### 🔧 ToolLearning样本示例 -工具学习样本的数据格式与OpenAI的函数调用格式兼容。 -详情请参阅[tool_learning_info_zh.md](/docs/devops_eval/tool_learning_info_zh.md)。 -工具学习评测过程,详情请参阅见 [tool_learning_evalution.md](/docs/devops_eval/tool_learning_evalution.md)。 + +#### 🔧 ToolLearning 样本示例 + +工具学习样本的数据格式与 OpenAI 的函数调用格式兼容。 +详情请参阅[tool_learning_info_zh.md](/zh-CN/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh)。 +工具学习评测过程,详情请参阅见 [tool_learning_evalution.md](/zh-CN/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution)。
    diff --git a/content/en/docs/devops_eval/c2.evaluate.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/evaluate.en-US.md similarity index 81% rename from content/en/docs/devops_eval/c2.evaluate.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/evaluate.en-US.md index 37b95ac..8c293a2 100644 --- a/content/en/docs/devops_eval/c2.evaluate.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/evaluate.en-US.md @@ -1,33 +1,40 @@ --- +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 title: Evaluate -slug: Evaluate -description: 介绍主要功能 -url: "docs/codefuse-devops-eval-quickstart" -aliases: -- "/docs/codefuse-devops-eval-quickstart" +order: 0 +toc: content --- ## 🚀 How to Evaluate + If you need to test your own huggingface-formatted model, the overall steps are as follows: + 1. Write the loader function for the model. 2. Write the context_builder function for the model. 3. Register the model in the configuration file. 4. Run the testing script. -If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing. + If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing. #### 1. Write the loader function + If the model requires additional processing after loading (e.g. adjusting the tokenizer), you need to inherit the `ModelAndTokenizerLoader` class in `src.context_builder.context_builder_family.py` and override the corresponding `load_model` and `load_tokenizer` functions. 
You can refer to the following example: + ```python class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass - + @override def load_model(self, model_path: str): # Implementation of the method pass - + @override def load_tokenizer(self, model_path: str): # Implementation of the method @@ -35,12 +42,14 @@ class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): ``` #### 2. Write the context_builder function for the Model + If the input needs to be converted to a specific format (e.g. chatml format or other human-bot formats), you need to inherit the ContextBuilder class in `src.context_builder.context_builder_family` and override the make_context function. This function is used to convert the input to the corresponding required format. An example is shown below: + ```python class QwenChatContextBuilder(ContextBuilder): def __init__(self): super().__init__() - + @override def make_context(self, model, tokenizer, query: str, system: str = "hello!"): # Implementation of the method @@ -48,18 +57,22 @@ class QwenChatContextBuilder(ContextBuilder): ``` #### 3. Register the model in the configuration file + Go to the `model_conf.json` file in the conf directory and register the corresponding model name and the loader and context_builder that will be used for this model. Simply write the class names defined in the first and second steps for the loader and context_builder. Here is an example: + ```json { "Qwen-Chat": { - "loader": "QwenModelAndTokenizerLoader", - "context_builder": "QwenChatContextBuilder" + "loader": "QwenModelAndTokenizerLoader", + "context_builder": "QwenChatContextBuilder" } } ``` #### 4. 
Execute the testing script + Run the following code to initiate the test: + ```Bash python src/run_eval.py \ --model_path path_to_model \ @@ -71,6 +84,7 @@ python src/run_eval.py \ --data_path path_to_downloaded_devops_eval_data \ --k_shot 0 ``` -👀 👀 The specific evaluation process is as follows 📖 [**Evaluate Tutorial**](/docs/devops_eval/tutorial.md) -
    \ No newline at end of file +👀 👀 The specific evaluation process is as follows 📖 [**Evaluate Tutorial**](/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial) + +
    diff --git a/content/zh/docs/devops_eval/c2.evaluate.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/evaluate.zh-CN.md similarity index 76% rename from content/zh/docs/devops_eval/c2.evaluate.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/evaluate.zh-CN.md index 22f89ea..d591b32 100644 --- a/content/zh/docs/devops_eval/c2.evaluate.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/evaluate.zh-CN.md @@ -1,63 +1,78 @@ --- +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 title: 评测 -slug: 评测 -description: 介绍主要功能 -url: "docs/codefuse-devops-eval-quickstart-zh" -aliases: -- "/docs/codefuse-devops-eval-quickstart-zh" +order: 0 +toc: content --- ## 🚀 如何进行测试 + 如果需要在自己的 HuggingFace 格式的模型上进行测试的话,总的步骤分为如下几步: + 1. 编写 Model 的 loader 函数 2. 编写 Model 的 context_builder 函数 3. 注册模型到配置文件中 4. 执行测试脚本 -如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 + 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 #### 1. 编写 loader 函数 + 模型加载时还需要做一些额外的处理(e.g. tokenizer 调整),需要继承 `ModelAndTokenizerLoader` 类来覆写对应的 `load_model` 和 `load_tokenizer` 函数, 如下所示: + ```python class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass - + @override def load_model(self, model_path: str): # Implementation of the method pass - + @override def load_tokenizer(self, model_path: str): # Implementation of the method pass ``` + #### 2. 编写 Model 的 context_builder 函数 + 如果输入需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),则需要继承 ContextBuilder 类来覆写 make_context 函数,如下所示: + ```python class QwenChatContextBuilder(ContextBuilder): def __init__(self): super().__init__() - + @override def make_context(self, model, tokenizer, query: str, system: str = "hello!"): # Implementation of the method pass ``` + #### 3. 
注册模型到配置文件中 + 去 conf 中的 `model_conf.json`,注册对应的模型名和这个模型将要使用的 loader 和 context_builder,示例如下: + ```json { "Qwen-Chat": { - "loader": "QwenModelAndTokenizerLoader", - "context_builder": "QwenChatContextBuilder" + "loader": "QwenModelAndTokenizerLoader", + "context_builder": "QwenChatContextBuilder" } } ``` #### 4. 执行测试脚本 + 直接运行以下代码发起测试 + ```Bash python src/run_eval.py \ --model_path path_to_model \ @@ -69,5 +84,6 @@ python src/run_eval.py \ --data_path path_to_downloaded_devops_eval_data \ --k_shot 0 ``` -👀 👀 具体评测流程见📖 [**数据集评测教程**](/docs/devops_eval/tutorial_zh.md) + +👀 👀 具体评测流程见 📖 [**数据集评测教程**](/zh-CN/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial)
    diff --git a/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution.en-US.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution.en-US.md new file mode 100644 index 0000000..8024d7b --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution.en-US.md @@ -0,0 +1,254 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +# resource: true +# title: Tool Learning 数据集评测教程 +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +order: 1 +title: Tool Learning Evaluate +toc: content +--- + +## Tool Learning Dataset Evaluation Tutorial + +### ChatML Integration Methods + +To test on your own Huggingface-format model, the overall steps are divided into the following: + +1. Write the `create_prompts` function in `~/evals/FuncCallEvaluation` +2. Write the related functions in `~/models/base_model` +3. Register the model and evaluation functions +4. Execute the test script + If the model does not require special handling after loading, and the input does not need to be converted into a specific format (e.g. ChatML format or other human-bot formats), please skip directly to step four and start testing. + +#### 1. Write the Loader Function + +If the model needs additional processing after loading (e.g., tokenizer adjustments), you will need to inherit the `ModelAndTokenizerLoader` class in `src.context_builder.context_builder_family.py` and override the corresponding `load_model` and `load_tokenizer` functions. 
+ +An example is as follows: + +```python +class FuncCallEvalution(ToolEvalution): + + def create_prompts(self, func_call_datas): + ''' + datas: [ + { + "instruction": history[his_idx], + "input": "", + "output": output, + "history": [(human_content, ai_content), (), ()], + "functions": tools + } + ] + ''' + system_content = '''CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。 + 你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。''' + function_format = '''You are ToolGPT, you have access to the following APIs:\n{tools}''' + + func_call_train_datas = [] + history_error_cnt = 0 + funccall_error_cnt = 0 + + for data in func_call_datas: + tools = data["functions"] + chatrounds = data["chatrounds"] + + function_content = "" + if len(tools) > 0: + function_content = function_format.format(tools=json.dumps(tools, ensure_ascii=False, sort_keys=True)) + + history = [] + for i in chatrounds: + if i["role"]=="system": + continue + + if i["role"]=="user": + history.append(("user", i["content"])) + + if i["role"] == "assistant": + if "function_call" in i: + if not isinstance(i["function_call"], dict): + funccall_error_cnt+=1 + continue + content = "#function" + json.dumps({**{"content": i["content"]}, **i["function_call"]}, ensure_ascii=False) + else: + content = i["content"] + history.append(("assistant", content)) + + + if i["role"] == "function": + content = json.dumps({**{"content": i["content"]}, **{"name": i["name"]}}, ensure_ascii=False) + history.append(("user", content)) + + + history = [i[1] for i in history] + history[0] = "\n".join([system_content,function_content, history[0]]) + + for his_idx in range(0, len(history), 2): + output = history[his_idx+1] + + if "#function" in output: + output = output.split("#function")[-1] + + try: + output = json.loads(output) + except: + output = {"content": output} + + + func_call_train_datas.append( + { + 
"instruction": history[his_idx], + "input": "", + "output": output, + "history": [history[:his_idx+2][i:i+2] for i in range(0, len(history[:his_idx]), 2)], + "functions": tools + }, + ) + return func_call_train_datas +``` + +#### 2. Write the Model's Context Builder Function + +If the input needs to be converted to a specific format (e.g., ChatML format or other human-bot formats), then you will need to go to `src.context_builder.context_builder_family` and inherit the `ContextBuilder` class to override the `make_context` function. This function is used to convert the input to the corresponding required output. An example is as follows: + +```python +class ToolModel: + def __init__(self, model_path: str, template: str, trust_remote_code=True, tensor_parallel_size=1, gpu_memory_utilization=0.25): + self.model_path = model_path + self.trust_remote_code = trust_remote_code + self.tensor_parallel_size = tensor_parallel_size + self.gpu_memory_utilization = gpu_memory_utilization + self.load_model(self.model_path, self.trust_remote_code, self.tensor_parallel_size, self.gpu_memory_utilization) + + def generate(self, prompts: str, template: str = None, generate_configs: GenerateConfigs = None) -> list: + '''产出对应结果''' + pass + + def generate_params( + self, generate_configs: GenerateConfigs, + ): + '''generate param''' + kargs = generate_configs.dict() + return kargs + + def load_model(self, model_path, trust_remote_code=True, tensor_parallel_size=1, gpu_memory_utilization=0.25): + '''加载模型''' + self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=trust_remote_code) + self.model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map="auto", trust_remote_code=trust_remote_code).eval() + + # self.model = LLM(model=model_path, trust_remote_code=trust_remote_code, tensor_parallel_size=tensor_parallel_size, gpu_memory_utilization=gpu_memory_utilization) +``` + +#### 3. 
Register the Model and Eval Function + +Registration can be done in `~/models/__init__.py` + +```python +from .base_model import ToolModel +__all__ = [ + "ToolModel", +] +``` + +Registration can also be done in `~/evals/__init__.py` + +```python +from .base_evaluation import ToolEvaluation +from .toolfill_evaluation import ToolFillEvaluation +from .toolparser_evaluation import ToolParserEvaluation +from .toolsummary_evaluation import ToolSummaryEvaluation +from .func_call_evaluation import FuncCallEvaluation +__all__ = [ + "ToolEvaluation", "ToolFillEvaluation", "ToolParserEvaluation", "ToolSummaryEvaluation", "FuncCallEvaluation" +] +``` + +#### 4. Execute the Test Script + +Modify `~/src/qwen_eval_main.py# datainfos` and `model_infos` + +```python +model_infos = [ + {"model_name": "", "template": "chatml", "model_path": "", + "peft_path": "", "model_class": QwenModel}] +datainfos = [ + {"dataset_path": "~/fcdata_luban_zh_test.jsonl", "dataset_name": "fcdata_luban_zh", "tool_task": "func_call"}, + {"dataset_path": "~/test_datas/fcdata_zh_test_v1.jsonl", "dataset_name": "fcdata_zh", "tool_task": "func_call"}, +] +``` + +To execute, run the following command: + +```Bash +python qwen_eval_main.py +``` + +
    + +### Non-ChatML Integration + +To test on your own Huggingface-format model, the overall steps are divided into the following: + +1. Write related code in `~/getAssistantAns.py` +2. Execute the test script + +#### 1. Writing the `getAssistantAns` Example + +``` +class GetAssistantAns(): + # 按照自己推理需求自己修改代码 + + def __init__(self, gpu_num=1): + model = AutoModelForCausalLM.from_pretrained(model_name) + device_list = [] + for gpu_idx in range(gpu_num): + device_list.append(torch.device("cuda:0")) + + # 将模型移动到指定的GPU设备 + model.to(device) + + + def gen_answer(self, chat_dict, gpu_index): + # 这里实际根据自己推理逻辑 然后转为标准格式返回 + # 以下仅仅是样例 + import time + print(os.environ["CUDA_VISIBLE_DEVICES"]) + time.sleep(1) + rtn_dict1 = { + "role": "assistant", + "content": None, + "function_call": + { + "name": "get_fudan_university_scoreline", + "arguments": "{\n \"year\": \"2020\"\n}" + } + } + + rtn_dict2 = { + "role": "assistant", + "content": "2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分" + } + + return random.choice([rtn_dict1, rtn_dict2]) +``` + +#### 2. 
Execute the Test Script + +Modify `~/src/opensource_functioncall_evaluation.py # test_ans_file_list` + +```python +test_ans_file_list = [ + "fcdata_zh_test.jsonl" + ] +``` + +To execute, run the following command: + +```Bash +python opensource_functioncall_evaluation.py +``` diff --git a/content/zh/docs/devops_eval/tool_learning_evalution.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution.zh-CN.md similarity index 90% rename from content/zh/docs/devops_eval/tool_learning_evalution.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution.zh-CN.md index 6583de3..7c36d11 100644 --- a/content/zh/docs/devops_eval/tool_learning_evalution.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_evalution.zh-CN.md @@ -1,15 +1,31 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +order: 1 +title: Tool Learning 数据集评测教程 +toc: content +--- + ## tool learning 数据集评测教程 -### chatml接入方式 +### chatml 接入方式 + 如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步: + 1. 编写 ~/evals/FuncCallEvalution 的 create_prompts 函数 2. 编写 ~/models/base_model 的 相关函数 3. 注册模型和评估函数 4. 执行测试脚本 -如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 + 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 #### 1. 编写 loader 函数 + 如果模型在加载进来还需要做一些额外的处理(e.g. 
tokenizer 调整),需要去 `src.context_builder.context_builder_family.py` 中继承 `ModelAndTokenizerLoader` 类来覆写对应的 `load_model` 和 `load_tokenizer` 函数,具体可以参照以下示例: + ```python class FuncCallEvalution(ToolEvalution): @@ -17,9 +33,9 @@ class FuncCallEvalution(ToolEvalution): ''' datas: [ { - "instruction": history[his_idx], + "instruction": history[his_idx], "input": "", - "output": output, + "output": output, "history": [(human_content, ai_content), (), ()], "functions": tools } @@ -58,16 +74,16 @@ class FuncCallEvalution(ToolEvalution): else: content = i["content"] history.append(("assistant", content)) - - + + if i["role"] == "function": content = json.dumps({**{"content": i["content"]}, **{"name": i["name"]}}, ensure_ascii=False) history.append(("user", content)) - - + + history = [i[1] for i in history] history[0] = "\n".join([system_content,function_content, history[0]]) - + for his_idx in range(0, len(history), 2): output = history[his_idx+1] @@ -82,9 +98,9 @@ class FuncCallEvalution(ToolEvalution): func_call_train_datas.append( { - "instruction": history[his_idx], + "instruction": history[his_idx], "input": "", - "output": output, + "output": output, "history": [history[:his_idx+2][i:i+2] for i in range(0, len(history[:his_idx]), 2)], "functions": tools }, @@ -93,7 +109,9 @@ class FuncCallEvalution(ToolEvalution): ``` #### 2. 编写 Model 的 context_builder 函数 + 如果输入需要转换为特定的格式(e.g. 
chatml 格式或者其他的 human-bot 格式),则需要去 `src.context_builder.context_builder_family` 中继承 ContextBuilder 类来覆写 make_context 函数,这个函数是用来将输入转换格式为对应需要的输出的,一个示例如下: + ```python class ToolModel: def __init__(self, model_path: str, template: str, trust_remote_code=True, tensor_parallel_size=1, gpu_memory_utilization=0.25): @@ -113,7 +131,7 @@ class ToolModel: '''generate param''' kargs = generate_configs.dict() return kargs - + def load_model(self, model_path, trust_remote_code=True, tensor_parallel_size=1, gpu_memory_utilization=0.25): '''加载模型''' self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=trust_remote_code) @@ -122,16 +140,20 @@ class ToolModel: # self.model = LLM(model=model_path, trust_remote_code=trust_remote_code, tensor_parallel_size=tensor_parallel_size, gpu_memory_utilization=gpu_memory_utilization) ``` -#### 3. 注册模型和eval函数即可 -在 ~/models/__init__.py 中注册即可 +#### 3. 注册模型和 eval 函数即可 + +在 ~/models/**init**.py 中注册即可 + ```python from .base_model import ToolModel __all__ = [ - "ToolModel", + "ToolModel", ] ``` -在 ~/evasl/__init__.py 中注册即可 + +在 ~/evasl/**init**.py 中注册即可 + ```python from .base_evalution import ToolEvalution from .toolfill_evalution import ToolFillEvalution @@ -145,9 +167,10 @@ __all__ = [ ] ``` - #### 4. 执行测试脚本 -修改 ~/src/qwen_eval_main.py# datainfos和model_infos + +修改 ~/src/qwen_eval_main.py# datainfos 和 model_infos + ```python model_infos = [ {"model_name": "", "template": "chatml", "model_path": "", @@ -160,19 +183,22 @@ datainfos = [ ``` 运行下述命令即可 + ```Bash python qwen_eval_main.py ```
    -### 非chatml接入 +### 非 chatml 接入 + 如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步: + 1. 编写 ~/getAssistantAns.py 相关代码 2. 执行测试脚本 - #### 1、编写 getAssistantAns 示例 + ``` class GetAssistantAns(): # 按照自己推理需求自己修改代码 @@ -210,8 +236,11 @@ class GetAssistantAns(): return random.choice([rtn_dict1, rtn_dict2]) ``` + #### 2、执行测试脚本 + 修改 ~/src/opensource_functioncall_evalution.py # test_ans_file_list + ```python test_ans_file_list = [ "fcdata_zh_test.jsonl" @@ -219,6 +248,7 @@ test_ans_file_list = [ ``` 运行下述命令即可 + ```Bash python opensource_functioncall_evalution.py ``` diff --git a/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh.en-US.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh.en-US.md new file mode 100644 index 0000000..95aa2ea --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh.en-US.md @@ -0,0 +1,99 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +# resource: true +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +order: 2 +title: Tool Learning Data +toc: content +--- + +### Data Example + +We are fully compatible with OpenAI Function Calling in terms of data, with the format as follows: +**Data format for Function Call** + +| Input Key | Input Type | Input Description | +| ---------- | --------------- | ------------------------- | +| functions | List[Swagger] | Collection of tools | +| chatrounds | List[chatround] | Multi-round dialogue data | + +**Data format for chatrounds** + +| Input Key | Input Type | Input Description | +| ------------- | ---------- | -------------------------------------------------------------------------------------- | +| role | string | Role name, includes three categories: user, assistant, function | +| name | string | If the role is function, then the name field exists, which is the name of the function | +| content | string | Content returned by the role | +| function_call | dict | Tool invocation | + +``` +{ + "functions": + 
[ + { + "name": "get_fudan_university_scoreline", + "description": "Query the past years' cut-off scores for Fudan University, for example: querying the 2020 cut-off scores for Fudan University", + "parameters": + { + "type": "object", + "properties": + { + "year": + { + "type": "string", + "description": "Year, for example: 2020, 2019, 2018" + } + }, + "required": + [ + "year" + ] + } + } + ], + "chatrounds": + [ + { + "role": "system", + "content": "CodeFuse is an intelligent assistant targeted at the R&D sector, aiming to help users solve development-related issues in a neutral and harmless manner. All responses are returned in Markdown format.\nYou can utilize many tools and functions to complete the given tasks. In each step, you need to analyze the current state and determine the next course of action through function calls. You can attempt multiple times. If you plan to continuously try different conditions, please try one condition at a time. If a Finish function is provided, it ends with a Finish call, otherwise, it concludes with a dialogue without a function_call." 
+ }, + { + "role": "user", + "content": "Query the 2020 cut-off scores for Fudan University" + }, + { + "role": "assistant", + "content": null, + "function_call": + { + "name": "get_fudan_university_scoreline", + "arguments": "{\n \"year\": \"2020\"\n}" + } + }, + { + "role": "function", + "name": "get_fudan_university_scoreline", + "content": "{\n \"scoreline\":{\n \"Liberal Arts first tier\": 630, \n \"Liberal Arts second tier\": 610, \n \"Science first tier\": 650, \n \"Science second tier\": 630 \n }\n}" + }, + { + "role": "assistant", + "content": "The 2020 cut-off scores for Fudan University are as follows:\n\n- Liberal Arts first tier: 630 points\n- Liberal Arts second tier: 610 points\n- Science first tier: 650 points\n- Science second tier: 630 points" + } + ] +} +``` + +The above data example of Function Call is used to answer users' queries about the admission scores of a certain university after a specific set of tools is provided. + +### Evaluation Metrics + +Since general models generally lack the capability of tool invocation, it is necessary to fine-tune the general model before performing Tool Learn-Eval evaluation, to teach the model the basic paradigm of tool usage. +Below, we define several metrics for assessing the use of tools: + + + +The sum of ②③④⑤ represents the total number of tool invocation failures, with ⑤ being a special case of tool name recognition failure. 
diff --git a/content/en/docs/devops_eval/tool_learning_info.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh.zh-CN.md similarity index 60% rename from content/en/docs/devops_eval/tool_learning_info.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh.zh-CN.md index d3db092..64a9d69 100644 --- a/content/en/docs/devops_eval/tool_learning_info.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tool_learning_info_zh.zh-CN.md @@ -1,21 +1,36 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +# resource: true +# title: Tool Learning 数据样例 +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +order: 2 +title: Tool Learning 数据样例 +toc: content +--- + ### 数据样例 + 在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下: -**Function Call的数据格式** +**Function Call 的数据格式** -| Input Key | Input Type | Input Description | -| --- | --- | --- | -| functions | List[Swagger] | 工具集合 | -| chatrounds | List[chatround] | 多轮对话数据 | +| Input Key | Input Type | Input Description | +| ---------- | --------------- | ----------------- | +| functions | List[Swagger] | 工具集合 | +| chatrounds | List[chatround] | 多轮对话数据 | -**chatrounds的数据格式** +**chatrounds 的数据格式** -| Input Key | Input Type | Input Description | -| --- | --- | --- | -| role | string | 角色名称,包含三种类别,user、assistant、function | -| name | string | 若role为function,则存在name字段,为function的名称 | -| content | string | role的返回内容 | -| function_call | dict | 工具调用 | +| Input Key | Input Type | Input Description | +| ------------- | ---------- | --------------------------------------------------------- | +| role | string | 角色名称,包含三种类别,user、assistant、function | +| name | string | 若 role 为 function,则存在 name 字段,为 function 的名称 | +| content | string | role 的返回内容 | +| function_call | dict | 工具调用 | ``` { @@ -74,14 +89,14 @@ } ``` -上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。 - +上述 Function Call 的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。 ### 评测指标 -由于一般通用模型无法具备工具调用的能力,因此在进行Tool 
Learn-Eval评测之前需要对通用模型进行微调,先让模型学会工具使用的基本范式 + +由于一般通用模型无法具备工具调用的能力,因此在进行 Tool Learn-Eval 评测之前需要对通用模型进行微调,先让模型学会工具使用的基本范式 下面,我们定义了几种评估工具使用的指标: - + -②③④⑤的和为1,代表工具调用失败的总数,⑤工具幻觉是工具名识别失败的一种特殊情况 \ No newline at end of file +②③④⑤ 的和为 1,代表工具调用失败的总数,⑤ 工具幻觉是工具名识别失败的一种特殊情况 diff --git a/content/en/docs/devops_eval/tutorial.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial.en-US.md similarity index 90% rename from content/en/docs/devops_eval/tutorial.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial.en-US.md index 3ec8b47..58511bc 100644 --- a/content/en/docs/devops_eval/tutorial.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial.en-US.md @@ -1,21 +1,38 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +# resource: true +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +order: 3 +title: Evaluate Tutorial +toc: content +--- + ## Evaluate Tutorial ## 🚀 How to Evaluate + If you need to test your own huggingface-formatted model, the overall steps are as follows: + 1. Write the loader function for the model. 2. Write the context_builder function for the model. 3. Register the model in the configuration file. 4. Run the testing script. -If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing. + If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing. #### 1. Write the loader function + If the model requires additional processing after loading (e.g. 
adjusting the tokenizer), you need to inherit the `ModelAndTokenizerLoader` class in `src.context_builder.context_builder_family.py` and override the corresponding `load_model` and `load_tokenizer` functions. You can refer to the following example: + ```python class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass - + def load_model(self, model_path: str): model = super().load_model(model_path) model.generation_config = GenerationConfig.from_pretrained(model_path) @@ -23,7 +40,7 @@ class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def load_tokenizer(self, model_path: str): tokenizer = super().load_tokenizer(model_path) - + # read generation config with open(model_path + '/generation_config.json', 'r') as f: generation_config = json.load(f) @@ -33,16 +50,18 @@ class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): ``` #### 2. Write the context_builder function for the Model + If the input needs to be converted to a specific format (e.g. chatml format or other human-bot formats), you need to inherit the ContextBuilder class in `src.context_builder.context_builder_family` and override the make_context function. This function is used to convert the input to the corresponding required format. An example is shown below: + ```python class QwenChatContextBuilder(ContextBuilder): def __init__(self): super().__init__() - + def make_context( self, model, - tokenizer, + tokenizer, query: str, system: str = "you are a helpful assistant" ): @@ -85,18 +104,22 @@ class QwenChatContextBuilder(ContextBuilder): ``` #### 3. Register the model in the configuration file + Go to the `model_conf.json` file in the conf directory and register the corresponding model name and the loader and context_builder that will be used for this model. Simply write the class names defined in the first and second steps for the loader and context_builder. 
Here is an example: + ```json { "Qwen-Chat": { - "loader": "QwenModelAndTokenizerLoader", - "context_builder": "QwenChatContextBuilder" + "loader": "QwenModelAndTokenizerLoader", + "context_builder": "QwenChatContextBuilder" } } ``` #### 4. Execute the testing script + Run the following code to initiate the test: + ```Bash # model_path: path to the model for testing # model_name: the model name corresponding to the model in the configuration file, default is Default, which represents using the default loader and context_builder @@ -119,6 +142,7 @@ python src/run_eval.py \ ``` For example, if the evaluation dataset is downloaded to `folder1`, the code is placed in `folder2`, and the model is in `folder3`, and the model does not require custom loader and context_builder, and all zero-shot scores of all datasets need to be tested, you can use the following script to initiate the test: + ```Bash python folder2/src/run_eval.py \ --model_path folder3 \ @@ -130,4 +154,5 @@ python folder2/src/run_eval.py \ --data_path folder2 \ --k_shot 0 ``` -
    \ No newline at end of file + +
    diff --git a/content/zh/docs/devops_eval/tutorial_zh.md b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial.zh-CN.md similarity index 86% rename from content/zh/docs/devops_eval/tutorial_zh.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial.zh-CN.md index ca303e3..0338bbb 100644 --- a/content/zh/docs/devops_eval/tutorial_zh.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Eval/master/tutorial.zh-CN.md @@ -1,29 +1,46 @@ +--- +store: + title: CodeFuse-DevOps-Eval + version: master +# resource: true +group: + title: 🌱 CodeFuse-DevOps-Eval + order: -1 +order: 3 +title: 数据集评测教程 +toc: content +--- + ## 数据集评测教程 ## 🚀 如何进行测试 + 如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步: + 1. 编写 Model 的 loader 函数 2. 编写 Model 的 context_builder 函数 3. 注册模型到配置文件中 4. 执行测试脚本 -如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 + 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 #### 1. 编写 loader 函数 + 如果模型在加载进来还需要做一些额外的处理(e.g. tokenizer 调整),需要去 `src.context_builder.context_builder_family.py` 中继承 `ModelAndTokenizerLoader` 类来覆写对应的 `load_model` 和 `load_tokenizer` 函数,具体可以参照以下示例: + ```python class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass - + def load_model(self, model_path: str): model = super().load_model(model_path) model.generation_config = GenerationConfig.from_pretrained(model_path) return model - + def load_tokenizer(self, model_path: str): tokenizer = super().load_tokenizer(model_path) - + # read generation config with open(model_path + '/generation_config.json', 'r') as f: generation_config = json.load(f) @@ -33,16 +50,18 @@ class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): ``` #### 2. 编写 Model 的 context_builder 函数 + 如果输入需要转换为特定的格式(e.g. 
chatml 格式或者其他的 human-bot 格式),则需要去 `src.context_builder.context_builder_family` 中继承 ContextBuilder 类来覆写 make_context 函数,这个函数是用来将输入转换格式为对应需要的输出的,一个示例如下: + ```python class QwenChatContextBuilder(ContextBuilder): def __init__(self): super().__init__() - + def make_context( self, model, - tokenizer, + tokenizer, query: str, system: str = "you are a helpful assistant" ): @@ -85,19 +104,22 @@ class QwenChatContextBuilder(ContextBuilder): ``` #### 3. 注册模型到配置文件中 + 去 conf 中的 `model_conf.json`,注册对应的模型名和这个模型将要使用的 loader 和 context_builder,其中 loader 和 context_builder 写第一步和第二步中自定义的类名就可以,示例如下: + ```json { "Qwen-Chat": { - "loader": "QwenModelAndTokenizerLoader", - "context_builder": "QwenChatContextBuilder" + "loader": "QwenModelAndTokenizerLoader", + "context_builder": "QwenChatContextBuilder" } } ``` - #### 4. 执行测试脚本 + 直接运行以下代码发起测试 + ```Bash # model_path: 要测试的模型路径 # model_name: 模型配置文件对应的模型命名,默认为 Default ,代表走默认的 loader 和 context_builder @@ -108,7 +130,7 @@ class QwenChatContextBuilder(ContextBuilder): # data_path: 评测数据集地址,填写下载数据集后的地址就可以 # k_shot: 支持 0-5,代表 few-shot 会给模型前缀加的示例数量 - + python src/run_eval.py \ --model_path path_to_model \ --model_name model_name_in_conf \ @@ -120,7 +142,8 @@ python src/run_eval.py \ --k_shot 0 ``` -举个🌰:比如评测数据集下载到了 `folder1`,代码放在了 `folder2`,模型在 `folder3`,模型不需要自定义 loader 和 context_builder,需要测试所有的数据集的 zero-shot 得分,那可以按照以下脚本发起测试: +举个 🌰:比如评测数据集下载到了 `folder1`,代码放在了 `folder2`,模型在 `folder3`,模型不需要自定义 loader 和 context_builder,需要测试所有的数据集的 zero-shot 得分,那可以按照以下脚本发起测试: + ```Bash python folder2/src/run_eval.py \ --model_path folder3 \ @@ -132,4 +155,5 @@ python folder2/src/run_eval.py \ --data_path folder2 \ --k_shot 0 ``` -
    \ No newline at end of file + +
    diff --git a/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel.en-US.md b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel.en-US.md new file mode 100644 index 0000000..3058240 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel.en-US.md @@ -0,0 +1,74 @@ +--- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-DevOps-Model + version: main +group: + title: 🌱 CodeFuse-DevOps-Model + index: true + order: -1 +title: CodeFuse-DevOps-Model +order: -1 +toc: content +--- + +## codeFuse-devops-model + +DevOps-Model is a large language model for the Chinese DevOps field jointly released by Ant Group and Peking University. By collecting professional data related to the DevOps domain and conducting additional training and alignment on the model, a large model has been produced to help engineers enhance efficiency throughout the entire development and operations lifecycle. This fills the current gap in large models within the DevOps domain, with the aim to provide solutions to any problems by asking DevOps-Model! +We have now open-sourced two versions of the model, the Base model with additional training and the Chat model after alignment, in both 7B and 14B specifications, as well as the corresponding training code. We welcome everyone to collaborate and contribute! 
+ +## Project Address + +GitHub Address: https://github.com/codefuse-ai/CodeFuse-DevOps-Model/tree/main +ModelScope Address: + +- DevOps-Model-7B-Base: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Base/summary +- DevOps-Model-7B-Chat: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Chat/summary +- DevOps-Model-14B-Base: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Base/summary +- DevOps-Model-14B-Chat: https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Chat/summary + +## Evaluation Questions + +For model evaluation, there was initially no benchmark for testing in the DevOps domain, so we first selected some domain-related multiple-choice questions from general open-source tests for evaluation. The specific test data is as follows: + +| Dataset | Subject | Total Questions | +| -------- | --------------------- | --------------- | +| CMMLU | Computer science | 204 | +| CMMLU | Computer security | 171 | +| CMMLU | Machine learning | 122 | +| CEval | college programming | 37 | +| CEval | computer_architecture | 21 | +| CEval | computer_network | 19 | +| Total | Total questions | 574 | + +## Evaluation Methods + +Since all are multiple-choice questions, we adopted the method of selecting the highest-scoring Token among the four option Tokens in the first Token produced by the model as the model's answer to the question. We also tested Zero-shot and Five-shot results. 
+ +## Evaluation Results + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*8RCfS6OraH4AAAAAAAAAAAAADlHYAQ/original) + +The specific scores are shown in the table below: + +| Scale of Parameters | Model | Model Size | Zero-shot Score | Five-shot Score | +| ------------------- | --------------------- | ---------- | --------------- | --------------- | +| 10+ B | DevOps-Model-14B-Base | 14B | 70.73 | 73.00 | +| 10+ B | Qwen-14B-Base | 14B | 69.16 | 71.25 | +| 10+ B | Baichuan2-13B-Base | 13B | 55.75 | 61.15 | +| 10+ B | DevOps-Model-14B-Chat | 14B | 74.04 | 75.96 | +| 10+ B | Qwen-14B-Chat | 14B | 69.16 | 70.03 | +| 10+ B | Baichuan2-13B-Chat | 13B | 52.79 | 55.23 | +| 7B | DevOps-Model-7B-Base | 7B | 62.72 | 62.02 | +| 7B | Qwen-7B-Base | 7B | 55.75 | 56.0 | +| 7B | Baichuan2-7B-Base | 7B | 49.30 | 55.4 | +| 7B | Internlm-7B-Base | 7B | 47.56 | 52.6 | +| 7B | DevOps-Model-7B-Chat | 7B | 62.20 | 64.11 | +| 7B | Qwen-7B-Chat | 7B | 46.00 | 52.44 | +| 7B | Baichuan2-7B-Chat | 7B | 52.26 | 54.46 | +| 7B | Internlm-7B-Chat | 7B | 52.61 | 55.75 | diff --git a/content/zh/docs/overview/b3.codefuseDevopsModel.md b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel.zh-CN.md similarity index 55% rename from content/zh/docs/overview/b3.codefuseDevopsModel.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel.zh-CN.md index 812d6f3..77505f4 100644 --- a/content/zh/docs/overview/b3.codefuseDevopsModel.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/codefuseDevopsModel.zh-CN.md @@ -1,61 +1,74 @@ --- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-DevOps-Model + version: main +group: + title: 🌱 CodeFuse-DevOps-Model + index: true + order: -1 title: CodeFuse-DevOps-Model -slug: CodeFuse-DevOps-Model-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-devops-model-zh" +order: -1 +toc: content --- ## codeFuse-devops-model -DevOps-Model 是蚂蚁集团联合北京大学发布面向中文 
DevOps 领域的大语言模型,通过收集 DevOps 领域相关的专业数据,再针对模型进行语言模型的加训和对齐训练,产出可以帮助工程师在整个开发运维生命周期提效的大模型。弥补当前大模型在 DevOps 领域的缺失,旨在做到有问题,问 DevOps-Model ! -当前我们已经开源了 7B 和 14B 两种规格的经过加训得 Base 模型和经过对齐后的 Chat 模型,同时还开源了对应的训练代码,欢迎大家一起合作建设! +DevOps-Model 是蚂蚁集团联合北京大学发布面向中文 DevOps 领域的大语言模型,通过收集 DevOps 领域相关的专业数据,再针对模型进行语言模型的加训和对齐训练,产出可以帮助工程师在整个开发运维生命周期提效的大模型。弥补当前大模型在 DevOps 领域的缺失,旨在做到有问题,问 DevOps-Model ! +当前我们已经开源了 7B 和 14B 两种规格的经过加训得 Base 模型和经过对齐后的 Chat 模型,同时还开源了对应的训练代码,欢迎大家一起合作建设! ## 项目地址 + Github 地址:https://github.com/codefuse-ai/CodeFuse-DevOps-Model/tree/main ModelScope 地址: + - DevOps-Model-7B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Base/summary - DevOps-Model-7B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Chat/summary - DevOps-Model-14B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Base/summary - DevOps-Model-14B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Chat/summary ## 评测考题 + 针对模型评测,最初并没有这样的一个 benchmark 用来 DevOps 领域进行测试,所以我们首先选用了一些通用开源测试中和 DevOps 领域相关的选择题进行测试,具体测试数据如下: -|数据集 |考试科目 |题目总数| +|数据集 |考试科目 |题目总数| | ---- | --------- | ----- | -|CMMLU |Computer science 204| -|Computer |security |171| -|Machine |learning |122| -|CEval |college programming| 37| -|CEval |computer_architecture| 21| -|CEval |computer_network |19| -|总计 |总计题目数 |574| - - +|CMMLU |Computer science 204| +|Computer |security |171| +|Machine |learning |122| +|CEval |college programming| 37| +|CEval |computer_architecture| 21| +|CEval |computer_network |19| +|总计 |总计题目数 |574| ## 评测方式 -由于都是单选题,我们采用的是选取模型产出的第一个 Token 中四个选项 Token 中得分最高的作为模型对于问题的回答。同时我们还测试了 Zero-shot 和 Five-shot 的结果。 +由于都是单选题,我们采用的是选取模型产出的第一个 Token 中四个选项 Token 中得分最高的作为模型对于问题的回答。同时我们还测试了 Zero-shot 和 Five-shot 的结果。 ## 评测结果 -![](/images/devops_model/devops_eval.webp) + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*8RCfS6OraH4AAAAAAAAAAAAADlHYAQ/original) 具体的得分如下表所示: -|参数量级| 模型 |模型大小 |Zero-shot 得分 |Five-shot 得分| -| - | ---- | --- | ---- | ---- 
| -|10+ B| DevOps-Model-14B-Base |14B |70.73 |73.00| -|10+ B|Qwen-14B-Base |14B |69.16| 71.25| -|10+ B|Baichuan2-13B-Base |13B |55.75| 61.15| -|10+ B|DevOps-Model-14B-Chat| 14B |74.04 |75.96| -|10+ B|Qwen-14B-Chat |14B |69.16| 70.03| -|10+ B|Baichuan2-13B-Chat |13B |52.79 |55.23| -|7B| DevOps-Model-7B-Base| 7B |62.72| 62.02| -|7B|Qwen-7B-Base| 7B| 55.75| 56.0| -|7B|Baichuan2-7B-Base| 7B |49.30| 55.4| -|7B|Internlm-7B-Base |7B |47.56 |52.6| -|7B|DevOps-Model-7B-Chat| 7B |62.20| 64.11| -|7B|Qwen-7B-Chat| 7B |46.00 |52.44| -|7B|Baichuan2-7B-Chat| 7B| 52.26| 54.46| -|7B|Internlm-7B-Chat |7B |52.61 |55.75| \ No newline at end of file +|参数量级| 模型 |模型大小 |Zero-shot 得分 |Five-shot 得分| +| - | ---- | --- | ---- | ---- | +|10+ B| DevOps-Model-14B-Base |14B |70.73 |73.00| +|10+ B|Qwen-14B-Base |14B |69.16| 71.25| +|10+ B|Baichuan2-13B-Base |13B |55.75| 61.15| +|10+ B|DevOps-Model-14B-Chat| 14B |74.04 |75.96| +|10+ B|Qwen-14B-Chat |14B |69.16| 70.03| +|10+ B|Baichuan2-13B-Chat |13B |52.79 |55.23| +|7B| DevOps-Model-7B-Base| 7B |62.72| 62.02| +|7B|Qwen-7B-Base| 7B| 55.75| 56.0| +|7B|Baichuan2-7B-Base| 7B |49.30| 55.4| +|7B|Internlm-7B-Base |7B |47.56 |52.6| +|7B|DevOps-Model-7B-Chat| 7B |62.20| 64.11| +|7B|Qwen-7B-Chat| 7B |46.00 |52.44| +|7B|Baichuan2-7B-Chat| 7B| 52.26| 54.46| +|7B|Internlm-7B-Chat |7B |52.61 |55.75| diff --git a/content/en/docs/devops-model/2_quickstart.md b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/quickstart.en-US.md similarity index 84% rename from content/en/docs/devops-model/2_quickstart.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Model/main/quickstart.en-US.md index 0f16577..c2140b5 100644 --- a/content/en/docs/devops-model/2_quickstart.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/quickstart.en-US.md @@ -1,71 +1,75 @@ ---- -title: QuickStart -slug: QuickStart -description: 介绍主要功能 -url: "/docs/codefuse-devops-model-quickstart" -aliases: -- "/docs/codefuse-devops-model-quickstart" ---- - - - -## Dependency Installation 
-Please install the packages listed in the requirements.txt file from the GitHub address first. You can refer to the following code: -``` -pip install -r requirements.txt -``` - - -## Model Download -Model download information is as follows: - -🤗 Huggingface Address - -| - | Base Model |Aligned Model| -| -- | ---------- | ------- | -|7B| DevOps-Model-7B-Base| DevOps-Model-7B-Chat| -|14B| DevOps-Model-14B-Base| DevOps-Model-14B-Chat| - -🤖 ModelScope Address - -| - | Base Model |Aligned Model| -| -- | ---------- | ------- | -|7B | DevOps-Model-7B-Base |DevOps-Model-7B-Chat| -|14B| DevOps-Model-14B-Base| DevOps-Model-14B-Chat| - -Find the version of the Chat model you want to download; currently, 7B and 14B models are provided. - - -## Model Usage -Interact with the Chat model using the following code: -``` -from transformers import AutoModelForCausalLM, AutoTokenizer -from transformers.generation import GenerationConfig - -tokenizer = AutoTokenizer.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) - -model = AutoModelForCausalLM.from_pretrained("path_to_DevOps-Model-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval() - -# 指定 generation_config -model.generation_config = GenerationConfig.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) - -# First round of conversation -resp, hist = model.chat(query='你是谁', tokenizer=tokenizer, history=None) -print(resp) -# 我是 DevOps-Model,一个由蚂蚁集团平台技术事业群风险智能团队和北京大学联合研发的人工智能机器人,可以与用户进行自然语言交互,并协助解答 DevOps 全生命周期中的各种问题。如果您有任何需要协助的问题或者想要进行闲聊,都可以和我交流哦。 - -# Second round of conversation -resp2, hist2 = model.chat(query='Java 中 HashMap 和 Hashtable 有什么区别', tokenizer=tokenizer, history=hist) -print(resp2) -# HashMap 和 Hashtable 都是 Java 中常用的哈希表实现,它们的主要区别在于: -# 1. Hashtable 是线程安全的,而 HashMap 不是线程安全的,因此在多线程环境下,Hashtable 的性能更稳定。 -# 2. Hashtable 中的方法都是同步的,而 HashMap 的方法不是同步的,因此在多线程环境下,Hashtable 的性能更好。 -# 3. 
Hashtable 中的 key 和 value 都必须实现 Serializable 接口,而 HashMap 中的 key 和 value 可以是任何对象,包括基本数据类型。 -# 4. Hashtable 的初始容量是 11,而 HashMap 的初始容量是 16。 -# 总之,如果需要在多线程环境下使用哈希表,并且需要保证线程安全,那么应该使用 Hashtable;如果不需要考虑线程安全,或者需要快速地进行哈希表操作,那么应该使用 HashMap。 - -# Third round of conversation -resp3, hist3 = model.chat(query='线程安全代表什么', tokenizer=tokenizer, history=hist2) -print(resp3) -# 线程安全是指在多线程环境下,程序能够正确地处理并发访问,并且不会出现数据竞争、死锁、饥饿等异常情况。线程安全的程序可以保证在不同的线程之间共享同一个数据结构时,数据的正确性和一致性。线程安全的实现通常需要使用同步机制,如锁、原子操作等,来保证对共享数据的访问是线程安全的。在 Java 中,可以通过 synchronized 关键字、Lock 接口等机制来实现线程安全。 -``` \ No newline at end of file +--- +store: + title: CodeFuse-DevOps-Model + version: main +group: + title: 🌱 CodeFuse-DevOps-Model + order: -1 +title: QuickStart +order: 1 +toc: content +--- + +## Dependency Installation + +Please install the packages listed in the requirements.txt file from the GitHub address first. You can refer to the following code: + +``` +pip install -r requirements.txt +``` + +## Model Download + +Model download information is as follows: + +🤗 Huggingface Address + +| - | Base Model | Aligned Model | +| --- | --------------------- | --------------------- | +| 7B | DevOps-Model-7B-Base | DevOps-Model-7B-Chat | +| 14B | DevOps-Model-14B-Base | DevOps-Model-14B-Chat | + +🤖 ModelScope Address + +| - | Base Model | Aligned Model | +| --- | --------------------- | --------------------- | +| 7B | DevOps-Model-7B-Base | DevOps-Model-7B-Chat | +| 14B | DevOps-Model-14B-Base | DevOps-Model-14B-Chat | + +Find the version of the Chat model you want to download; currently, 7B and 14B models are provided. 
+ +## Model Usage + +Interact with the Chat model using the following code: + +``` +from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers.generation import GenerationConfig + +tokenizer = AutoTokenizer.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) + +model = AutoModelForCausalLM.from_pretrained("path_to_DevOps-Model-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval() + +# 指定 generation_config +model.generation_config = GenerationConfig.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) + +# First round of conversation +resp, hist = model.chat(query='你是谁', tokenizer=tokenizer, history=None) +print(resp) +# 我是 DevOps-Model,一个由蚂蚁集团平台技术事业群风险智能团队和北京大学联合研发的人工智能机器人,可以与用户进行自然语言交互,并协助解答 DevOps 全生命周期中的各种问题。如果您有任何需要协助的问题或者想要进行闲聊,都可以和我交流哦。 + +# Second round of conversation +resp2, hist2 = model.chat(query='Java 中 HashMap 和 Hashtable 有什么区别', tokenizer=tokenizer, history=hist) +print(resp2) +# HashMap 和 Hashtable 都是 Java 中常用的哈希表实现,它们的主要区别在于: +# 1. Hashtable 是线程安全的,而 HashMap 不是线程安全的,因此在多线程环境下,Hashtable 的性能更稳定。 +# 2. Hashtable 中的方法都是同步的,而 HashMap 的方法不是同步的,因此在多线程环境下,Hashtable 的性能更好。 +# 3. Hashtable 中的 key 和 value 都必须实现 Serializable 接口,而 HashMap 中的 key 和 value 可以是任何对象,包括基本数据类型。 +# 4. 
Hashtable 的初始容量是 11,而 HashMap 的初始容量是 16。 +# 总之,如果需要在多线程环境下使用哈希表,并且需要保证线程安全,那么应该使用 Hashtable;如果不需要考虑线程安全,或者需要快速地进行哈希表操作,那么应该使用 HashMap。 + +# Third round of conversation +resp3, hist3 = model.chat(query='线程安全代表什么', tokenizer=tokenizer, history=hist2) +print(resp3) +# 线程安全是指在多线程环境下,程序能够正确地处理并发访问,并且不会出现数据竞争、死锁、饥饿等异常情况。线程安全的程序可以保证在不同的线程之间共享同一个数据结构时,数据的正确性和一致性。线程安全的实现通常需要使用同步机制,如锁、原子操作等,来保证对共享数据的访问是线程安全的。在 Java 中,可以通过 synchronized 关键字、Lock 接口等机制来实现线程安全。 +``` diff --git a/content/zh/docs/devops-model/2_quickstart.md b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/quickstart.zh-CN.md similarity index 84% rename from content/zh/docs/devops-model/2_quickstart.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Model/main/quickstart.zh-CN.md index 3db7ff9..8d93ca6 100644 --- a/content/zh/docs/devops-model/2_quickstart.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/quickstart.zh-CN.md @@ -1,66 +1,70 @@ ---- -title: 快速使用 -slug: 快速使用 -description: 介绍主要功能 -url: "/docs/codefuse-devops-model-quickstart-zh" -aliases: -- "/docs/codefuse-devops-model-quickstart-zh" ---- - - -## 依赖安装 -需要先 PIP 安装一下 Github 地址下的 requirement.txt 中的包,可以参考一下代码 -pip install -r requirements.txt - - -## 模型下载 -模型下载相关信息如下: - 🤗 Huggingface 地址 - -| - | 基座模型 |对齐模型| -| -- | ---------- | ------- | -|7B| DevOps-Model-7B-Base| DevOps-Model-7B-Chat| -|14B| DevOps-Model-14B-Base| DevOps-Model-14B-Chat| - -🤖 ModelScope 地址 -| - | 基座模型 |对齐模型| -| -- | ---------- | ------- | -|7B | DevOps-Model-7B-Base |DevOps-Model-7B-Chat| -|14B| DevOps-Model-14B-Base| DevOps-Model-14B-Chat| - -找到自己想要下载的 Chat 模型版本,当前提供了 7B 和 14B 的模型 - - -## 模型使用 -根据以下代码来和 Chat 模型进行交互 -``` -from transformers import AutoModelForCausalLM, AutoTokenizer -from transformers.generation import GenerationConfig - -tokenizer = AutoTokenizer.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) - -model = AutoModelForCausalLM.from_pretrained("path_to_DevOps-Model-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval() - 
-# 指定 generation_config -model.generation_config = GenerationConfig.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) - -# 第一轮对话 -resp, hist = model.chat(query='你是谁', tokenizer=tokenizer, history=None) -print(resp) -# 我是 DevOps-Model,一个由蚂蚁集团平台技术事业群风险智能团队和北京大学联合研发的人工智能机器人,可以与用户进行自然语言交互,并协助解答 DevOps 全生命周期中的各种问题。如果您有任何需要协助的问题或者想要进行闲聊,都可以和我交流哦。 - -# 第二轮对话 -resp2, hist2 = model.chat(query='Java 中 HashMap 和 Hashtable 有什么区别', tokenizer=tokenizer, history=hist) -print(resp2) -# HashMap 和 Hashtable 都是 Java 中常用的哈希表实现,它们的主要区别在于: -# 1. Hashtable 是线程安全的,而 HashMap 不是线程安全的,因此在多线程环境下,Hashtable 的性能更稳定。 -# 2. Hashtable 中的方法都是同步的,而 HashMap 的方法不是同步的,因此在多线程环境下,Hashtable 的性能更好。 -# 3. Hashtable 中的 key 和 value 都必须实现 Serializable 接口,而 HashMap 中的 key 和 value 可以是任何对象,包括基本数据类型。 -# 4. Hashtable 的初始容量是 11,而 HashMap 的初始容量是 16。 -# 总之,如果需要在多线程环境下使用哈希表,并且需要保证线程安全,那么应该使用 Hashtable;如果不需要考虑线程安全,或者需要快速地进行哈希表操作,那么应该使用 HashMap。 - -# 第三轮对话 -resp3, hist3 = model.chat(query='线程安全代表什么', tokenizer=tokenizer, history=hist2) -print(resp3) -# 线程安全是指在多线程环境下,程序能够正确地处理并发访问,并且不会出现数据竞争、死锁、饥饿等异常情况。线程安全的程序可以保证在不同的线程之间共享同一个数据结构时,数据的正确性和一致性。线程安全的实现通常需要使用同步机制,如锁、原子操作等,来保证对共享数据的访问是线程安全的。在 Java 中,可以通过 synchronized 关键字、Lock 接口等机制来实现线程安全。 -``` \ No newline at end of file +--- +store: + title: CodeFuse-DevOps-Model + version: main +group: + title: 🌱 CodeFuse-DevOps-Model + order: -1 +title: 快速使用 +order: 1 +toc: content +--- + +## 依赖安装 + +需要先 PIP 安装一下 Github 地址下的 requirement.txt 中的包,可以参考一下代码 +pip install -r requirements.txt + +## 模型下载 + +模型下载相关信息如下: +🤗 Huggingface 地址 + +| - | 基座模型 | 对齐模型 | +| --- | --------------------- | --------------------- | +| 7B | DevOps-Model-7B-Base | DevOps-Model-7B-Chat | +| 14B | DevOps-Model-14B-Base | DevOps-Model-14B-Chat | + +🤖 ModelScope 地址 +| - | 基座模型 |对齐模型| +| -- | ---------- | ------- | +|7B | DevOps-Model-7B-Base |DevOps-Model-7B-Chat| +|14B| DevOps-Model-14B-Base| DevOps-Model-14B-Chat| + +找到自己想要下载的 Chat 模型版本,当前提供了 7B 和 14B 的模型 + +## 模型使用 + +根据以下代码来和 Chat 模型进行交互 + +``` 
+from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers.generation import GenerationConfig + +tokenizer = AutoTokenizer.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) + +model = AutoModelForCausalLM.from_pretrained("path_to_DevOps-Model-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval() + +# 指定 generation_config +model.generation_config = GenerationConfig.from_pretrained("path_to_DevOps-Model-Chat", trust_remote_code=True) + +# 第一轮对话 +resp, hist = model.chat(query='你是谁', tokenizer=tokenizer, history=None) +print(resp) +# 我是 DevOps-Model,一个由蚂蚁集团平台技术事业群风险智能团队和北京大学联合研发的人工智能机器人,可以与用户进行自然语言交互,并协助解答 DevOps 全生命周期中的各种问题。如果您有任何需要协助的问题或者想要进行闲聊,都可以和我交流哦。 + +# 第二轮对话 +resp2, hist2 = model.chat(query='Java 中 HashMap 和 Hashtable 有什么区别', tokenizer=tokenizer, history=hist) +print(resp2) +# HashMap 和 Hashtable 都是 Java 中常用的哈希表实现,它们的主要区别在于: +# 1. Hashtable 是线程安全的,而 HashMap 不是线程安全的,因此在多线程环境下,Hashtable 的性能更稳定。 +# 2. Hashtable 中的方法都是同步的,而 HashMap 的方法不是同步的,因此在多线程环境下,Hashtable 的性能更好。 +# 3. Hashtable 中的 key 和 value 都必须实现 Serializable 接口,而 HashMap 中的 key 和 value 可以是任何对象,包括基本数据类型。 +# 4. 
Hashtable 的初始容量是 11,而 HashMap 的初始容量是 16。 +# 总之,如果需要在多线程环境下使用哈希表,并且需要保证线程安全,那么应该使用 Hashtable;如果不需要考虑线程安全,或者需要快速地进行哈希表操作,那么应该使用 HashMap。 + +# 第三轮对话 +resp3, hist3 = model.chat(query='线程安全代表什么', tokenizer=tokenizer, history=hist2) +print(resp3) +# 线程安全是指在多线程环境下,程序能够正确地处理并发访问,并且不会出现数据竞争、死锁、饥饿等异常情况。线程安全的程序可以保证在不同的线程之间共享同一个数据结构时,数据的正确性和一致性。线程安全的实现通常需要使用同步机制,如锁、原子操作等,来保证对共享数据的访问是线程安全的。在 Java 中,可以通过 synchronized 关键字、Lock 接口等机制来实现线程安全。 +``` diff --git a/content/en/docs/devops-model/1_traindetail.md b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/traindetail.en-US.md similarity index 86% rename from content/en/docs/devops-model/1_traindetail.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Model/main/traindetail.en-US.md index 61c043d..9cc3c37 100644 --- a/content/en/docs/devops-model/1_traindetail.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/traindetail.en-US.md @@ -1,46 +1,47 @@ ---- -title: Train Detail -slug: Train Detail -description: 介绍主要功能 -url: "/docs/codefuse-devops-model-train" -aliases: -- "/docs/codefuse-devops-model-train" ---- - - -## Training Process - -According to the literature review, it is known that most domain models are based on conversational models and undergo knowledge infusion through Supervised Fine-Tuning (SFT). However, the QA corpus required for SFT fine-tuning largely comes from ChatGPT generation, which may not fully cover domain knowledge. - -Therefore, the DevOps-Model adopts a pre-training plus training followed by SFT fine-tuning approach, as illustrated in Figure 2.1. We believe that for large domain models, additional pre-training is necessary. This can inject some domain knowledge into the large model during the pre-training phase. If this knowledge has not been covered during the general large model's pre-training, it will allow the large model to learn new information; if it has been covered, it will further reinforce the model's knowledge. 
The second step is model alignment, aiming to enable the large model to provide the most appropriate content in response to questions. - -![](/images/devops-model/devops_train_framework.png) -![](/images/devops_model/devops_train_framework.png) - -## Training Data -### Data Collection -The model is positioned as a large Chinese DevOps domain model, so we collect pre-training and QA data related to Chinese DevOps. - -The pre-training data mainly comes from the internet, including technical blogs, documentation, and books, amounting to over 50GB of pre-training corpus data. -For the QA data, our goal is not only to align the model with general Q&A capabilities but also to learn how to answer questions better in the DevOps domain. Therefore, we collected both general single-turn and multi-turn dialogue data and generated domain-specific QA data for the DevOps field through crawling and using ChatGPT. Ultimately, we carefully selected around 200K pieces of QA data for SFT fine-tuning training, as shown in the table below. - -|Data Type |Volume| -| -- | - | -|General Single-turn QA| 50K| -|General Multi-turn QA| 20K| -|DevOps Domain QA| 130K| - - -## Data Selection -![](/images/devops-model/devops_data_filter.png) -![](/images/devops_model/devops_data_filter.png) - -Since most of the pre-training data is collected from the internet, the quality can be uneven. As data is the most crucial component in large model training, we established a cleaning Pipeline as shown above to thoroughly filter the quality of the collected data. - -First, experts and manual screening have summarized a set of heuristic filtering rules at the document level, primarily to filter out those documents of very poor quality. -Then, even within an article of slightly lower quality, there may still be some valuable domain knowledge, which we need to collect as much as possible. Here, we split the article into paragraphs. 
-Next, the split paragraphs are filtered again using the rules from step 1, yielding a batch of paragraphs that have passed rule-based filtering. -We then picked out 1000 paragraphs for labeling by experienced professional developers to obtain high-quality labeled data. -Finally, we trained a scoring model based on the labeling results to score the quality of paragraphs. The vector model for paragraphs was the pre-trained Chinese version of Sentence-Bert, and the scoring algorithm was logistic regression. To avoid errors in the scoring model, we used the Pareto distribution to decide whether to filter a paragraph based on its quality score. -After this Pipeline, we finally settled on approximately 15GB of data for the pre-training plus training of the large model. - +--- +store: + title: CodeFuse-DevOps-Model + version: main +group: + title: 🌱 CodeFuse-DevOps-Model + order: -1 +title: Train Detail +order: 0 +toc: content +--- + +## Training Process + +According to the literature review, it is known that most domain models are based on conversational models and undergo knowledge infusion through Supervised Fine-Tuning (SFT). However, the QA corpus required for SFT fine-tuning largely comes from ChatGPT generation, which may not fully cover domain knowledge. + +Therefore, the DevOps-Model adopts a pre-training plus training followed by SFT fine-tuning approach, as illustrated in Figure 2.1. We believe that for large domain models, additional pre-training is necessary. This can inject some domain knowledge into the large model during the pre-training phase. If this knowledge has not been covered during the general large model's pre-training, it will allow the large model to learn new information; if it has been covered, it will further reinforce the model's knowledge. The second step is model alignment, aiming to enable the large model to provide the most appropriate content in response to questions. 
+ +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*66DWSbAXqRAAAAAAAAAAAAAADlHYAQ/original) + +## Training Data + +### Data Collection + +The model is positioned as a large Chinese DevOps domain model, so we collect pre-training and QA data related to Chinese DevOps. + +The pre-training data mainly comes from the internet, including technical blogs, documentation, and books, amounting to over 50GB of pre-training corpus data. +For the QA data, our goal is not only to align the model with general Q&A capabilities but also to learn how to answer questions better in the DevOps domain. Therefore, we collected both general single-turn and multi-turn dialogue data and generated domain-specific QA data for the DevOps field through crawling and using ChatGPT. Ultimately, we carefully selected around 200K pieces of QA data for SFT fine-tuning training, as shown in the table below. + +| Data Type | Volume | +| ---------------------- | ------ | +| General Single-turn QA | 50K | +| General Multi-turn QA | 20K | +| DevOps Domain QA | 130K | + +## Data Selection + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*jKlFTp3GWg8AAAAAAAAAAAAADlHYAQ/original) + +Since most of the pre-training data is collected from the internet, the quality can be uneven. As data is the most crucial component in large model training, we established a cleaning Pipeline as shown above to thoroughly filter the quality of the collected data. + +First, experts and manual screening have summarized a set of heuristic filtering rules at the document level, primarily to filter out those documents of very poor quality. +Then, even within an article of slightly lower quality, there may still be some valuable domain knowledge, which we need to collect as much as possible. Here, we split the article into paragraphs. +Next, the split paragraphs are filtered again using the rules from step 1, yielding a batch of paragraphs that have passed rule-based filtering. 
+We then picked out 1000 paragraphs for labeling by experienced professional developers to obtain high-quality labeled data. +Finally, we trained a scoring model based on the labeling results to score the quality of paragraphs. The vector model for paragraphs was the pre-trained Chinese version of Sentence-Bert, and the scoring algorithm was logistic regression. To avoid errors in the scoring model, we used the Pareto distribution to decide whether to filter a paragraph based on its quality score. +After this Pipeline, we finally settled on approximately 15GB of data for the pre-training plus training of the large model. diff --git a/content/zh/docs/devops-model/1_traindetail.md b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/traindetail.zh-CN.md similarity index 54% rename from content/zh/docs/devops-model/1_traindetail.md rename to docs/docs/developer-docs/CodeFuse-DevOps-Model/main/traindetail.zh-CN.md index ec60625..9445b51 100644 --- a/content/zh/docs/devops-model/1_traindetail.md +++ b/docs/docs/developer-docs/CodeFuse-DevOps-Model/main/traindetail.zh-CN.md @@ -1,43 +1,46 @@ ---- -title: 训练解析 -slug: 训练解析 -description: 介绍主要功能 -url: "/docs/codefuse-devops-model-train-zh" -aliases: -- "/docs/codefuse-devops-model-train-zh" ---- - - -## 训练流程 -根据查阅文献可知,大部分领域模型都是在对话模型的基础上,通过SFT微调来进行知识注入。而SFT微调所需要QA预料基本都来自于ChatGPT生成。然而,该方案可能存在QA语料无法完全覆盖领域知识的情况。 -因此,DevOps-Model采用的是预训练加训 + SFT微调的方案,如图2.1所示。我们认为针对领域大模型,预训练的加训是必要的,因为其可以将领域内的一些知识在预训练阶段注入到大模型,如果这些知识在通用大模型预训练时没有出现过,那会让大模型学习到新的知识;如果出现过,就可以让大模型进一步加深印象。第二步则是大模型对齐,目的是让大模型可以根据问题来回答最合适的内容。 - -![](/images/devops-model/devops_train_framework.png) -![](/images/devops_model/devops_train_framework.png) - - -## 训练数据 -### 数据收集 -模型的定位是中文 DevOps 领域大模型,因此收集与中文DevOps相关的预训练数据和QA数据。 -- 预训练数据主要来自互联网技术博客、技术文档、技术书籍等,最终收集到了 50G+ 的预训练语料数据; -- 针对 QA 数据,我们的目的是想让模型不但对齐到通用的问答能力,而且针对 DevOps 领域也可以学会如何更好的回答问题,因此不但收集了通用领域的单轮和多轮对话数据,还针对 DevOps 领域,通过爬取和 ChatGPT 生成的方式产出了属于 DevOps 领域的问答数据。最终我们精心筛选了约 200K 的 QA 数据进行 SFT微调训练,具体数据量如下表所示。 - -|数据类型 |数据量级| 
-| -- | - | -|通用单轮 QA| 50K| -|通用多轮 QA| 20K| -|DevOps 领域 QA| 130K| - -### 数据筛选 -![](/images/devops-model/devops_data_filter.png) -![](/images/devops_model/devops_data_filter.png) - - - -由于预训练数据大部分是从互联网上收集的数据,质量会参差不齐,而大模型训练中数据是最重要的一环,我们建立了如上图所示的清洗 Pipeline,来针对收集到的数据进行质量的全面过滤。 -1. 首先,由专家经验和人工筛选,总结出来了一批文档级别的 Heuristic 过滤规则,这一步主要用来过滤掉那些质量非常差的文档; -2. 然后,即便是一篇质量稍差的文章中,也有可能还是含有一些有价值的领域知识,我们也需要尽可能的进行收集。此处,我们对文章进行段落拆分,将文章拆分成一个个段落; -3. 然后,我们将拆分后的段落会再次通过步骤1进行过滤,便得到了一批经过规则过滤后的段落; -4. 然后,我们摘取了其中 1000 个段落,由经验丰富的专业开发人员来进行打标,获得高质量的打标数据; -5. 最后,我们根据打标后的结果来训练了一个打分模型来针对段落进行质量的打分,段落的向量模型选用了预训练好的中文版本的 Sentence-Bert,打分算法选用了逻辑回归,为了避免打分模型的误差,会再通过帕累托分布来根据段落的质量打分进行采样来决定要不要过滤这个段落。 -经过这个 Pipeline 后,我们最终沉淀下 15G 左右的数据来进行大模型的预训练加训。 \ No newline at end of file +--- +store: + title: CodeFuse-DevOps-Model + version: main +group: + title: 🌱 CodeFuse-DevOps-Model + order: -1 +title: 训练解析 +order: 0 +toc: content +--- + +## 训练流程 + +根据查阅文献可知,大部分领域模型都是在对话模型的基础上,通过 SFT 微调来进行知识注入。而 SFT 微调所需要 QA 预料基本都来自于 ChatGPT 生成。然而,该方案可能存在 QA 语料无法完全覆盖领域知识的情况。 +因此,DevOps-Model 采用的是预训练加训 + SFT 微调的方案,如图 2.1 所示。我们认为针对领域大模型,预训练的加训是必要的,因为其可以将领域内的一些知识在预训练阶段注入到大模型,如果这些知识在通用大模型预训练时没有出现过,那会让大模型学习到新的知识;如果出现过,就可以让大模型进一步加深印象。第二步则是大模型对齐,目的是让大模型可以根据问题来回答最合适的内容。 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*66DWSbAXqRAAAAAAAAAAAAAADlHYAQ/original) + +## 训练数据 + +### 数据收集 + +模型的定位是中文 DevOps 领域大模型,因此收集与中文 DevOps 相关的预训练数据和 QA 数据。 + +- 预训练数据主要来自互联网技术博客、技术文档、技术书籍等,最终收集到了 50G+ 的预训练语料数据; +- 针对 QA 数据,我们的目的是想让模型不但对齐到通用的问答能力,而且针对 DevOps 领域也可以学会如何更好的回答问题,因此不但收集了通用领域的单轮和多轮对话数据,还针对 DevOps 领域,通过爬取和 ChatGPT 生成的方式产出了属于 DevOps 领域的问答数据。最终我们精心筛选了约 200K 的 QA 数据进行 SFT 微调训练,具体数据量如下表所示。 + +| 数据类型 | 数据量级 | +| -------------- | -------- | +| 通用单轮 QA | 50K | +| 通用多轮 QA | 20K | +| DevOps 领域 QA | 130K | + +### 数据筛选 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*jKlFTp3GWg8AAAAAAAAAAAAADlHYAQ/original) + +由于预训练数据大部分是从互联网上收集的数据,质量会参差不齐,而大模型训练中数据是最重要的一环,我们建立了如上图所示的清洗 Pipeline,来针对收集到的数据进行质量的全面过滤。 + +1. 
首先,由专家经验和人工筛选,总结出来了一批文档级别的 Heuristic 过滤规则,这一步主要用来过滤掉那些质量非常差的文档; +2. 然后,即便是一篇质量稍差的文章中,也有可能还是含有一些有价值的领域知识,我们也需要尽可能的进行收集。此处,我们对文章进行段落拆分,将文章拆分成一个个段落; +3. 然后,我们将拆分后的段落会再次通过步骤 1 进行过滤,便得到了一批经过规则过滤后的段落; +4. 然后,我们摘取了其中 1000 个段落,由经验丰富的专业开发人员来进行打标,获得高质量的打标数据; +5. 最后,我们根据打标后的结果来训练了一个打分模型来针对段落进行质量的打分,段落的向量模型选用了预训练好的中文版本的 Sentence-Bert,打分算法选用了逻辑回归,为了避免打分模型的误差,会再通过帕累托分布来根据段落的质量打分进行采样来决定要不要过滤这个段落。 + 经过这个 Pipeline 后,我们最终沉淀下 15G 左右的数据来进行大模型的预训练加训。 diff --git a/content/en/docs/overview/b9.mftvlm.md b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm.en-US.md similarity index 82% rename from content/en/docs/overview/b9.mftvlm.md rename to docs/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm.en-US.md index ef95829..620cb1b 100644 --- a/content/en/docs/overview/b9.mftvlm.md +++ b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm.en-US.md @@ -1,28 +1,40 @@ ---- -title: CodeFuse-MFT-VLM -slug: CodeFuse-MFT-VLM -description: 介绍主要功能 -aliases: -- "/docs/codefuse-mft-vlm" ---- - -## CodeFuse-VLM -CodeFuse-VLM is a Multimodal LLM(MLLM) framework that provides users with multiple vision encoders, multimodal alignment adapters, and LLMs. Through CodeFuse-VLM framework, users are able to customize their own MLLM model to adapt their own tasks. -As more and more models are published on Huggingface community, there will be more open-source vision encoders and LLMs. Each of these models has their own specialties, e.g. Code-LLama is good at code-related tasks but has poor performance for Chinese tasks. Therefore, we built CodeFuse-VLM framework to support multiple vision encoders, multimodal alignment adapters, and LLMs to adapt different types of tasks. -![img.jpg](/images/mft-vlm/CodeFuse-VLM-arch.png) - -Under CodeFuse-VLM framework, we use cross attention multimodal adapter, Qwen-14B LLM, and Qwen-VL's vision encoder to train CodeFuse-VLM-14B model. On multiple benchmarks, our CodeFuse-VLM-14B shows superior performances over Qwen-VL and LLAVA-1.5. 
-![img.jpg](/images/mft-vlm/CodeFuse-VLM-14B-performance.png) - -Here is the table for different MLLM model's performance on benchmarks -Model | MMBench | MMBench-CN | VqaV2 | GQA | TextVQA | Vizwiz -| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | -LLAVA-1.5 | 67.7 | 63.6 | 80.0 | 63.3 | 61.3 | 53.6 -Qwen-VL | 60.6 | 56.7 | 78.2 | 57.5 | 63.8 | 38.9 -CodeFuse-VLM-14B | 75.7 | 69.8 | 79.3 | 59.4 | 63.9 | 45.3 - -Our model achieved high ranking on MMBenchmark: https://mmbench.opencompass.org.cn/leaderboard - -Here's our model's demo video - -https://private-user-images.githubusercontent.com/22836551/300386230-8e64f615-ac0e-447e-9695-c96b254d484f.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjExODksIm5iZiI6MTcwNjUyMDg4OSwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzODYyMzAtOGU2NGY2MTUtYWMwZS00NDdlLTk2OTUtYzk2YjI1NGQ0ODRmLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5MzQ0OVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQ5NzNjM2U1ZWU4NDU0Yzc5NmE4ZTM1NzY2ZjU4YjRjY2ZhNjMzODk0ZDgzMDg4N2FjYjZhYTllM2E3NTAyMWQmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.pr-ad7rKYBgk26DTItj2q2q9I5dRWnBNHbV9M7GSVCo +--- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-MFT-VLM + version: main +group: + title: 🌱 CodeFuse-MFT-VLM + index: true + order: -1 +title: CodeFuse-MFT-VLM +order: -1 +toc: content +--- + +## CodeFuse-VLM + +CodeFuse-VLM is a Multimodal LLM(MLLM) framework that provides users with multiple vision encoders, multimodal alignment adapters, and LLMs. Through CodeFuse-VLM framework, users are able to customize their own MLLM model to adapt their own tasks. 
+As more and more models are published on Huggingface community, there will be more open-source vision encoders and LLMs. Each of these models has their own specialties, e.g. Code-LLama is good at code-related tasks but has poor performance for Chinese tasks. Therefore, we built CodeFuse-VLM framework to support multiple vision encoders, multimodal alignment adapters, and LLMs to adapt different types of tasks. +![img.jpg](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*t7OIS58EJmIAAAAAAAAAAAAADlHYAQ/original) + +Under CodeFuse-VLM framework, we use cross attention multimodal adapter, Qwen-14B LLM, and Qwen-VL's vision encoder to train CodeFuse-VLM-14B model. On multiple benchmarks, our CodeFuse-VLM-14B shows superior performances over Qwen-VL and LLAVA-1.5. +![img.jpg](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*BuIrRZd62ssAAAAAAAAAAAAADlHYAQ/original) + +Here is the table for different MLLM model's performance on benchmarks +Model | MMBench | MMBench-CN | VqaV2 | GQA | TextVQA | Vizwiz +| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | +LLAVA-1.5 | 67.7 | 63.6 | 80.0 | 63.3 | 61.3 | 53.6 +Qwen-VL | 60.6 | 56.7 | 78.2 | 57.5 | 63.8 | 38.9 +CodeFuse-VLM-14B | 75.7 | 69.8 | 79.3 | 59.4 | 63.9 | 45.3 + +Our model achieved high ranking on MMBenchmark: https://mmbench.opencompass.org.cn/leaderboard + +Here's our model's demo video + 
+https://private-user-images.githubusercontent.com/22836551/300386230-8e64f615-ac0e-447e-9695-c96b254d484f.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjExODksIm5iZiI6MTcwNjUyMDg4OSwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzODYyMzAtOGU2NGY2MTUtYWMwZS00NDdlLTk2OTUtYzk2YjI1NGQ0ODRmLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5MzQ0OVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQ5NzNjM2U1ZWU4NDU0Yzc5NmE4ZTM1NzY2ZjU4YjRjY2ZhNjMzODk0ZDgzMDg4N2FjYjZhYTllM2E3NTAyMWQmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.pr-ad7rKYBgk26DTItj2q2q9I5dRWnBNHbV9M7GSVCo diff --git a/content/en/docs/mftcoder/4_atorch.md b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm.zh-CN.md similarity index 79% rename from content/en/docs/mftcoder/4_atorch.md rename to docs/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm.zh-CN.md index a53f7f2..52d1f17 100644 --- a/content/en/docs/mftcoder/4_atorch.md +++ b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/mftvlm.zh-CN.md @@ -1,239 +1,267 @@ ---- -title: "MFTCoder Training: Atorch Framework" -description: 介绍主要功能 -url: /docs/mftcoder-atorch -aliases: -- "/docs/mftcoder-atorch" ---- - - -[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai) - - GitHub - - -[[中文]](/docs/mftcoder-atorch-zh) [**English**] - -## 1. Updates - -🔥 MFTCoder supports fine-tuning of the GPTNeoX model under the Atorch framework. - -🔥 MFTCoder supports both fully supervised fine-tuning. - -🔥 MFTCoder supports LoRA using the Atorch Framework. - -## 2. Data Format -### 2.1 Training Data Format -The training data is in a uniformed JSONL format, in which each line of data has the following JSON format. 
The "chat_rounds" field is required, and other fields can be added or removed based on the specific need. - -```json -{ - "id":0, - "data_name":"code-helper", - "chat_rounds":[ - { - "role": "system", - "content": "You are a expert in coding and help answer code questions", - "chat_round_id": 0 - }, - { - "role": "human", - "content": "Write a python function of quick sort", - "chat_round_id": 1 - }, - { - "role": "bot", - "content": "Below is the function of quick sort: ...", - "chat_round_id": 1 - }, - { - "role": "human", - "content": "Explain the code", - "chat_round_id": 2 - }, - { - "role": "bot", - "content": "OK, this code ...", - "chat_round_id": 2 - } - ] -} -``` - -### 2.2 Inference Data Format -The inference data contains strings concatenated by conversation data(system, human and bot contents) in the training data format. -It is used as the data "seen"(before tokenization) by the model in training process. -It is used as input during the inference process as well. -Here is an example format of the concatenated string: - -```python -""" -<|role_start|>system<|role_end|>System instruction -<|role_start|>human<|role_end|>Human 1st round input -<|role_start|>bot<|role_end|>Bot 1st round output -<|role_start|>human<|role_end|>Human 2nd round input -<|role_start|>bot<|role_end|>Bot 2nd round output -... -... -... -<|role_start|>human<|role_end|>Human nth round input -<|role_start|>bot<|role_end|>{Bot output to be genreated} -""" -``` -When applying inference, you always make your input string end with "<|role_start|>bot<|role_end|>" to request the model generating answers. - -## 3. Model Training -Currently, the "MFTCoder/mft_atorch" code repository supports fully instruction fine-tuning, and LoRA instruction fine-tuning. Only the training of the GPTNeoX model is supported. In theory, the pretrained weights of the GPTNeoX model available on HuggingFace can be used for training within this project. 
- -We have extracted various components used in training to facilitate future extension and optimization. Please refer to the implementation in the main directory for more details. The entry directory for fine-tuning training is ```train/```, and the entry file for training is ```train/run_train.py```. The parameter configurations are stored in the launch scripts such as ```train/run_gpt_*.sh```, making it easier to manage and modify them uniformly. - -### 3.1 Tokenization -During training, we concatenate multi-turn dialogues into the following format (also known as the inference data format mentioned earlier) and then tokenize it. In this format, <|role_start|>human<|role_end|> represents the human input (i.e., prompt), <|role_start|>bot<|role_end|> represents the bot output, and represents the eos_token. -You can modify and replace the eos_token based on different models' requirements. - -Here is an example of the concatenated format with prompts: -``` -"<|role_start|>human<|role_end|>input1target1input2target2... -``` -During the calculation of loss, we use a ```loss mask``` to ensure that the loss from the input part does not contribute to the parameter updates. Only the loss from the ```target``` part is used for updating parameters. -This approach takes full advantage of the benefits of model parallelism, making training more efficient. It also leverages the characteristic of decoder-only models with left-to-right attention. -By including all target parts from multiple turns in a single training iteration, the training process becomes more efficient. - -### 3.2 Fully Supervised Fine-Tuning (SFT) -To perform fully SFT, you can execute the following command: -```bash -sh run_gpt_mft.sh 10 1 8 5 -``` -Please note that the four parameters after the launch script have the following meanings: -- The first parameter is the per GPU batch size. -- The second parameter is the number of tensor parallelism (currently only supports 1). 
-- The third parameter is the number of data parallelism, which should match the number of GPUs used. -- The fourth parameter is the number of training epochs. - -For other training modes, the same four parameters need to be configured in the launch script. - -### 3.3 LoRA Supervised Fine-Tuning -To perform LoRA SFT, you can execute the following command: -```bash -sh run_gpt_mft_peft.sh 10 1 8 5 -``` - -### 3.4 Parameter Explanations -The main parameter explanations for the ```train/run_gpt_*.sh``` are as follows. You can modify these parameters according to your needs: - -- **tokenize_mode**: Need to be 'sft' at present. - -- **train_mode**: Need to be 'sft' at present. - -- **load_raw_dataset**: Need to be 'True' at present. Only JSONL format is supported. - -- **data_paths**: "[path1,path2,path3]" Input data addresses, a string enclosed in [], with different paths separated by commas (,). Each path is a directory where the last level of the directory name is considered as the task name. Each task directory contains 1 to multiple jsonl data files. - -- **output_dir**: Training output directory to store checkpoints, lora_adaptor checkpoints, etc. - -- **tensorboard_dir**: Can be temporarily ignored, as the actual tensorboard is stored in the runs directory under output_dir. - -- **model_type**: Currently only supports gpt_neox. - -- **peft_type**: Currently only supports lora. - -- **pretrained_model_path**: Local directory of the pre-trained model. - -- **total_train_batch_size**: The total batch size for training across all GPUs, calculated automatically based on per gpu batch size entered in the script. - -- **per_device_valid_batch_size**: The batch size for evaluation on each GPU, calculated automatically based on per gpu batch size entered in the script. - -- **gradient_accumulation_steps**: Number of gradient accumulation steps. Global batch size = num_gpus * per_device_train_batch_size * gradient_accumulation_steps. 
- -- **checkpoint_activations**: Enable if running out of GPU memory. Trades time for space by not caching activation states, resulting in two forward passes to save memory. - -- **learning_rate**: Learning rate. When fine-tuning the entire model, it is recommended to use a smaller value, such as 1e-5 or 5e-6. For lora, a larger learning rate is generally used, such as 1e-4 or 2e-4. - -- **min_lr**: Minimum learning rate, usually one-tenth of the learning_rate. - -- **seq_length**: Maximum length during training. Set according to your device, longer lengths require more memory. - -- **log_interval**: Frequency of logging training loss. - -- **checkpointing_steps**: Frequency of saving a model checkpoint. - -- **evalation_steps**: Frequency of evaluating on the validation set. - -- **early_stopping_patience**: Number of consecutive eval points without further convergence to stop training. - -- **lr_scheduler_type**: Learning rate changing strategy. - -- **num_warmup_steps**: Number of warm-up steps for the learning rate to increase to the specified value. - -- **seed**: Random seed used for reproducibility of experimental results. - -- **train_iters**: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats. - -- **valid_iters**: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats. - -- **evaluation_strategy**: Evaluation strategy during training. "steps" means to evaluate every "valid_interval" steps, "epoch" means to evaluate every epoch. Both can be enabled simultaneously. - -- **save_strategy**: Strategy for saving model weights during training. "steps" means to save every "checkpointing_steps" steps. -- **extra_save_by_epoch**: Whether to save an epoch-level checkpoint every epoch. 
- -- **save_total_limit**: Maximum number of model checkpoints to keep. Generally set to 2, retaining the checkpoint with the lowest valid loss and the latest checkpoint. Note that epoch-level checkpoints will always be retained and are not subject to this limit. - -- **weighted_loss_mode**: Loss weighting method for multi-task training. - -## 4. Model Usage - -### 4.1 Merge Adaptor weights -Using LoRA or QLoRA for training, this project only saves the weights and configuration files of the adapters. -To merge the adapter weights with the base model, see ```src/pefts/merge_base_and_lora_to_hf.py``` - -### 4.2 Inference demo -Here is the script for inference on our trained models, which is compatible with most Hugging Face models: -```python -from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, -) -tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True, use_fast=False, legacy=False) -tokenizer.padding_side = "left" -tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("") -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("") -model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, trust_remote_code=True) - -HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>" -BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>" -texts = ["write a python function of quick sort."] -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] - -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") -outputs = model.generate( - inputs=inputs["input_ids"], - attention_mask=inputs["attention_mask"], - max_new_tokens=512, - top_p=0.95, - temperature=0.1, - do_sample=True, - eos_token_id=tokenizer.eos_token_id, - pad_token_id=tokenizer.pad_token_id - ) -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) -print(gen_text) -``` - -Indeed, the parameters top_p, temperature, repetition_penalty, do_sample, etc., 
have a significant impact on the model's generation output. -You can modify these parameters based on your specific use case. - -In code generation scenarios, if you are using the sampling mode (do_sample=True), the following parameter settings can yield good results for the Pass@1 metric: - -top_p: Set a higher value, such as 0.95, to retain highly probable generated words. This helps ensure more accurate and fluent generation results. - -temperature: Set a lower value, such as 0.1, to reduce randomness. Lower temperature values make the generation output more deterministic. - -These parameter combinations can control the diversity of the generated outputs while maintaining naturalness. Additionally, you can adjust other related parameters, such as repetition_penalty, to reduce repetition in the generated results. - -If you choose the non-sampling mode (do_sample=False), you can consider the following parameter settings: - -beam_num: Set a smaller value such as 1 or 3. ```beam_num=1``` represents greedy decoding, which selects the most probable single generated word. ```beam_num=3``` represents beam search mode, which considers multiple potential generation paths and chooses the best path among them. - -## 5. FAQ -### Q1:What should I do when cuda OOM happens? -If OOM (Out of Memory) occurs, you can mitigate it by reducing parameters such as per GPU batch size (the first argument when starting the training script) and seq_length. You can also set gradient_checkpointing=true, which significantly reduces memory usage but may slow down the training speed. \ No newline at end of file +--- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-MFT-VLM + version: main +group: + title: 🌱 CodeFuse-MFT-VLM + index: true + order: -1 +title: CodeFuse-MFT-VLM +order: -1 +toc: content +--- + +[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai)  + +GitHub + + +## 1. 
Updates + +🔥 MFTCoder supports fine-tuning of the GPTNeoX model under the Atorch framework. + +🔥 MFTCoder supports fully supervised fine-tuning. + +🔥 MFTCoder supports LoRA using the Atorch Framework. + +## 2. Data Format + +### 2.1 Training Data Format + +The training data is in a uniformed JSONL format, in which each line of data has the following JSON format. The "chat_rounds" field is required, and other fields can be added or removed based on the specific need. + +```json +{ + "id": 0, + "data_name": "code-helper", + "chat_rounds": [ + { + "role": "system", + "content": "You are an expert in coding and help answer code questions", + "chat_round_id": 0 + }, + { + "role": "human", + "content": "Write a python function of quick sort", + "chat_round_id": 1 + }, + { + "role": "bot", + "content": "Below is the function of quick sort: ...", + "chat_round_id": 1 + }, + { + "role": "human", + "content": "Explain the code", + "chat_round_id": 2 + }, + { + "role": "bot", + "content": "OK, this code ...", + "chat_round_id": 2 + } + ] +} +``` + +### 2.2 Inference Data Format + +The inference data contains strings concatenated by conversation data(system, human and bot contents) in the training data format. +It is used as the data "seen"(before tokenization) by the model in training process. +It is used as input during the inference process as well. +Here is an example format of the concatenated string: + +```python +""" +<|role_start|>system<|role_end|>System instruction +<|role_start|>human<|role_end|>Human 1st round input +<|role_start|>bot<|role_end|>Bot 1st round output +<|role_start|>human<|role_end|>Human 2nd round input +<|role_start|>bot<|role_end|>Bot 2nd round output +... +... +... +<|role_start|>human<|role_end|>Human nth round input +<|role_start|>bot<|role_end|>{Bot output to be generated} +""" +``` + +When applying inference, you always make your input string end with "<|role_start|>bot<|role_end|>" to request the model generating answers. + +## 3. 
Model Training + +Currently, the "MFTCoder/mft_atorch" code repository supports fully instruction fine-tuning, and LoRA instruction fine-tuning. Only the training of the GPTNeoX model is supported. In theory, the pretrained weights of the GPTNeoX model available on HuggingFace can be used for training within this project. + +We have extracted various components used in training to facilitate future extension and optimization. Please refer to the implementation in the main directory for more details. The entry directory for fine-tuning training is `train/`, and the entry file for training is `train/run_train.py`. The parameter configurations are stored in the launch scripts such as `train/run_gpt_*.sh`, making it easier to manage and modify them uniformly. + +### 3.1 Tokenization + +During training, we concatenate multi-turn dialogues into the following format (also known as the inference data format mentioned earlier) and then tokenize it. In this format, <|role_start|>human<|role_end|> represents the human input (i.e., prompt), <|role_start|>bot<|role_end|> represents the bot output, and represents the eos_token. +You can modify and replace the eos_token based on different models' requirements. + +Here is an example of the concatenated format with prompts: + +``` +"<|role_start|>human<|role_end|>input1target1input2target2... +``` + +During the calculation of loss, we use a `loss mask` to ensure that the loss from the input part does not contribute to the parameter updates. Only the loss from the `target` part is used for updating parameters. +This approach takes full advantage of the benefits of model parallelism, making training more efficient. It also leverages the characteristic of decoder-only models with left-to-right attention. +By including all target parts from multiple turns in a single training iteration, the training process becomes more efficient. 
+ +### 3.2 Fully Supervised Fine-Tuning (SFT) + +To perform fully SFT, you can execute the following command: + +```bash +sh run_gpt_mft.sh 10 1 8 5 +``` + +Please note that the four parameters after the launch script have the following meanings: + +- The first parameter is the per GPU batch size. +- The second parameter is the number of tensor parallelism (currently only supports 1). +- The third parameter is the number of data parallelism, which should match the number of GPUs used. +- The fourth parameter is the number of training epochs. + +For other training modes, the same four parameters need to be configured in the launch script. + +### 3.3 LoRA Supervised Fine-Tuning + +To perform LoRA SFT, you can execute the following command: + +```bash +sh run_gpt_mft_peft.sh 10 1 8 5 +``` + +### 3.4 Parameter Explanations + +The main parameter explanations for the `train/run_gpt_*.sh` are as follows. You can modify these parameters according to your needs: + +- **tokenize_mode**: Need to be 'sft' at present. + +- **train_mode**: Need to be 'sft' at present. + +- **load_raw_dataset**: Need to be 'True' at present. Only JSONL format is supported. + +- **data_paths**: "[path1,path2,path3]" Input data addresses, a string enclosed in [], with different paths separated by commas (,). Each path is a directory where the last level of the directory name is considered as the task name. Each task directory contains 1 to multiple jsonl data files. + +- **output_dir**: Training output directory to store checkpoints, lora_adaptor checkpoints, etc. + +- **tensorboard_dir**: Can be temporarily ignored, as the actual tensorboard is stored in the runs directory under output_dir. + +- **model_type**: Currently only supports gpt_neox. + +- **peft_type**: Currently only supports lora. + +- **pretrained_model_path**: Local directory of the pre-trained model. 
+ +- **total_train_batch_size**: The total batch size for training across all GPUs, calculated automatically based on per gpu batch size entered in the script. + +- **per_device_valid_batch_size**: The batch size for evaluation on each GPU, calculated automatically based on per gpu batch size entered in the script. + +- **gradient_accumulation_steps**: Number of gradient accumulation steps. Global batch size = `num_gpus * per_device_train_batch_size * gradient_accumulation_steps`. + +- **checkpoint_activations**: Enable if running out of GPU memory. Trades time for space by not caching activation states, resulting in two forward passes to save memory. + +- **learning_rate**: Learning rate. When fine-tuning the entire model, it is recommended to use a smaller value, such as 1e-5 or 5e-6. For lora, a larger learning rate is generally used, such as 1e-4 or 2e-4. + +- **min_lr**: Minimum learning rate, usually one-tenth of the learning_rate. + +- **seq_length**: Maximum length during training. Set according to your device, longer lengths require more memory. + +- **log_interval**: Frequency of logging training loss. + +- **checkpointing_steps**: Frequency of saving a model checkpoint. + +- **evalation_steps**: Frequency of evaluating on the validation set. + +- **early_stopping_patience**: Number of consecutive eval points without further convergence to stop training. + +- **lr_scheduler_type**: Learning rate changing strategy. + +- **num_warmup_steps**: Number of warm-up steps for the learning rate to increase to the specified value. + +- **seed**: Random seed used for reproducibility of experimental results. + +- **train_iters**: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats. 
+ +- **valid_iters**: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats. + +- **evaluation_strategy**: Evaluation strategy during training. "steps" means to evaluate every "valid_interval" steps, "epoch" means to evaluate every epoch. Both can be enabled simultaneously. + +- **save_strategy**: Strategy for saving model weights during training. "steps" means to save every "checkpointing_steps" steps. +- **extra_save_by_epoch**: Whether to save an epoch-level checkpoint every epoch. + +- **save_total_limit**: Maximum number of model checkpoints to keep. Generally set to 2, retaining the checkpoint with the lowest valid loss and the latest checkpoint. Note that epoch-level checkpoints will always be retained and are not subject to this limit. + +- **weighted_loss_mode**: Loss weighting method for multi-task training. + +## 4. Model Usage + +### 4.1 Merge Adaptor weights + +Using LoRA or QLoRA for training, this project only saves the weights and configuration files of the adapters. 
+To merge the adapter weights with the base model, see `src/pefts/merge_base_and_lora_to_hf.py` + +### 4.2 Inference demo + +Here is the script for inference on our trained models, which is compatible with most Hugging Face models: + +```python +from transformers import ( + AutoTokenizer, + AutoModelForCausalLM, +) +tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True, use_fast=False, legacy=False) +tokenizer.padding_side = "left" +tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("<unk>") +tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("</s>") +model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, trust_remote_code=True) + +HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>" +BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>" +texts = ["write a python function of quick sort."] +texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] + +inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") +outputs = model.generate( + inputs=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + max_new_tokens=512, + top_p=0.95, + temperature=0.1, + do_sample=True, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id + ) +gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) +print(gen_text) +``` + +Indeed, the parameters top_p, temperature, repetition_penalty, do_sample, etc., have a significant impact on the model's generation output. +You can modify these parameters based on your specific use case. + +In code generation scenarios, if you are using the sampling mode (do_sample=True), the following parameter settings can yield good results for the Pass@1 metric: + +top_p: Set a higher value, such as 0.95, to retain highly probable generated words. This helps ensure more accurate and fluent generation results. 
+ +temperature: Set a lower value, such as 0.1, to reduce randomness. Lower temperature values make the generation output more deterministic. + +These parameter combinations can control the diversity of the generated outputs while maintaining naturalness. Additionally, you can adjust other related parameters, such as repetition_penalty, to reduce repetition in the generated results. + +If you choose the non-sampling mode (do_sample=False), you can consider the following parameter settings: + +beam_num: Set a smaller value such as 1 or 3. `beam_num=1` represents greedy decoding, which selects the most probable single generated word. `beam_num=3` represents beam search mode, which considers multiple potential generation paths and chooses the best path among them. + +## 5. FAQ + +### Q1:What should I do when cuda OOM happens? + +If OOM (Out of Memory) occurs, you can mitigate it by reducing parameters such as per GPU batch size (the first argument when starting the training script) and seq_length. You can also set gradient_checkpointing=true, which significantly reduces memory usage but may slow down the training speed. 
diff --git a/content/en/docs/codefuse-mft-vlm/1_quickstart.md b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/quickstart.en-US.md similarity index 55% rename from content/en/docs/codefuse-mft-vlm/1_quickstart.md rename to docs/docs/developer-docs/CodeFuse-MFT-VLM/main/quickstart.en-US.md index a4761c7..fe7906f 100644 --- a/content/en/docs/codefuse-mft-vlm/1_quickstart.md +++ b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/quickstart.en-US.md @@ -1,66 +1,75 @@ ---- -title: QuickStart -slug: QuickStart -description: QuickStart Document -aliases: -- "/docs/codefuse-mft-vlm-quickstart" ---- - - - -## Contents -- [Install](#Install) -- [Datasets](#Datasets) -- [Multimodal Alignment](#Multimodal-Alignment) -- [Visual Instruction Tuning](#Visual-Instruction-Tuning) -- [Evaluation](#Evaluation) - -## Install -Please run sh init\_env.sh - -## Datasets -Here's the table of datasets we used to train CodeFuse-VLM-14B: - -Dataset | Task Type | Number of Samples -| ------------- | ------------- | ------------- | -synthdog-en | OCR | 800,000 -synthdog-zh | OCR | 800,000 -cc3m(downsampled)| Image Caption | 600,000 -cc3m(downsampled)| Image Caption | 600,000 -SBU | Image Caption | 850,000 -Visual Genome VQA (Downsampled) | Visual Question Answer(VQA) | 500,000 -Visual Genome Region descriptions (Downsampled) | Reference Grouding | 500,000 -Visual Genome objects (Downsampled) | Grounded Caption | 500,000 -OCR VQA (Downsampled) | OCR and VQA | 500,000 - -Please download these datasets on their own official websites. - -## Multimodal Alignment -Please run sh scripts/pretrain.sh or sh scripts/pretrain\_multinode.sh - -## Visual Instruction Tuning -Please run sh scripts/finetune.sh or sh scripts/finetune\_multinode.sh - -## Evaluation -Please run python scripts in directory llava/eval/. 
Our pre-trained CodeFuse-VLM-14B can be loaded with the following code: - -``` -import os -from llava.model.builder import load_mixed_pretrained_model - -model_path = '/pretrained/model/path' -tokenizer, model, image_processor, context_len = load_mixed_pretrained_model(model_path, None, 'qwen-vl-14b', os.path.join(model_path, 'Qwen-VL-visual'), 'cross_attn', os.path.join(model_path, 'mm_projector/mm_projector.bin')) -``` - -You can also run scripts/merge\_qwen\_vl\_weights.sh first and load the merged model by the following code: - -``` -from llava.model import LlavaQWenForCausalLM - -model = LlavaQWenForCausalLM.from_pretrained('/path/to/our/pretrained/model') -``` - -## CodeFuse-VLM Product Video -Here's the demo video of front-end code copilot backed by our VLM model - -https://private-user-images.githubusercontent.com/22836551/300398424-201f667d-6b6b-4548-b3e6-724afc4b3071.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjE5MTIsIm5iZiI6MTcwNjUyMTYxMiwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzOTg0MjQtMjAxZjY2N2QtNmI2Yi00NTQ4LWIzZTYtNzI0YWZjNGIzMDcxLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5NDY1MlomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWI0ZmJmZWNlNDZmNWM3NzA0OThlMmY1ODY4MDkxNWY5ZWNiNzRiYjJkYmE4NjEzM2EwYWRiNWY2ODc3N2ViYjEmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.BIvWGNx0XV7RoauxB0c2noEdbfZfu8-16LPHtCaCJ9k \ No newline at end of file +--- +store: + title: CodeFuse-MFT-VLM + version: main +group: + title: 🌱 CodeFuse-MFT-VLM + order: -1 +title: QuickStart +order: 0 +toc: content +--- + +## Contents + +- [Install](#Install) +- [Datasets](#Datasets) +- [Multimodal Alignment](#Multimodal-Alignment) +- [Visual Instruction Tuning](#Visual-Instruction-Tuning) +- [Evaluation](#Evaluation) + +## Install + 
+Please run sh init_env.sh + +## Datasets + +Here's the table of datasets we used to train CodeFuse-VLM-14B: + +| Dataset | Task Type | Number of Samples | +| ----------------------------------------------- | --------------------------- | ----------------- | +| synthdog-en | OCR | 800,000 | +| synthdog-zh | OCR | 800,000 | +| cc3m(downsampled) | Image Caption | 600,000 | +| cc3m(downsampled) | Image Caption | 600,000 | +| SBU | Image Caption | 850,000 | +| Visual Genome VQA (Downsampled) | Visual Question Answer(VQA) | 500,000 | +| Visual Genome Region descriptions (Downsampled) | Reference Grouding | 500,000 | +| Visual Genome objects (Downsampled) | Grounded Caption | 500,000 | +| OCR VQA (Downsampled) | OCR and VQA | 500,000 | + +Please download these datasets on their own official websites. + +## Multimodal Alignment + +Please run sh scripts/pretrain.sh or sh scripts/pretrain_multinode.sh + +## Visual Instruction Tuning + +Please run sh scripts/finetune.sh or sh scripts/finetune_multinode.sh + +## Evaluation + +Please run python scripts in directory llava/eval/. 
Our pre-trained CodeFuse-VLM-14B can be loaded with the following code: + +``` +import os +from llava.model.builder import load_mixed_pretrained_model + +model_path = '/pretrained/model/path' +tokenizer, model, image_processor, context_len = load_mixed_pretrained_model(model_path, None, 'qwen-vl-14b', os.path.join(model_path, 'Qwen-VL-visual'), 'cross_attn', os.path.join(model_path, 'mm_projector/mm_projector.bin')) +``` + +You can also run scripts/merge_qwen_vl_weights.sh first and load the merged model by the following code: + +``` +from llava.model import LlavaQWenForCausalLM + +model = LlavaQWenForCausalLM.from_pretrained('/path/to/our/pretrained/model') +``` + +## CodeFuse-VLM Product Video + +Here's the demo video of front-end code copilot backed by our VLM model + +https://private-user-images.githubusercontent.com/22836551/300398424-201f667d-6b6b-4548-b3e6-724afc4b3071.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjE5MTIsIm5iZiI6MTcwNjUyMTYxMiwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzOTg0MjQtMjAxZjY2N2QtNmI2Yi00NTQ4LWIzZTYtNzI0YWZjNGIzMDcxLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5NDY1MlomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWI0ZmJmZWNlNDZmNWM3NzA0OThlMmY1ODY4MDkxNWY5ZWNiNzRiYjJkYmE4NjEzM2EwYWRiNWY2ODc3N2ViYjEmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.BIvWGNx0XV7RoauxB0c2noEdbfZfu8-16LPHtCaCJ9k diff --git a/content/zh/docs/codefuse-mft-vlm/1_quickstart.md b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/quickstart.zh-CN.md similarity index 53% rename from content/zh/docs/codefuse-mft-vlm/1_quickstart.md rename to docs/docs/developer-docs/CodeFuse-MFT-VLM/main/quickstart.zh-CN.md index 5f9bbf7..7016b4f 100644 --- a/content/zh/docs/codefuse-mft-vlm/1_quickstart.md +++ 
b/docs/docs/developer-docs/CodeFuse-MFT-VLM/main/quickstart.zh-CN.md @@ -1,65 +1,75 @@ ---- -title: 快速使用 -slug: 快速使用 -description: 快速使用 -aliases: -- "/docs/codefuse-mft-vlm-quickstart-zh" ---- - - -## Contents -- [Install](#Install) -- [Datasets](#Datasets) -- [Multimodal Alignment](#Multimodal-Alignment) -- [Visual Instruction Tuning](#Visual-Instruction-Tuning) -- [Evaluation](#Evaluation) - -## Install -请执行 sh init\_env.sh - -## Datasets -使用了以下数据集训练模型: - -数据集 | 任务种类 | 样本量 -| ------------- | ------------- | ------------- | -synthdog-en | OCR | 800,000 -synthdog-zh | OCR | 800,000 -cc3m(downsampled)| Image Caption | 600,000 -cc3m(downsampled)| Image Caption | 600,000 -SBU | Image Caption | 850,000 -Visual Genome VQA (Downsampled) | Visual Question Answer(VQA) | 500,000 -Visual Genome Region descriptions (Downsampled) | Reference Grouding | 500,000 -Visual Genome objects (Downsampled) | Grounded Caption | 500,000 -OCR VQA (Downsampled) | OCR and VQA | 500,000 - -请到各个数据集的官网上下载这些数据。 - -## Multimodal Alignment -请执行 sh scripts/pretrain.sh 或者 sh scripts/pretrain\_multinode.sh - - -## Visual Instruction Tuning -请执行 sh scripts/finetune.sh 或者 sh scripts/finetune\_multinode.sh - -## Evaluation -请执行 llava/eval/ 当中的python脚本. 
可以通过下面的代码来加载我们预训练的CodeFuse-VLM-14B: - -``` -import os -from llava.model.builder import load_mixed_pretrained_model - -model_path = '/pretrained/model/path' -tokenizer, model, image_processor, context_len = load_mixed_pretrained_model(model_path, None, 'qwen-vl-14b', os.path.join(model_path, 'Qwen-VL-visual'), 'cross_attn', os.path.join(model_path, 'mm_projector/mm_projector.bin')) -``` - -您也可以先运行下面的脚本来合并各个模型组件:scripts/merge\_qwen\_vl\_weights.sh,然后通过下面的代码加载合并后的模型: -``` -from llava.model import LlavaQWenForCausalLM - -model = LlavaQWenForCausalLM.from_pretrained('/path/to/our/pretrained/model') -``` - -## CodeFuse-VLM 产品视频 -这是我们模型支持的产品的视频 - -https://private-user-images.githubusercontent.com/22836551/300398424-201f667d-6b6b-4548-b3e6-724afc4b3071.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjE5MTIsIm5iZiI6MTcwNjUyMTYxMiwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzOTg0MjQtMjAxZjY2N2QtNmI2Yi00NTQ4LWIzZTYtNzI0YWZjNGIzMDcxLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5NDY1MlomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWI0ZmJmZWNlNDZmNWM3NzA0OThlMmY1ODY4MDkxNWY5ZWNiNzRiYjJkYmE4NjEzM2EwYWRiNWY2ODc3N2ViYjEmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.BIvWGNx0XV7RoauxB0c2noEdbfZfu8-16LPHtCaCJ9k \ No newline at end of file +--- +store: + title: CodeFuse-MFT-VLM + version: main +group: + title: 🌱 CodeFuse-MFT-VLM + order: -1 +title: 快速使用 +order: 0 +toc: content +--- + +## Contents + +- [Install](#Install) +- [Datasets](#Datasets) +- [Multimodal Alignment](#Multimodal-Alignment) +- [Visual Instruction Tuning](#Visual-Instruction-Tuning) +- [Evaluation](#Evaluation) + +## Install + +请执行 sh init_env.sh + +## Datasets + +使用了以下数据集训练模型: + +| 数据集 | 任务种类 | 样本量 | +| ----------------------------------------------- | 
--------------------------- | ------- | +| synthdog-en | OCR | 800,000 | +| synthdog-zh | OCR | 800,000 | +| cc3m(downsampled) | Image Caption | 600,000 | +| cc3m(downsampled) | Image Caption | 600,000 | +| SBU | Image Caption | 850,000 | +| Visual Genome VQA (Downsampled) | Visual Question Answer(VQA) | 500,000 | +| Visual Genome Region descriptions (Downsampled) | Reference Grouding | 500,000 | +| Visual Genome objects (Downsampled) | Grounded Caption | 500,000 | +| OCR VQA (Downsampled) | OCR and VQA | 500,000 | + +请到各个数据集的官网上下载这些数据。 + +## Multimodal Alignment + +请执行 sh scripts/pretrain.sh 或者 sh scripts/pretrain_multinode.sh + +## Visual Instruction Tuning + +请执行 sh scripts/finetune.sh 或者 sh scripts/finetune_multinode.sh + +## Evaluation + +请执行 llava/eval/ 当中的 python 脚本. 可以通过下面的代码来加载我们预训练的 CodeFuse-VLM-14B: + +``` +import os +from llava.model.builder import load_mixed_pretrained_model + +model_path = '/pretrained/model/path' +tokenizer, model, image_processor, context_len = load_mixed_pretrained_model(model_path, None, 'qwen-vl-14b', os.path.join(model_path, 'Qwen-VL-visual'), 'cross_attn', os.path.join(model_path, 'mm_projector/mm_projector.bin')) +``` + +您也可以先运行下面的脚本来合并各个模型组件:scripts/merge_qwen_vl_weights.sh,然后通过下面的代码加载合并后的模型: + +``` +from llava.model import LlavaQWenForCausalLM + +model = LlavaQWenForCausalLM.from_pretrained('/path/to/our/pretrained/model') +``` + +## CodeFuse-VLM 产品视频 + +这是我们模型支持的产品的视频 + 
+https://private-user-images.githubusercontent.com/22836551/300398424-201f667d-6b6b-4548-b3e6-724afc4b3071.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjE5MTIsIm5iZiI6MTcwNjUyMTYxMiwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzOTg0MjQtMjAxZjY2N2QtNmI2Yi00NTQ4LWIzZTYtNzI0YWZjNGIzMDcxLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5NDY1MlomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWI0ZmJmZWNlNDZmNWM3NzA0OThlMmY1ODY4MDkxNWY5ZWNiNzRiYjJkYmE4NjEzM2EwYWRiNWY2ODc3N2ViYjEmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.BIvWGNx0XV7RoauxB0c2noEdbfZfu8-16LPHtCaCJ9k diff --git a/content/en/docs/overview/b5.CodeFuseModelCache.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache.en-US.md similarity index 78% rename from content/en/docs/overview/b5.CodeFuseModelCache.md rename to docs/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache.en-US.md index 9e3a93e..56625fb 100644 --- a/content/en/docs/overview/b5.CodeFuseModelCache.md +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache.en-US.md @@ -1,23 +1,24 @@ --- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + index: true + order: -1 title: CodeFuse-ModelCache -slug: CodeFuse-ModelCache -description: 介绍主要功能 -aliases: -- "/docs/codefuse-modelcache" +order: -1 +toc: content --- - -

    -

    -

    -

    - 中文 | - English -

    -

    -
    - ## Contents + - [news](#news) - [Introduction](#Introduction) - [Modules](#Modules) @@ -25,18 +26,23 @@ aliases: - [Contributing](#Contributing) ## news + - 🔥🔥[2023.12.10] we integrate LLM embedding frameworks such as 'llmEmb', 'ONNX', 'PaddleNLP', 'FastText', alone with the image embedding framework 'timm', to bolster embedding functionality. - 🔥🔥[2023.11.20] codefuse-ModelCache has integrated local storage, such as sqlite and faiss, providing users with the convenience of quickly initiating tests. - [2023.08.26] codefuse-ModelCache... ## Introduction -Codefuse-ModelCache is a semantic cache for large language models (LLMs). By caching pre-generated model results, it reduces response time for similar requests and improves user experience.
    This project aims to optimize services by introducing a caching mechanism. It helps businesses and research institutions reduce the cost of inference deployment, improve model performance and efficiency, and provide scalable services for large models. Through open-source, we aim to share and exchange technologies related to large model semantic cache. + +Codefuse-ModelCache is a semantic cache for large language models (LLMs). By caching pre-generated model results, it reduces response time for similar requests and improves user experience.
    This project aims to optimize services by introducing a caching mechanism. It helps businesses and research institutions reduce the cost of inference deployment, improve model performance and efficiency, and provide scalable services for large models. Through open-source, we aim to share and exchange technologies related to large model semantic cache. ## modules -![modelcache modules](/images/codefuse-modelcache/modelcache_modules_20231114.png) + +![modelcache modules](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*Z-6cSr6udKAAAAAAAAAAAAAADlHYAQ/original) ## Acknowledgements + This project has referenced the following open-source projects. We would like to express our gratitude to the projects and their developers for their contributions and research.
    [GPTCache](https://github.com/zilliztech/GPTCache) ## Contributing -ModelCache is a captivating and invaluable project, whether you are an experienced developer or a novice just starting out, your contributions to this project are warmly welcomed. Your involvement in this project, be it through raising issues, providing suggestions, writing code, or documenting and creating examples, will enhance the project's quality and make a significant contribution to the open-source community. \ No newline at end of file + +ModelCache is a captivating and invaluable project, whether you are an experienced developer or a novice just starting out, your contributions to this project are warmly welcomed. Your involvement in this project, be it through raising issues, providing suggestions, writing code, or documenting and creating examples, will enhance the project's quality and make a significant contribution to the open-source community. diff --git a/docs/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache.zh-CN.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache.zh-CN.md new file mode 100644 index 0000000..0d7eaac --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/CodeFuseModelCache.zh-CN.md @@ -0,0 +1,48 @@ +--- +nav: + title: 文档 + order: -1 + second: + title: 开发者文档 + order: -1 +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + index: true + order: -1 +title: CodeFuse-ModelCache +order: -1 +toc: content +--- + +## Contents + +- [新闻](#新闻) +- [项目简介](#项目简介) +- [架构大图](#架构大图) +- [致谢](#致谢) +- [Contributing](#Contributing) + +## 新闻 + +- 🔥🔥[2023.12.10] 增加 llmEmb、onnx、paddlenlp、fasttext 等 LLM embedding 框架,并增加 timm 图片 embedding 框架,用于提供更丰富的 embedding 能力。 +- 🔥🔥[2023.11.20] codefuse-ModelCache 增加本地存储能力, 适配了嵌入式数据库 sqlite、faiss,方便用户快速启动测试。 +- [2023.10.31] codefuse-ModelCache... 
+ +## 项目简介 + +Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义 Cache 的相关技术。 + +## 架构大图 + +![modelcache modules](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*Z-6cSr6udKAAAAAAAAAAAAAADlHYAQ/original) + +## 致谢 + +本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。
    [GPTCache](https://github.com/zilliztech/GPTCache) + +## Contributing + +ModelCache 是一个非常有趣且有用的项目,我们相信这个项目有很大的潜力,无论你是经验丰富的开发者,还是刚刚入门的新手,都欢迎你为这个项目做出一些贡献,包括但不限于:提交问题和建议,参与代码编写,完善文档和示例。你的参与将会使这个项目变得更好,同时也会为开源社区做出贡献。 diff --git a/content/en/docs/codefuse-modelcache/3_config.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/config.en-US.md similarity index 77% rename from content/en/docs/codefuse-modelcache/3_config.md rename to docs/docs/developer-docs/CodeFuse-ModelCache/main/config.en-US.md index 331c967..e028f15 100644 --- a/content/en/docs/codefuse-modelcache/3_config.md +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/config.en-US.md @@ -1,21 +1,27 @@ ---- -title: How to better configure your cache -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-config" -aliases: -- "/docs/codefuse-modelcache-config" ---- - -## Environment Dependencies -- Python version: 3.8 or higher -- To install dependencies: pip install requirements.txt - -## Service Startup -- Before starting the service, the following environment configurations should be performed: -- Install relational database MySQL, import SQL to create tables, SQL file: reference_doc/create_table.sql -- Install vector database Milvus -- Add database access information to the configuration files, which are: - - modelcache/config/milvus_config.ini - - modelcache/config/mysql_config.ini -- Download offline model bin files, refer to: https://huggingface.co/shibing624/text2vec-base-chinese/tree/main, and place the downloaded bin files into the model/text2vec-base-chinese folder -- Start the backend service using the flask4modelcache.py script. 
\ No newline at end of file +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: How to better configure your cache +order: 2 +toc: content +--- + +## Environment Dependencies + +- Python version: 3.8 or higher +- To install dependencies: pip install requirements.txt + +## Service Startup + +- Before starting the service, the following environment configurations should be performed: +- Install relational database MySQL, import SQL to create tables, SQL file: reference_doc/create_table.sql +- Install vector database Milvus +- Add database access information to the configuration files, which are: + - modelcache/config/milvus_config.ini + - modelcache/config/mysql_config.ini +- Download offline model bin files, refer to: https://huggingface.co/shibing624/text2vec-base-chinese/tree/main, and place the downloaded bin files into the model/text2vec-base-chinese folder +- Start the backend service using the flask4modelcache.py script. 
diff --git a/docs/docs/developer-docs/CodeFuse-ModelCache/main/config.zh-CN.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/config.zh-CN.md new file mode 100644 index 0000000..b62dc44 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/config.zh-CN.md @@ -0,0 +1,28 @@ +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: 最佳配置 +order: 2 +toc: content +--- + +## 环境依赖 + +- python 版本: 3.8 及以上 +- 依赖包安装: + `pip install requirements.txt ` + +## 服务启动 + +- 在启动服务前,应该进行如下环境配置: +- 安装关系数据库 mysql, 导入 sql 创建数据表,sql 文件: reference_doc/create_table.sql +- 安装向量数据库 milvus +- 在配置文件中添加数据库访问信息,配置文件为: + - modelcache/config/milvus_config.ini + - modelcache/config/mysql_config.ini +- 离线模型 bin 文件下载, 参考地址:https://huggingface.co/shibing624/text2vec-base-chinese/tree/main,并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中 +- 通过 flask4modelcache.py 脚本启动后端服务。 diff --git a/content/en/docs/codefuse-modelcache/2_feature.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/feature.en-US.md similarity index 72% rename from content/en/docs/codefuse-modelcache/2_feature.md rename to docs/docs/developer-docs/CodeFuse-ModelCache/main/feature.en-US.md index 599f44e..4c98ce2 100644 --- a/content/en/docs/codefuse-modelcache/2_feature.md +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/feature.en-US.md @@ -1,168 +1,168 @@ ---- -title: Feature -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-feature" -aliases: -- "/docs/codefuse-modelcache-feature" ---- - - - - - -From a functional standpoint, to address Huggingface network issues and improve inference speed, local inference capabilities for embeddings have been added. Given some limitations in the SQLAlchemy framework, we have rewritten the relational database interaction module for more flexible database operations. 
In practice, large model products need to interface with multiple users and models; thus, support for multi-tenancy has been added to ModelCache, as well as preliminary compatibility with system commands and multi-turn conversations. - -Below is a feature comparison table for ModelCache and GPTCache modules: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModuleFunction
    ModelCacheGPTCache
    Basic InterfaceData query interface
    Data writing interface
    EmbeddingEmbedding model configuration
    Large model embedding layer
    BERT model long text processing
    Large model invocationDecoupling from large models
    Local loading of embedding model
    Data isolationModel data isolation
    Hyperparameter isolation
    DatabasesMySQL
    Milvus
    OceanBase
    Session managementSingle-turn dialogue
    System commands
    Multi-turn dialogue
    Data managementData persistence
    One-click cache clearance
    Tenant managementSupport for multi-tenancy
    Milvus multi-collection capability
    OtherLong-short dialogue distinction
    - -## Core Features -In ModelCache, the main ideas of GPTCache are carried forward, including a series of core modules: adapter, embedding, similarity, and data_manager. The adapter module's main function is to handle the business logic for various tasks and connect modules like embedding, similarity, and data_manager; the embedding module is responsible for converting text into semantic vector representations, transforming user queries into vectors for recall or storage; the rank module ranks and evaluates the similarity of recalled vectors; the data_manager module manages the database. To better industrialize, we've made architectural and functional upgrades as follows: - -- [x] Architectural Adjustment (Lightweight Integration): Embedded in large model products in a cache mode similar to Redis, it provides semantic caching capabilities without interfering with LLM invocation and security audits, adaptable toall large model services. - -- [x] Multiple Model Loading Schemes: - - Support for loading local embedding models to resolve Huggingface connectivity issues. - - Support for loading various pre-trained model embedding layers. - -- [x] Data Isolation Capabilities: - - Environmental Isolation: Depending on the environment, different database configurations can be pulled to achieve isolation (development, staging, production). - - Multi-Tenant Data Isolation: Dynamically create collections according to the model to isolate data, addressing data isolation issues for multiple models/services in large model products. - -- [x] Support for System Commands: Using concatenation to solve system command issues within the prompt paradigm. - -- [x] Distinguishing Long and Short Texts: Long texts pose more challenges to similarity assessment, so the differentiation between long and short texts has been enhanced, allowing separate configuration of judgment thresholds. 
- -- [x] Performance Optimization for Milvus: Adjusting Milvus's consistency_level to "Session" level for better performance. - -- [x] Data Management Capabilities: - - One-click cache clearing ability for data management after model upgrades. - - Recall hit queries for subsequent data analysis and model iteration reference. - - Asynchronous log write-back capability for data analysis and statistics. - - Added model fields and data statistics fields for feature expansion. - - Future features that will continue to be built upon include: -- [ ] Data isolation based on hyperparameters. -- [ ] System prompt partitioned storage capability to improve the accuracy and efficiency of similarity matching. -- [ ] More versatile embedding models and similarity evaluation algorithms. \ No newline at end of file +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: Feature +order: 1 +toc: content +--- + +From a functional standpoint, to address Huggingface network issues and improve inference speed, local inference capabilities for embeddings have been added. Given some limitations in the SQLAlchemy framework, we have rewritten the relational database interaction module for more flexible database operations. In practice, large model products need to interface with multiple users and models; thus, support for multi-tenancy has been added to ModelCache, as well as preliminary compatibility with system commands and multi-turn conversations. + +Below is a feature comparison table for ModelCache and GPTCache modules: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ModuleFunction
    ModelCacheGPTCache
    Basic InterfaceData query interface
    Data writing interface
    EmbeddingEmbedding model configuration
    Large model embedding layer
    BERT model long text processing
    Large model invocationDecoupling from large models
    Local loading of embedding model
    Data isolationModel data isolation
    Hyperparameter isolation
    DatabasesMySQL
    Milvus
    OceanBase
    Session managementSingle-turn dialogue
    System commands
    Multi-turn dialogue
    Data managementData persistence
    One-click cache clearance
    Tenant managementSupport for multi-tenancy
    Milvus multi-collection capability
    OtherLong-short dialogue distinction
    + +## Core Features + +In ModelCache, the main ideas of GPTCache are carried forward, including a series of core modules: adapter, embedding, similarity, and data_manager. The adapter module's main function is to handle the business logic for various tasks and connect modules like embedding, similarity, and data_manager; the embedding module is responsible for converting text into semantic vector representations, transforming user queries into vectors for recall or storage; the rank module ranks and evaluates the similarity of recalled vectors; the data_manager module manages the database. To better industrialize, we've made architectural and functional upgrades as follows: + +- [x] Architectural Adjustment (Lightweight Integration): Embedded in large model products in a cache mode similar to Redis, it provides semantic caching capabilities without interfering with LLM invocation and security audits, adaptable toall large model services. + +- [x] Multiple Model Loading Schemes: + + - Support for loading local embedding models to resolve Huggingface connectivity issues. + - Support for loading various pre-trained model embedding layers. + +- [x] Data Isolation Capabilities: + + - Environmental Isolation: Depending on the environment, different database configurations can be pulled to achieve isolation (development, staging, production). + - Multi-Tenant Data Isolation: Dynamically create collections according to the model to isolate data, addressing data isolation issues for multiple models/services in large model products. + +- [x] Support for System Commands: Using concatenation to solve system command issues within the prompt paradigm. + +- [x] Distinguishing Long and Short Texts: Long texts pose more challenges to similarity assessment, so the differentiation between long and short texts has been enhanced, allowing separate configuration of judgment thresholds. 
+ +- [x] Performance Optimization for Milvus: Adjusting Milvus's consistency_level to "Session" level for better performance. + +- [x] Data Management Capabilities: + - One-click cache clearing ability for data management after model upgrades. + - Recall hit queries for subsequent data analysis and model iteration reference. + - Asynchronous log write-back capability for data analysis and statistics. + - Added model fields and data statistics fields for feature expansion. + - Future features that will continue to be built upon include: +- [ ] Data isolation based on hyperparameters. +- [ ] System prompt partitioned storage capability to improve the accuracy and efficiency of similarity matching. +- [ ] More versatile embedding models and similarity evaluation algorithms. diff --git a/content/zh/docs/codefuse-modelcache/2_feature.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/feature.zh-CN.md similarity index 56% rename from content/zh/docs/codefuse-modelcache/2_feature.md rename to docs/docs/developer-docs/CodeFuse-ModelCache/main/feature.zh-CN.md index 440a448..f37002f 100644 --- a/content/zh/docs/codefuse-modelcache/2_feature.md +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/feature.zh-CN.md @@ -1,159 +1,160 @@ ---- -title: 功能特性 -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-feature-zh" -aliases: -- "/docs/codefuse-modelcache-feature-zh" ---- - - - - -功能方面,为了解决huggingface网络问题并提升推理速度,增加了embedding本地推理能力。鉴于SqlAlchemy框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在ModelCache中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模块功能
    ModelCacheGPTCache
    基础接口数据查询接口
    数据写入接口
    Embeddingembedding模型配置
    大模型embedding层
    bert模型长文本处理
    Large model invocation是否与大模型解耦
    embeddingg模型本地加载
    数据隔离模型数据隔离
    超参数隔离
    数据库MySQL
    Milvus
    OceanBase
    会话管理单轮回话
    system指令
    多轮回话
    数据管理数据持久化
    一键清空缓存
    租户管理支持多租户(多模型)
    milvus多表能力
    其他长短对话区分能力
    - -## 核心功能 -在ModelCache中,沿用了GPTCache的主要思想,包含了一系列核心模块:adapter、embedding、similarity和data_manager。adapter模块主要功能是处理各种任务的业务逻辑,并且能够将embedding、similarity、data_manager等模块串联起来;embedding模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank模块用于对召回的向量进行相似度排序和评估;data_manager模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下: - -- [x] 架构调整(轻量化集成):以类redis的缓存模式嵌入到大模型产品中,提供语义缓存能力,不会干扰LLM调用和安全审核等功能,适配所有大模型服务。 -- [x] 多种模型加载方案: - - 支持加载本地embedding模型,解决huggingface网络连通问题 - - 支持加载多种预训练模型embeding层 -- [x] 数据隔离能力 - - 环境隔离:可依据环境,拉取不同的数据库配置,实现环境隔离(开发、预发、生产) - - 多租户数据隔离:根据模型动态创建collection,进行数据隔离,用于大模型产品中多个模型/服务数据隔离问题 -- [x] 支持系统指令:采用拼接的方式,解决propmt范式中sys指令问题。 -- [x] 长短文本区分:长文本会给相似评估带来更多挑战,增加了长短文本的区分,可单独配置判断阈值。 -- [x] milvus性能优化:milvus consistency_level调整为"Session"级别,可以得到更好的性能。 -- [x] 数据管理能力: - - 一键清空缓存的能力,用于模型升级后的数据管理。 - - 召回hitquery,用于后续的数据分析和模型迭代参考。 - - 异步日志回写能力,用于数据分析和统计 - - 增加model字段和数据统计字段,用于功能拓展。 - -未来会持续建设的功能: - -- [ ] 基于超参数的数据隔离 -- [ ] system promt分区存储能力,以提高相似度匹配的准确度和效率 -- [ ] 更通用的embedding模型和相似度评估算法 \ No newline at end of file +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: 功能特性 +order: 1 +toc: content +--- + +功能方面,为了解决 huggingface 网络问题并提升推理速度,增加了 embedding 本地推理能力。鉴于 SqlAlchemy 框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在 ModelCache 中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    模块功能
    ModelCacheGPTCache
    基础接口数据查询接口
    数据写入接口
    Embeddingembedding模型配置
    大模型embedding层
    bert模型长文本处理
    大模型调用是否与大模型解耦
    embedding模型本地加载
    数据隔离模型数据隔离
    超参数隔离
    数据库MySQL
    Milvus
    OceanBase
    会话管理单轮会话
    system指令
    多轮会话
    数据管理数据持久化
    一键清空缓存
    租户管理支持多租户(多模型)
    milvus多表能力
    其他长短对话区分能力
    + +## 核心功能 + +在 ModelCache 中,沿用了 GPTCache 的主要思想,包含了一系列核心模块:adapter、embedding、similarity 和 data_manager。adapter 模块主要功能是处理各种任务的业务逻辑,并且能够将 embedding、similarity、data_manager 等模块串联起来;embedding 模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank 模块用于对召回的向量进行相似度排序和评估;data_manager 模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下: + +- [x] 架构调整(轻量化集成):以类 redis 的缓存模式嵌入到大模型产品中,提供语义缓存能力,不会干扰 LLM 调用和安全审核等功能,适配所有大模型服务。 +- [x] 多种模型加载方案: + - 支持加载本地 embedding 模型,解决 huggingface 网络连通问题 + - 支持加载多种预训练模型 embeding 层 +- [x] 数据隔离能力 + - 环境隔离:可依据环境,拉取不同的数据库配置,实现环境隔离(开发、预发、生产) + - 多租户数据隔离:根据模型动态创建 collection,进行数据隔离,用于大模型产品中多个模型/服务数据隔离问题 +- [x] 支持系统指令:采用拼接的方式,解决 propmt 范式中 sys 指令问题。 +- [x] 长短文本区分:长文本会给相似评估带来更多挑战,增加了长短文本的区分,可单独配置判断阈值。 +- [x] milvus 性能优化:milvus consistency_level 调整为"Session"级别,可以得到更好的性能。 +- [x] 数据管理能力: + - 一键清空缓存的能力,用于模型升级后的数据管理。 + - 召回 hitquery,用于后续的数据分析和模型迭代参考。 + - 异步日志回写能力,用于数据分析和统计 + - 增加 model 字段和数据统计字段,用于功能拓展。 + +未来会持续建设的功能: + +- [ ] 基于超参数的数据隔离 +- [ ] system promt 分区存储能力,以提高相似度匹配的准确度和效率 +- [ ] 更通用的 embedding 模型和相似度评估算法 diff --git a/content/en/docs/codefuse-modelcache/1_quickstart.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/quickstart.en-US.md similarity index 93% rename from content/en/docs/codefuse-modelcache/1_quickstart.md rename to docs/docs/developer-docs/CodeFuse-ModelCache/main/quickstart.en-US.md index 66ea1ff..51b8fab 100644 --- a/content/en/docs/codefuse-modelcache/1_quickstart.md +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/quickstart.en-US.md @@ -1,48 +1,55 @@ ---- -title: QuickStart -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-quickstart" -aliases: -- "/docs/codefuse-modelcache-quickstart" ---- - - -ModelCache is easy to use, and you can build a cache testing demo in just one step. 
- -## Quick Start -### Building a Cache -The default interface for Cache is shown below: -``` -class Cache: - # it should be called when start the cache system - def __init__(self): - self.has_init = False - self.cache_enable_func = None - self.embedding_func = None - self.post_process_messages_func = None - self.config = Config() -``` - -Before creating a ModelCache, consider the following questions: - -How will you generate embedding vectors for queries? (embedding_func) This function embeds text into a dense vector for contextual similarity search. ModelCache can support various methods of embedding context: Huggingface, ONNX, and SentenceTransformers. In the default logic, the text2vec model from huggingface, which performs better in the Chinese domain, is used. Simply initialize your embedding function to: text2vec.to_embeddings -``` -data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) -cache.init( - embedding_func=data2vec.to_embeddings, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, - insert_pre_embedding_func=insert_multi_splicing, -) -``` - - -Where will you cache data? (data_manager cache storage) The cache storage is used to store all scalar data such as original questions, prompts, answers, and access times. ModelCache supports multiple cache storage options like SQLite, MySQL, and OceanBase. More NoSQL database options will be added in the future. -Where will you store and search vector embeddings? (data_manager vector storage) The vector storage component is used to store and search all embedding vectors to semantically find the most similar results. ModelCache supports vector search libraries like FAISS or vector databases like Milvus. More vector database and cloud service options will be added in the future. 
-Here are some examples: -``` -data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) -data_manager = get_data_manager(CacheBase("oceanbase"), VectorBase("milvus", dimension=data2vec.dimension)) -``` \ No newline at end of file +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: QuickStart +order: 0 +toc: content +--- + +ModelCache is easy to use, and you can build a cache testing demo in just one step. + +## Quick Start + +### Building a Cache + +The default interface for Cache is shown below: + +``` +class Cache: + # it should be called when start the cache system + def __init__(self): + self.has_init = False + self.cache_enable_func = None + self.embedding_func = None + self.post_process_messages_func = None + self.config = Config() +``` + +Before creating a ModelCache, consider the following questions: + +How will you generate embedding vectors for queries? (embedding_func) This function embeds text into a dense vector for contextual similarity search. ModelCache can support various methods of embedding context: Huggingface, ONNX, and SentenceTransformers. In the default logic, the text2vec model from huggingface, which performs better in the Chinese domain, is used. Simply initialize your embedding function to: text2vec.to_embeddings + +``` +data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), + VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) +cache.init( + embedding_func=data2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + query_pre_embedding_func=query_multi_splicing, + insert_pre_embedding_func=insert_multi_splicing, +) +``` + +Where will you cache data? (data_manager cache storage) The cache storage is used to store all scalar data such as original questions, prompts, answers, and access times. 
ModelCache supports multiple cache storage options like SQLite, MySQL, and OceanBase. More NoSQL database options will be added in the future. +Where will you store and search vector embeddings? (data_manager vector storage) The vector storage component is used to store and search all embedding vectors to semantically find the most similar results. ModelCache supports vector search libraries like FAISS or vector databases like Milvus. More vector database and cloud service options will be added in the future. +Here are some examples: + +``` +data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) +data_manager = get_data_manager(CacheBase("oceanbase"), VectorBase("milvus", dimension=data2vec.dimension)) +``` diff --git a/content/zh/docs/codefuse-modelcache/1_quickstart.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/quickstart.zh-CN.md similarity index 55% rename from content/zh/docs/codefuse-modelcache/1_quickstart.md rename to docs/docs/developer-docs/CodeFuse-ModelCache/main/quickstart.zh-CN.md index 5961216..caeb49f 100644 --- a/content/zh/docs/codefuse-modelcache/1_quickstart.md +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/quickstart.zh-CN.md @@ -1,50 +1,57 @@ ---- -title: QuickStart -description: 介绍主要功能 -url: "/docs/codefuse-modelcache-quickstart-zh" -aliases: -- "/docs/codefuse-modelcache-quickstart-zh" ---- - - - -ModelCache易于使用,只需1步骤即可构建缓存测试Demo - -## 快速开始 -### 构建Cache -Cache的默认接口如下所示: -``` -class Cache: - # it should be called when start the cache system - def __init__(self): - self.has_init = False - self.cache_enable_func = None - self.embedding_func = None - self.post_process_messages_func = None - self.config = Config() -``` - -在创建ModelCache之前,请考虑以下问题: -- 你将如何为查询生成嵌入向量?(embedding_func) 该函数将文本嵌入到一个用于上下文相似性搜索的密集向量中。ModelCache可以支持多种嵌入上下文的方法:Huggingface、ONNX和SentenceTransformers。默认逻辑中,使用了在中文领域表现更好的huggingface中的text2vec模型。只需将你的嵌入函数初始化为:text2vec.to_embeddings - -``` -data_manager = 
get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) - -cache.init( - embedding_func=data2vec.to_embeddings, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, - insert_pre_embedding_func=insert_multi_splicing, -) -``` - -- 你将在哪里缓存数据?(data_manager缓存存储) 缓存存储用于存储所有标量数据,例如原始问题、提示、答案和访问时间。ModelCache支持多种缓存存储选项,如SQLite、MySQL和OceanBase。未来还将添加更多的NoSQL数据库选项。 -- 你将在哪里存储和搜索向量嵌入?(data_manager向量存储) 向量存储组件用于存储和搜索所有嵌入向量,以便在语义上找到最相似的结果。ModelCache支持使用FAISS等向量搜索库或Milvus等向量数据库。未来还将添加更多的向量数据库和云服务选项。 - -以下是一些示例: -``` -data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) -data_manager = get_data_manager(CacheBase("oceanbase"), VectorBase("milvus", dimension=data2vec.dimension)) -``` +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: 快速开始 +order: 0 +toc: content +--- + +ModelCache 易于使用,只需 1 步骤即可构建缓存测试 Demo + +## 快速开始 + +### 构建 Cache + +Cache 的默认接口如下所示: + +``` +class Cache: + # it should be called when start the cache system + def __init__(self): + self.has_init = False + self.cache_enable_func = None + self.embedding_func = None + self.post_process_messages_func = None + self.config = Config() +``` + +在创建 ModelCache 之前,请考虑以下问题: + +- 你将如何为查询生成嵌入向量?(embedding_func) 该函数将文本嵌入到一个用于上下文相似性搜索的密集向量中。ModelCache 可以支持多种嵌入上下文的方法:Huggingface、ONNX 和 SentenceTransformers。默认逻辑中,使用了在中文领域表现更好的 huggingface 中的 text2vec 模型。只需将你的嵌入函数初始化为:text2vec.to_embeddings + +``` +data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), + VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + +cache.init( + embedding_func=data2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + query_pre_embedding_func=query_multi_splicing, + 
insert_pre_embedding_func=insert_multi_splicing, +) +``` + +- 你将在哪里缓存数据?(data_manager 缓存存储) 缓存存储用于存储所有标量数据,例如原始问题、提示、答案和访问时间。ModelCache 支持多种缓存存储选项,如 SQLite、MySQL 和 OceanBase。未来还将添加更多的 NoSQL 数据库选项。 +- 你将在哪里存储和搜索向量嵌入?(data_manager 向量存储) 向量存储组件用于存储和搜索所有嵌入向量,以便在语义上找到最相似的结果。ModelCache 支持使用 FAISS 等向量搜索库或 Milvus 等向量数据库。未来还将添加更多的向量数据库和云服务选项。 + +以下是一些示例: + +``` +data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) +data_manager = get_data_manager(CacheBase("oceanbase"), VectorBase("milvus", dimension=data2vec.dimension)) +``` diff --git a/docs/docs/developer-docs/CodeFuse-ModelCache/main/release_note.en-US.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/release_note.en-US.md new file mode 100644 index 0000000..baf9e93 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/release_note.en-US.md @@ -0,0 +1,23 @@ +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: Release Note +order: 3 +toc: content +--- + +| 时间 | 功能 | 版本号 | +| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------ | +| 20230430 | Completed GPTCache research, open-source process running through OpenAI interface, single-node form | 无 | +| 20230509 | 1. Completed technology selection and upstream/downstream interaction scheme
    2. Redeveloped database module, replaced SQLAlchemy framework
    3. Refactored llm_handler module, compatible with codegpt, adapted codegpt model parameters | V0.1.0 | +| 20230519 | 1. Dynamically selected codegpt service mode based on environment
    2. Capability for local model loading and pre-loading
    3. Added dynamic loading capability for local paths based on environment | V0.1.1 | +| 20230522 | 1. Architecture optimized, adjusted to a Redis-like structure, decoupled large model invocation
    2. Switched relational database from SQLite to OceanBase
    3. Switched vector database from FAISS to Milvus
    4. Model data isolation capability
    5. Added core modules adapter_query, adapter_insert | V0.2.0 | +| 20230531 | 1. Online environment launched with dynamic sensing capability
    2. Embedding model evaluation and selection
    3. Added staging environment and data isolation capability
    4. Added exposure capability for the original query field | V0.2.1 | +| 20230607 | 1. Optimized relational database access performance
    2. Optimized environment and model isolation capabilities | V0.2.2 | +| 20230630 | 1. Added large model embedding layer adaptation module in modelCache
    2. Added adoption rate statistical capability | V0.2.3 | +| 20230730 | 1. Added cache statistics feature
    2. Added data deletion function interface
    3. One-click cache clearing capability launched
    4. Developed multi-turn conversation ability, supporting system commands and multi-turn dialogues | v0.3.0 | +| 20230830 | 1. Added asynchronous processing capability, performance improved by over 20%
    2. Architecture change, decoupled embedding inference and business processing logic
    3. Blacklist filtering feature | V0.3.1 | diff --git a/docs/docs/developer-docs/CodeFuse-ModelCache/main/release_note.zh-CN.md b/docs/docs/developer-docs/CodeFuse-ModelCache/main/release_note.zh-CN.md new file mode 100644 index 0000000..5d5d297 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-ModelCache/main/release_note.zh-CN.md @@ -0,0 +1,23 @@ +--- +store: + title: CodeFuse-ModelCache + version: main +group: + title: 🌱 CodeFuse-ModelCache + order: -1 +title: 版本记录 +order: 3 +toc: content +--- + +| 时间 | 功能 | 版本号 | +| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | +| 20230430 | 完成 GPTCache 调研,开源流程在 OpenAI 接口上跑通,单节点形式 | 无 | +| 20230509 | 1、完成技术选型及上下游交互方案
    2、重新开发数据库模块,替换 SQLAlchemy 框架
    3、重构 llm_handler 模块,兼容 codegpt,适配 codegpt 模型参数 | V0.1.0 | +| 20230519 | 1、根据环境动态选择 codegpt 服务模式
    2、模型本地加载能力,以及预加载能力
    3、增加本地路径依据环境动态加载能力 | V0.1.1 | +| 20230522 | 1、架构优化,调整为类 redis 结构,解耦大模型调用
    2、关系数据库由 sqlite 切换至 OceanBase
    3、向量数据库由 faiss 切换至 milvus
    4、模型数据隔离能力
    5、增加核心模块 adapter_query、adapter_insert | V0.2.0 | +| 20230531 | 1、线上环境上线,动态感知能力
    2、embedding 模型评测及选型
    3、增加预发环境及数据隔离能力
    4、增加原始 query 字段透出能力 | V0.2.1 | +| 20230607 | 1、优化关系数据库访问性能
    2、优化环境和模型隔离能力 | V0.2.2 | +| 20230630 | 1、在 modelCache 中增加大模型 embedding 层适配模块
    2、增加采纳率统计能力 | V0.2.3 | +| 20230730 | 1、增加缓存统计功能
    2、增加数据删除功能接口
    3、缓存一键清空能力上线
    4、多轮会话能力研发,支持 system 指令和多轮对话 | v0.3.0 | +| 20230830 | 1、增加异步处理能力,性能提升超 20%
    2、架构变更,解耦 embedding 推理和业务处理逻辑
    3、黑名单过滤功能 | V0.3.1 | diff --git a/content/en/docs/overview/b8.CodeFuseQuery.md b/docs/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery.en-US.md similarity index 92% rename from content/en/docs/overview/b8.CodeFuseQuery.md rename to docs/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery.en-US.md index 875dc25..119f9eb 100644 --- a/content/en/docs/overview/b8.CodeFuseQuery.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery.en-US.md @@ -1,25 +1,37 @@ --- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + index: true + order: -1 title: CodeFuse-Query -slug: CodeFuse-Query -description: 介绍主要功能 -aliases: -- "/docs/codefuse-query" +order: -1 +toc: content --- ## CodeFuse-Query -With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. For example, a security team might need sophisticated algorithms like context-sensitive taint analysis to review smaller codebases, while project managers might need a lighter algorithm, such as one that calculates cyclomatic complexity, to measure developer productivity on larger codebases. + +With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. 
Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. For example, a security team might need sophisticated algorithms like context-sensitive taint analysis to review smaller codebases, while project managers might need a lighter algorithm, such as one that calculates cyclomatic complexity, to measure developer productivity on larger codebases. These diversified needs, coupled with the common computational resource constraints in large organizations, pose a significant challenge. Traditional tools, with their problem-specific computation methods, often fail to scale in such environments. This is why we introduced CodeQuery, a centralized data platform specifically designed for large-scale static analysis. In implementing CodeQuery, we treat source code and analysis results as data, and the execution process as big data processing, a significant departure from traditional tool-centric approaches. We leverage common systems in large organizations, such as data warehouses, data computation facilities like MaxCompute and Hive, OSS object storage, and flexible computing resources like Kubernetes, allowing CodeQuery to integrate seamlessly into these systems. This approach makes CodeQuery highly maintainable and scalable, capable of supporting diverse needs and effectively addressing changing demands. Furthermore, CodeQuery's open architecture encourages interoperability between various internal systems, facilitating seamless interaction and data exchange. 
This level of integration and interaction not only increases the degree of automation within the organization but also improves efficiency and reduces the likelihood of manual errors. By breaking down information silos and fostering a more interconnected, automated environment, CodeQuery significantly enhances the overall productivity and efficiency of the software development process. -Moreover, CodeQuery's data-centric approach offers unique advantages when addressing domain-specific challenges in static source code analysis. For instance, source code is typically a highly structured and interconnected dataset, with strong informational and relational ties to other code and configuration files. By treating code as data, CodeQuery can adeptly handle these issues, making it especially suitable for use in large organizations where codebases evolve continuously but incrementally, with most code undergoing minor changes daily while remaining stable. CodeQuery also supports use cases like code-data based Business Intelligence (BI), generating reports and dashboards to aid in monitoring and decision-making processes. Additionally, CodeQuery plays an important role in analyzing training data for large language models (LLMs), providing deep insights to enhance the overall effectiveness of these models. +Moreover, CodeQuery's data-centric approach offers unique advantages when addressing domain-specific challenges in static source code analysis. For instance, source code is typically a highly structured and interconnected dataset, with strong informational and relational ties to other code and configuration files. By treating code as data, CodeQuery can adeptly handle these issues, making it especially suitable for use in large organizations where codebases evolve continuously but incrementally, with most code undergoing minor changes daily while remaining stable. 
CodeQuery also supports use cases like code-data based Business Intelligence (BI), generating reports and dashboards to aid in monitoring and decision-making processes. Additionally, CodeQuery plays an important role in analyzing training data for large language models (LLMs), providing deep insights to enhance the overall effectiveness of these models. -In the current field of static analysis, CodeQuery introduces a new paradigm. It not only meets the needs of analyzing large, complex codebases but is also adaptable to the ever-changing and diversified scenarios of static analysis. CodeQuery's data-centric approach gives it a unique advantage in dealing with code analysis issues in big data environments. Designed to address static analysis problems in large-scale software development settings, it views both source code and analysis results as data, allowing it to integrate flexibly into various systems within large organizations. This approach not only enables efficient handling of large codebases but can also accommodate various complex analysis needs, thereby making static analysis work more effective and accurate. +In the current field of static analysis, CodeQuery introduces a new paradigm. It not only meets the needs of analyzing large, complex codebases but is also adaptable to the ever-changing and diversified scenarios of static analysis. CodeQuery's data-centric approach gives it a unique advantage in dealing with code analysis issues in big data environments. Designed to address static analysis problems in large-scale software development settings, it views both source code and analysis results as data, allowing it to integrate flexibly into various systems within large organizations. This approach not only enables efficient handling of large codebases but can also accommodate various complex analysis needs, thereby making static analysis work more effective and accurate. 
The characteristics and advantages of CodeQuery can be summarized as follows: -- **Highly Scalable**: CodeQuery can handle large codebases and adapt to different analysis needs. This high level of scalability makes CodeQuery particularly valuable in large organizations. -- **Data-Centric**: By treating source code and analysis results as data, CodeQuery's data-centric approach gives it a distinct edge in addressing code analysis problems in big data environments. -- **Highly Integrated**: CodeQuery can integrate seamlessly into various systems within large organizations, including data warehouses, data computation facilities, object storage, and flexible computing resources. This high level of integration makes the use of CodeQuery in large organizations more convenient and efficient. -- **Supports Diverse Needs**: CodeQuery can process large codebases and accommodate various complex analysis needs, including QoS analysis, cross-language analysis, algorithmic needs, and performance requirements. +- **Highly Scalable**: CodeQuery can handle large codebases and adapt to different analysis needs. This high level of scalability makes CodeQuery particularly valuable in large organizations. +- **Data-Centric**: By treating source code and analysis results as data, CodeQuery's data-centric approach gives it a distinct edge in addressing code analysis problems in big data environments. +- **Highly Integrated**: CodeQuery can integrate seamlessly into various systems within large organizations, including data warehouses, data computation facilities, object storage, and flexible computing resources. This high level of integration makes the use of CodeQuery in large organizations more convenient and efficient. +- **Supports Diverse Needs**: CodeQuery can process large codebases and accommodate various complex analysis needs, including QoS analysis, cross-language analysis, algorithmic needs, and performance requirements. 
-CodeQuery is a powerful static code analysis platform, suitable for large-scale, complex codebase analysis scenarios. Its data-centric approach and high scalability give it a unique advantage in the modern software development environment. As static code analysis technology continues to evolve, CodeQuery is expected to play an increasingly important role in this field. \ No newline at end of file +CodeQuery is a powerful static code analysis platform, suitable for large-scale, complex codebase analysis scenarios. Its data-centric approach and high scalability give it a unique advantage in the modern software development environment. As static code analysis technology continues to evolve, CodeQuery is expected to play an increasingly important role in this field. diff --git a/content/zh/docs/overview/b8.CodeFuseQuery.md b/docs/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery.zh-CN.md similarity index 95% rename from content/zh/docs/overview/b8.CodeFuseQuery.md rename to docs/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery.zh-CN.md index 548cda2..05328e1 100644 --- a/content/zh/docs/overview/b8.CodeFuseQuery.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/CodeFuseQuery.zh-CN.md @@ -1,12 +1,24 @@ --- +nav: + title: 文档 + order: -1 + second: + title: 开发者文档 + order: -1 +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + index: true + order: -1 title: CodeFuse-Query -slug: CodeFuse-Query-zh -description: 介绍主要功能 -aliases: -- "/docs/codefuse-query-zh" +order: -1 +toc: content --- ## CodeFuse-Query + 随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。 这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 diff --git a/LICENSE 
b/docs/docs/developer-docs/CodeFuse-Query/main/LICENSE.md similarity index 98% rename from LICENSE rename to docs/docs/developer-docs/CodeFuse-Query/main/LICENSE.md index 261eeb9..1d4edb4 100644 --- a/LICENSE +++ b/docs/docs/developer-docs/CodeFuse-Query/main/LICENSE.md @@ -1,3 +1,16 @@ +--- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 +title: 条款和条件 +order: 5 +toc: content +--- + +``` Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,3 +212,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +``` diff --git a/content/en/docs/codefuse-query/4_godelscript_language.en.md b/docs/docs/developer-docs/CodeFuse-Query/main/godelscript_language.en-US.md similarity index 88% rename from content/en/docs/codefuse-query/4_godelscript_language.en.md rename to docs/docs/developer-docs/CodeFuse-Query/main/godelscript_language.en-US.md index daf7e22..3dcd60e 100644 --- a/content/en/docs/codefuse-query/4_godelscript_language.en.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/godelscript_language.en-US.md @@ -1,13 +1,15 @@ --- -title: GodelLanguage -slug: GodelLanguage -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-godellanguage -aliases: -- "/docs/codefuse-query-godellanguage" +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 +title: GodelLanguage +order: 2 +toc: content --- - # GödelScript Query Language ## Index @@ -157,85 +159,85 @@ GödelScript includes basic types `int`, `string`, and `bool`. `bool` is a basic #### `int` Type Native Functions -| Function | Type | Explanation | -| --- | --- | --- | -| pow | (int, int) -> int | Exponentiation. Arguments must be non-negative numbers. | -| rem | (int, int) -> int | Remainder operation. | -| bitand | (int, int) -> int | Bitwise conjunction. 
| -| bitor | (int, int) -> int | Bitwise disjunction. | -| bitxor | (int, int) -> int | Bitwise exclusive disjunction. | -| bitnot | (int) -> int | Bitwise negation. | -| neg | (int) -> int | Arithmetic negation. | -| to_string | (int) -> string | Conversion to a string. | -| add | (int, int) -> int | Addition (+). | -| sub | (int, int) -> int | Subtraction (-). | -| mul | (int, int) -> int | Multiplication (*). | -| div | (int, int) -> int | Division (/). | -| eq | (int, int) -> bool | Equality (=). | -| ne | (int, int) -> bool | Inequality (!=). | -| gt | (int, int) -> bool | Greater than (>). | -| ge | (int, int) -> bool | Greater than or equal to (>=). | -| lt | (int, int) -> bool | Less than (<). | -| le | (int, int) -> bool | Less than or equal to (<=). | -| to_set | (int) -> *int | Cast to a set type. | +| Function | Type | Explanation | +| --------- | ------------------ | ------------------------------------------------------- | +| pow | (int, int) -> int | Exponentiation. Arguments must be non-negative numbers. | +| rem | (int, int) -> int | Remainder operation. | +| bitand | (int, int) -> int | Bitwise conjunction. | +| bitor | (int, int) -> int | Bitwise disjunction. | +| bitxor | (int, int) -> int | Bitwise exclusive disjunction. | +| bitnot | (int) -> int | Bitwise negation. | +| neg | (int) -> int | Arithmetic negation. | +| to_string | (int) -> string | Conversion to a string. | +| add | (int, int) -> int | Addition (+). | +| sub | (int, int) -> int | Subtraction (-). | +| mul | (int, int) -> int | Multiplication (\*). | +| div | (int, int) -> int | Division (/). | +| eq | (int, int) -> bool | Equality (=). | +| ne | (int, int) -> bool | Inequality (!=). | +| gt | (int, int) -> bool | Greater than (>). | +| ge | (int, int) -> bool | Greater than or equal to (>=). | +| lt | (int, int) -> bool | Less than (<). | +| le | (int, int) -> bool | Less than or equal to (<=). | +| to_set | (int) -> \*int | Cast to a set type. 
| #### `string` Type Native Functions -| Function | Type | Explanation | -| --- | --- | --- | -| len | (string) -> int | Gets the length of a string. | -| substr | (string, int, int) -> string | Substring extraction using initial index and length. | -| contains | (string, string) -> bool | Checks if one string is contained within the current string. | -| matches | (string, string) -> bool | Checks if a regular expression fully matches the current string. | -| get_regex_match_result | (string, string, int) -> string | Gets a capture result from a full regex match on the current string, determined by the second parameter (int). For example, "abcdef".get_regex_match_result("a(.*)f", 1) yields "bcde". | -| to_int | (string) -> int | Converts to an integer. | -| add | (string, string) -> string | String concatenation. | -| eq | (string, string) -> bool | Checks string equality. | -| ne | (string, string) -> bool | Checks string inequality. | -| to_set | (string) -> *string | Cast to a set type. | +| Function | Type | Explanation | +| ---------------------- | ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| len | (string) -> int | Gets the length of a string. | +| substr | (string, int, int) -> string | Substring extraction using initial index and length. | +| contains | (string, string) -> bool | Checks if one string is contained within the current string. | +| matches | (string, string) -> bool | Checks if a regular expression fully matches the current string. | +| get_regex_match_result | (string, string, int) -> string | Gets a capture result from a full regex match on the current string, determined by the second parameter (int). For example, "abcdef".get_regex_match_result("a(.\*)f", 1) yields "bcde". | +| to_int | (string) -> int | Converts to an integer. 
| +| add | (string, string) -> string | String concatenation. | +| eq | (string, string) -> bool | Checks string equality. | +| ne | (string, string) -> bool | Checks string inequality. | +| to_set | (string) -> \*string | Cast to a set type. | #### `bool` Type Native Functions While `bool` exists as a basic type, it cannot be used as data in intermediate calculations, only as a conditional result. -| Function | Type | Explanation | -| --- | --- | --- | -| not | (bool) -> bool | Logical negation. | -| and | (bool, bool) -> bool | Logical conjunction. | -| or | (bool, bool) -> bool | Logical disjunction. | -| eq | (bool, bool) -> bool | Equality. | -| ne | (bool, bool) -> bool | Inequality. | +| Function | Type | Explanation | +| -------- | -------------------- | -------------------- | +| not | (bool) -> bool | Logical negation. | +| and | (bool, bool) -> bool | Logical conjunction. | +| or | (bool, bool) -> bool | Logical disjunction. | +| eq | (bool, bool) -> bool | Equality. | +| ne | (bool, bool) -> bool | Inequality. | #### Native Functions for Sets -| Function | Type | Explanation | -| --- | --- | --- | -| len | (*T) -> int | Gets the count of a data set. | -| max | (*int) -> int | Finds the maximum value. | -| min | (*int) -> int | Finds the minimum value. | -| sum | (*int) -> int | Summation of the values. | -| find | (*T0) -> T1 | Finds a data entry from a set using a primary key. | +| Function | Type | Explanation | +| -------- | -------------- | -------------------------------------------------- | +| len | (\*T) -> int | Gets the count of a data set. | +| max | (\*int) -> int | Finds the maximum value. | +| min | (\*int) -> int | Finds the minimum value. | +| sum | (\*int) -> int | Summation of the values. | +| find | (\*T0) -> T1 | Finds a data entry from a set using a primary key. | #### Global Native Functions -| Function | Type | Explanation | -| --- | --- | --- | -| output | ((...) -> bool) -> | Outputs query content. 
| +| Function | Type | Explanation | +| -------- | ------------------------------ | ---------------------- | +| output | ((...) -> bool) -> | Outputs query content. | #### Database Native Functions -| Function | Type | Explanation | -| --- | --- | --- | -| load | (string) -> T | Loads the database. | +| Function | Type | Explanation | +| -------- | ------------- | ------------------- | +| load | (string) -> T | Loads the database. | #### Schema Native Functions -| Function | Type | Explanation | -| --- | --- | --- | -| to | (self) -> T | Converts to another schema type, using duck typing. | -| is | (self) -> bool | Determines if it can be another schema type, using duck typing. If the schema has a primary key, the underlying check will only use the primary key to determine compatibility. | -| key_eq | (self, T) -> bool | Checks if the primary keys of two schema instances are equal. | -| key_neq | (self, T) -> bool | Checks if the primary keys of two schema instances are **not** equal. | +| Function | Type | Explanation | +| ----------- | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| to<T> | (self) -> T | Converts to another schema type, using duck typing. | +| is<T> | (self) -> bool | Determines if it can be another schema type, using duck typing. If the schema has a primary key, the underlying check will only use the primary key to determine compatibility. | +| key_eq | (self, T) -> bool | Checks if the primary keys of two schema instances are equal. | +| key_neq | (self, T) -> bool | Checks if the primary keys of two schema instances are **not** equal. | Schema native function example: @@ -274,7 +276,7 @@ Query functions are recommended to have a `bool` return type and need to use `ou The query functions called within `output()` are no longer invoked in the conventional manner of passing arguments to functions. 
At this point, the parameter list changes to represent the table schema of the output table. Here are two examples of how query functions are applied: 1. Single-table `output` - + A single-table `output` specifically refers to using `output` only once within the `main` function to produce output. ```rust @@ -289,8 +291,8 @@ The query functions called within `output()` are no longer invoked in the conven ```json [ - {"a": 0, "b": "xxx"}, - {"a": 1, "b": "xxx"} + { "a": 0, "b": "xxx" }, + { "a": 1, "b": "xxx" } ] ``` @@ -312,14 +314,14 @@ The query functions called within `output()` are no longer invoked in the conven ```json { - "example0":[ - {"a": 0, "b": "xxx"}, - {"a": 1, "b": "xxx"} - ], - "example1":[ - {"a": "xxx", "b": 0}, - {"a": "xxx", "b": 1} - ] + "example0": [ + { "a": 0, "b": "xxx" }, + { "a": 1, "b": "xxx" } + ], + "example1": [ + { "a": "xxx", "b": 0 }, + { "a": "xxx", "b": 1 } + ] } ``` @@ -327,7 +329,7 @@ Below is a more detailed example where we directly construct two sets of data fo 1. In GödelScript, boolean values can be represented with the keywords `true` and `false`. -2. The `=` symbol in GödelScript is quite special and should not be interpreted in the same way as in conventional programming languages. GödelScript is a Datalog language. Here, the `=` symbol carries dual semantics: both __assignment__ and __equality comparison__. Details can be found in [`=` operator](#assignment-and-equality-comparison-operator). +2. The `=` symbol in GödelScript is quite special and should not be interpreted in the same way as in conventional programming languages. GödelScript is a Datalog language. Here, the `=` symbol carries dual semantics: both **assignment** and **equality comparison**. Details can be found in [`=` operator](#assignment-and-equality-comparison-operator). 3. 
In the conditional statements of this example, both `a` and `b` use the assignment semantics of `=`, because the `int` and `string` type parameters are considered `ungrounded (unassigned/unbound)` within the function body and must be assigned before they can be used. @@ -355,8 +357,8 @@ The expected output should be: ```json [ - {"a": 1, "b": "1"}, - {"a": 2, "b": "2"} + { "a": 1, "b": "1" }, + { "a": 2, "b": "2" } ] ``` @@ -576,6 +578,7 @@ impl File { ... } ``` + ##### Static Methods Static methods do not require `self` as the first argument and are straightforward to use: `ClassName::MethodName(...)`. @@ -1098,12 +1101,12 @@ fn class_method(className: string, methodName: string, methodSignature: string) GödelScript will determine symbols that are not bound to a set as `ungrounded`. The basic rule of judgment is: - Uninitialized/unusued/unbound symbols - - Unbound `int`, `string` arguments - - Unused database type arguments - - Function body has statements, but no return statements + - Unbound `int`, `string` arguments + - Unused database type arguments + - Function body has statements, but no return statements - Symbols bound within negation blocks - - For example, `!(__tmp = 1)`, `__tmp` is considered unbound - - Calling inline functions or data constructors in negation blocks + - For example, `!(__tmp = 1)`, `__tmp` is considered unbound + - Calling inline functions or data constructors in negation blocks #### 1. 
Unused Database/Basic Type Parameters @@ -1277,7 +1280,7 @@ fn class_hierarchy(className : string, superClassName : string) -> bool { } } -fn main() { +fn main() { output(class_hierarchy()) } ``` @@ -1302,7 +1305,7 @@ fn methods(className : string, methodName : string) -> bool { } } -fn main() { +fn main() { output(methods()) } ``` @@ -1457,10 +1460,10 @@ impl PublicVisitedElement { pub fn getName(self) -> string { let (tmp = Class(__all_data__).find(self)) { - return tmp.getQualifiedName() + return tmp.getQualifiedName() } let (tmp = Function(__all_data__).find(self)) { - return tmp.getQualifiedName() + return tmp.getQualifiedName() } } } @@ -1502,7 +1505,7 @@ fn output_result( eline: int, isCommented: int) -> bool { for (e in PublicVisitedElement(default_db())) { - if (type = e.getType() && + if (type = e.getType() && name = e.getName() && filePath = e.getLocation().getFile().getRelativePath() && sline = e.getLocation().getStartLineNumber() && @@ -2292,4 +2295,4 @@ This way, the execution time is reduced from 35 seconds back to 3 seconds, meeti ## Using Query Scripts Locally -For instructions on using query scripts on your machine, see [Installation, Configuration, and Running](./3_install_and_run.md). \ No newline at end of file +For instructions on using query scripts on your machine, see [Installation, Configuration, and Running](./install_and_run.en-US.md). 
diff --git a/content/zh/docs/codefuse-query/4_godelscript_language.md b/docs/docs/developer-docs/CodeFuse-Query/main/godelscript_language.zh-CN.md similarity index 85% rename from content/zh/docs/codefuse-query/4_godelscript_language.md rename to docs/docs/developer-docs/CodeFuse-Query/main/godelscript_language.zh-CN.md index 06522ff..fcfbd6d 100644 --- a/content/zh/docs/codefuse-query/4_godelscript_language.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/godelscript_language.zh-CN.md @@ -1,13 +1,15 @@ --- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 title: 查询语言介绍 -slug: 查询语言介绍 -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-godellanguage-zh -aliases: -- "/docs/codefuse-query-godellanguage-zh" +order: 2 +toc: content --- - # GödelScript 查询语言 ## 目录 @@ -157,85 +159,85 @@ GödelScript 包含基础类型`int` `string`,`bool`属于基础类型,但 #### `int`类型 native 函数 -| 函数 | 类型 | 解释 | -| --- | --- | --- | -| pow | (int, int) -> int | 乘方。参数只能非负数。 | -| rem | (int, int) -> int | 取余。 | -| bitand | (int, int) -> int | 按位与。 | -| bitor | (int, int) -> int | 按位或。 | -| bitxor | (int, int) -> int | 按位异或。 | -| bitnot | (int) -> int | 按位非。 | -| neg | (int) -> int | 算术取反。 | -| to_string | (int) -> string | 转换为字符串。 | -| add | (int, int) -> int | + | -| sub | (int, int) -> int | - | -| mul | (int, int) -> int | * | -| div | (int, int) -> int | / | -| eq | (int, int) -> bool | = | -| ne | (int, int) -> bool | != | -| gt | (int, int) -> bool | > | -| ge | (int, int) -> bool | >= | -| lt | (int, int) -> bool | < | -| le | (int, int) -> bool | <= | -| to_set | (int) -> *int | 转为集合类型。 | +| 函数 | 类型 | 解释 | +| --------- | ------------------ | ---------------------- | +| pow | (int, int) -> int | 乘方。参数只能非负数。 | +| rem | (int, int) -> int | 取余。 | +| bitand | (int, int) -> int | 按位与。 | +| bitor | (int, int) -> int | 按位或。 | +| bitxor | (int, int) -> int | 按位异或。 | +| bitnot | (int) -> int | 按位非。 | +| neg | (int) -> int | 算术取反。 | +| to_string | (int) -> string 
| 转换为字符串。 | +| add | (int, int) -> int | + | +| sub | (int, int) -> int | - | +| mul | (int, int) -> int | \* | +| div | (int, int) -> int | / | +| eq | (int, int) -> bool | = | +| ne | (int, int) -> bool | != | +| gt | (int, int) -> bool | > | +| ge | (int, int) -> bool | >= | +| lt | (int, int) -> bool | < | +| le | (int, int) -> bool | <= | +| to_set | (int) -> \*int | 转为集合类型。 | #### `string`类型 native 函数 -| 函数 | 类型 | 解释 | -| --- | --- | --- | -| len | (string) -> int | 获取字符串长度。 | -| substr | (string, int, int) -> string | 通过初始index和length来截取字符串。 | -| contains | (string, string) -> bool | 判断一个字符串是否被包含在当前字符串中。 | -| matches | (string, string) -> bool | 判断正则字符串是否完全匹配当前字符串。 | -| get_regex_match_result | (string, string, int) -> string | 获取被正则字符串完全匹配当前字符串时的某一个捕获结果,该结果由第二个参数(int)确定。如 "abcdef".get_regex_match_result("a(.*)f", 1) 的结果是 "bcde"。 | -| to_int | (string) -> int | 转换为整数。 | -| add | (string, string) -> string | 字符串拼接。 | -| eq | (string, string) -> bool | 判断字符串相等。 | -| ne | (string, string) -> bool | 判断字符串不相等。 | -| to_set | (string) -> *string | 转为集合类型。 | +| 函数 | 类型 | 解释 | +| ---------------------- | ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | +| len | (string) -> int | 获取字符串长度。 | +| substr | (string, int, int) -> string | 通过初始 index 和 length 来截取字符串。 | +| contains | (string, string) -> bool | 判断一个字符串是否被包含在当前字符串中。 | +| matches | (string, string) -> bool | 判断正则字符串是否完全匹配当前字符串。 | +| get_regex_match_result | (string, string, int) -> string | 获取被正则字符串完全匹配当前字符串时的某一个捕获结果,该结果由第二个参数(int)确定。如 "abcdef".get_regex_match_result("a(.\*)f", 1) 的结果是 "bcde"。 | +| to_int | (string) -> int | 转换为整数。 | +| add | (string, string) -> string | 字符串拼接。 | +| eq | (string, string) -> bool | 判断字符串相等。 | +| ne | (string, string) -> bool | 判断字符串不相等。 | +| to_set | (string) -> \*string | 转为集合类型。 | #### `bool`类型 native 函数 
`bool`虽然作为基础类型存在,但是该类型不能作为数据参与中间计算,只能作为条件结果。 -| 函数 | 类型 | 解释 | -| --- | --- | --- | -| not | (bool) -> bool | 条件取反。 | -| and | (bool, bool) -> bool | 条件与。 | -| or | (bool, bool) -> bool | 条件或。 | -| eq | (bool, bool) -> bool | 相等。 | -| ne | (bool, bool) -> bool | 不相等。 | +| 函数 | 类型 | 解释 | +| ---- | -------------------- | ---------- | +| not | (bool) -> bool | 条件取反。 | +| and | (bool, bool) -> bool | 条件与。 | +| or | (bool, bool) -> bool | 条件或。 | +| eq | (bool, bool) -> bool | 相等。 | +| ne | (bool, bool) -> bool | 不相等。 | #### 作用于集合的 native 函数 -| 函数 | 类型 | 解释 | -| --- | --- | --- | -| len | (*T) -> int | 获取数据集合的数量。 | -| max | (*int) -> int | 查找最大值。 | -| min | (*int) -> int | 查找最小值。 | -| sum | (*int) -> int | 求和。 | -| find | (*T0) -> T1 | 从一个集合中,通过主键查找数据。 | +| 函数 | 类型 | 解释 | +| ---- | -------------- | -------------------------------- | +| len | (\*T) -> int | 获取数据集合的数量。 | +| max | (\*int) -> int | 查找最大值。 | +| min | (\*int) -> int | 查找最小值。 | +| sum | (\*int) -> int | 求和。 | +| find | (\*T0) -> T1 | 从一个集合中,通过主键查找数据。 | #### 全局 native 函数 -| 函数 | 类型 | 解释 | -| --- | --- | --- | +| 函数 | 类型 | 解释 | +| ------ | ------------------------------ | ----------------- | | output | ((...) 
-> bool) -> | 输出 query 内容。 | #### database 的 native 函数 -| 函数 | 类型 | 解释 | -| --- | --- | --- | +| 函数 | 类型 | 解释 | +| ---- | ------------- | ---------------- | | load | (string) -> T | 加载 database 。 | #### schema 的 native 函数 -| 函数 | 类型 | 解释 | -| --- | --- | --- | -| to | (self) -> T | 转换到其他类型的 schema,采用 duck type 检测。 | -| is | (self) -> bool | 判断是否可以是其他类型的 schema,采用 duck type 检测。如果自身 schema 有主键,则底层只会通过主键判断是否可以是其他类型。 | -| key_eq | (self, T) -> bool | 检查两个 schema 实例的主键是否相等。 | -| key_neq | (self, T) -> bool | 检查两个 schema 实例的主键是否不等。 | +| 函数 | 类型 | 解释 | +| ----------- | ----------------- | ------------------------------------------------------------------------------------------------------------------------ | +| to<T> | (self) -> T | 转换到其他类型的 schema,采用 duck type 检测。 | +| is<T> | (self) -> bool | 判断是否可以是其他类型的 schema,采用 duck type 检测。如果自身 schema 有主键,则底层只会通过主键判断是否可以是其他类型。 | +| key_eq | (self, T) -> bool | 检查两个 schema 实例的主键是否相等。 | +| key_neq | (self, T) -> bool | 检查两个 schema 实例的主键是否不等。 | schema native 函数实例: @@ -275,59 +277,59 @@ fn convert() -> *ElementParent { 1. 单表`output` - 单表`output`特指在`main`函数中,只使用一次`output`来输出。 + 单表`output`特指在`main`函数中,只使用一次`output`来输出。 - ```rust - fn example(a: int, b: string) -> bool {...} + ```rust + fn example(a: int, b: string) -> bool {...} - fn main() { - output(example()) // 此时参数列表变为输出表结构,不需要传参 - } - ``` + fn main() { + output(example()) // 此时参数列表变为输出表结构,不需要传参 + } + ``` - 对应的输出表结构为: + 对应的输出表结构为: - ```json - [ - {"a": 0, "b": "xxx"}, - {"a": 1, "b": "xxx"} - ] - ``` + ```json + [ + { "a": 0, "b": "xxx" }, + { "a": 1, "b": "xxx" } + ] + ``` 2. 
多表`output` - 多表`output`是指在`main`函数中,使用多次`output`来输出。在这种情况下,输出数据会附带对应的表名。 - - ```rust - fn example0(a: int, b: string) -> bool {...} - fn example1(a: string, b: int) -> bool {...} - - fn main() { - output(example0()) - output(example1()) - } - ``` - - 对应的输出表结构为: - - ```json - { - "example0":[ - {"a": 0, "b": "xxx"}, - {"a": 1, "b": "xxx"} - ], - "example1":[ - {"a": "xxx", "b": 0}, - {"a": "xxx", "b": 1} - ] - } - ``` + 多表`output`是指在`main`函数中,使用多次`output`来输出。在这种情况下,输出数据会附带对应的表名。 + + ```rust + fn example0(a: int, b: string) -> bool {...} + fn example1(a: string, b: int) -> bool {...} + + fn main() { + output(example0()) + output(example1()) + } + ``` + + 对应的输出表结构为: + + ```json + { + "example0": [ + { "a": 0, "b": "xxx" }, + { "a": 1, "b": "xxx" } + ], + "example1": [ + { "a": "xxx", "b": 0 }, + { "a": "xxx", "b": 1 } + ] + } + ``` 下面是一个比较详细的例子,在这个例子中,我们直接构造了两组数据并输出。在下列代码中,需要注意的是: 1. GödelScript 中,布尔值可以使用`true`和`false`关键字。 -2. `=`符号在 GödelScript 中是比较特殊的符号,不能用常规的编程语言的思路来理解。GödelScript 是一种 Datalog 语言。在这里,`=`符号同时具备两种语义,一个是 __赋值__ 一个是 __判等__。详情可看[`=`运算符](#赋值和判等运算符)。 +2. `=`符号在 GödelScript 中是比较特殊的符号,不能用常规的编程语言的思路来理解。GödelScript 是一种 Datalog 语言。在这里,`=`符号同时具备两种语义,一个是 **赋值** 一个是 **判等**。详情可看[`=`运算符](#赋值和判等运算符)。 3. 在这个例子的条件语句中,`a`和`b`均使用了`=`的赋值语义,因为`int`和`string`类型参数在函数体中被认为是`ungrounded(未赋值/未绑定)`,必须要被赋值才能使用。 @@ -355,8 +357,8 @@ fn main() { ```json [ - {"a": 1, "b": "1"}, - {"a": 2, "b": "2"} + { "a": 1, "b": "1" }, + { "a": 2, "b": "2" } ] ``` @@ -452,60 +454,60 @@ if (f.getName().contains("util") || f.getName().contains("com")) { 1. 
赋值 - 赋值一般出现在`int` `string`这类基础类型的变量参数上,这类变量作为函数的参数出现时,一般被认为是未赋值的。而具有这类变量的函数被调用时,传入的参数,实际上是作为筛选条件存在。 - - ```rust - fn example(a: int) -> bool { - // 这里比较反直觉,在过程式语言中,这里通常会被认为是判断 a == 1 - // 但是在 datalog 方言中,datalog 的每个函数实际上都是在算一个中间表 (view) - // 所以这个函数本质上是生成了一个 view,数据为 [{"a": 1}] - return a = 1 // assign a = 1 - } - - fn test() -> bool { - // 这里看似是在通过传参让 a = 2,实际上并不是 - // example() 自己会返回 view: [{"a": 1}] - // 然后通过 a = 2 来约束结果,可以看到,我们这里没有拿到任何结果 - // 所以返回了 false - return example(2) // false - } - ``` + 赋值一般出现在`int` `string`这类基础类型的变量参数上,这类变量作为函数的参数出现时,一般被认为是未赋值的。而具有这类变量的函数被调用时,传入的参数,实际上是作为筛选条件存在。 + + ```rust + fn example(a: int) -> bool { + // 这里比较反直觉,在过程式语言中,这里通常会被认为是判断 a == 1 + // 但是在 datalog 方言中,datalog 的每个函数实际上都是在算一个中间表 (view) + // 所以这个函数本质上是生成了一个 view,数据为 [{"a": 1}] + return a = 1 // assign a = 1 + } + + fn test() -> bool { + // 这里看似是在通过传参让 a = 2,实际上并不是 + // example() 自己会返回 view: [{"a": 1}] + // 然后通过 a = 2 来约束结果,可以看到,我们这里没有拿到任何结果 + // 所以返回了 false + return example(2) // false + } + ``` 2. 
判等 - 对于 schema 类型来说,任何一个 schema 背后都有一个全集,所以参数列表中的 schema 类型一般被认为是已经被赋值的。对于已经赋值的变量来说,`=`就是判等操作。 - - ```rust - // 声明 schema - schema A {...} - - // 实现 schema 的成员函数 - impl A { - // 这里定义了 schema A 的全集 - @data_constraint - pub fn __all__() -> *A {...} - } - - fn example(a: A) -> bool { - for(temp in A::__all__()) { - if (a = temp) { - return true - } - } - } - ``` - - 同样,对于中间声明的有初始值的`int`或者`string`,`=`也是判等操作。 - - ```rust - fn example() -> bool { - let (a = 1) { // assign a = 1 - if (a = 1) { // compare a = 1 - return true - } - } - } - ``` + 对于 schema 类型来说,任何一个 schema 背后都有一个全集,所以参数列表中的 schema 类型一般被认为是已经被赋值的。对于已经赋值的变量来说,`=`就是判等操作。 + + ```rust + // 声明 schema + schema A {...} + + // 实现 schema 的成员函数 + impl A { + // 这里定义了 schema A 的全集 + @data_constraint + pub fn __all__() -> *A {...} + } + + fn example(a: A) -> bool { + for(temp in A::__all__()) { + if (a = temp) { + return true + } + } + } + ``` + + 同样,对于中间声明的有初始值的`int`或者`string`,`=`也是判等操作。 + + ```rust + fn example() -> bool { + let (a = 1) { // assign a = 1 + if (a = 1) { // compare a = 1 + return true + } + } + } + ``` #### match 语句 @@ -576,6 +578,7 @@ impl File { ... } ``` + ##### 静态方法 静态方法不需要`self`作为第一个参数,使用方式很简单,`类名::方法名(...)`。 @@ -943,7 +946,7 @@ Error: "Location" is ambiguous, with multiple symbols "coref::java::Location, coref::xml::Location". ``` -与其他语言类似,GödelScript允许通过完整路径的方式直接指定一个符号,但是该符号必须被引入。 +与其他语言类似,GödelScript 允许通过完整路径的方式直接指定一个符号,但是该符号必须被引入。 ```rust use coref::java::Location @@ -1098,12 +1101,12 @@ fn class_method(className: string, methodName: string, methodSignature: string) GödelScript 会将未与数据绑定的符号判定为`ungrounded(未赋值/未绑定)`。基本判定规则为: - 未初始化的/未被使用的/未与集合绑定的符号 - - 未被绑定的`int` `string`参数 - - 未被使用的 database 类型的参数 - - 函数体有语句,但是没有任何返回语句 + - 未被绑定的`int` `string`参数 + - 未被使用的 database 类型的参数 + - 函数体有语句,但是没有任何返回语句 - 在取非运算块中进行绑定的符号 - - 例如 `!(__tmp = 1)`,`__tmp`会被认为是未绑定的 - - 在取非运算块中调用 inline 函数或数据构造函数 + - 例如 `!(__tmp = 1)`,`__tmp`会被认为是未绑定的 + - 在取非运算块中调用 inline 函数或数据构造函数 #### 1. 
未使用的 database/基础类型参数 @@ -1277,7 +1280,7 @@ fn class_hierarchy(className : string, superClassName : string) -> bool { } } -fn main() { +fn main() { output(class_hierarchy()) } ``` @@ -1302,7 +1305,7 @@ fn methods(className : string, methodName : string) -> bool { } } -fn main() { +fn main() { output(methods()) } ``` @@ -1457,10 +1460,10 @@ impl PublicVisitedElement { pub fn getName(self) -> string { let (tmp = Class(__all_data__).find(self)) { - return tmp.getQualifiedName() + return tmp.getQualifiedName() } let (tmp = Function(__all_data__).find(self)) { - return tmp.getQualifiedName() + return tmp.getQualifiedName() } } } @@ -1502,7 +1505,7 @@ fn output_result( eline: int, isCommented: int) -> bool { for (e in PublicVisitedElement(default_db())) { - if (type = e.getType() && + if (type = e.getType() && name = e.getName() && filePath = e.getLocation().getFile().getRelativePath() && sline = e.getLocation().getStartLineNumber() && @@ -2292,4 +2295,4 @@ impl XmlElementBase { ## 在本机使用查询脚本流程 -参见[安装、配置、运行](./3_install_and_run.md) +参见[安装、配置、运行](./install_and_run.zh-CN.md) diff --git a/content/en/docs/codefuse-query/3_install_and_run.en.md b/docs/docs/developer-docs/CodeFuse-Query/main/install_and_run.en-US.md similarity index 88% rename from content/en/docs/codefuse-query/3_install_and_run.en.md rename to docs/docs/developer-docs/CodeFuse-Query/main/install_and_run.en-US.md index 006d3bd..f9bba3a 100644 --- a/content/en/docs/codefuse-query/3_install_and_run.en.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/install_and_run.en-US.md @@ -1,10 +1,13 @@ --- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 title: QuickStart -slug: QuickStart -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-quickstart -aliases: -- "/docs/codefuse-query-quickstart" +order: 1 +toc: content --- # Installation, Configuration, and Running @@ -12,26 +15,25 @@ aliases: ## Hardware and Software Requirements - Hardware: 4C8G - - Environment 
Requirements: Java 1.8 and Python 3.8 or above runtime environments. Please ensure Java and Python executables are available. ## Sparrow Installation Steps and Guidance -- The CodeFuse-Query download package is a zip archive that contains tools, scripts, and various files specific to CodeFuse-Query. If you do not have a CodeFuse-Query license, downloading this archive indicates your agreement with the [CodeFuse-Query Terms and Conditions](../LICENSE). +- The CodeFuse-Query download package is a zip archive that contains tools, scripts, and various files specific to CodeFuse-Query. If you do not have a CodeFuse-Query license, downloading this archive indicates your agreement with the [CodeFuse-Query Terms and Conditions](./LICENSE). - CodeFuse-Query is currently only supported on Mac and Linux systems. The download links are: (currently, only a sample is given, the official download link will be provided after open-source release) - - Mac: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) - - Linux: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) + - Mac: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) + - Linux: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) - You should always use the CodeFuse-Query bundle to ensure version compatibility. ### Tips: - On Mac systems, directly downloading the package may prompt a verification for the developer. -![image.png](/images/codefuse-query/macos_cannot_open_godel.png) +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*0_0lSbOt4vEAAAAAAAAAAAAADlHYAQ/original) - You can modify the verification in the security settings. -![image.png](/images/codefuse-query/security_allow_godel_run.png) +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*NSZ4SaVbGDcAAAAAAAAAAAAADlHYAQ/original) - Click "Allow Anyway." 
@@ -48,11 +50,8 @@ xattr -d com.apple.quarantine path/to/file ## Configuring and Initializing the CodeFuse-Query Development Environment - Unzip using the command line or by simply clicking to unzip. - - You need to have Java 8 and Python 3.8 or higher runtime environments. - - After unzipping CodeFuse-Query, you can run the Sparrow process by running the executable in the following ways: - - By executing `/sparrow-cli/sparrow`, where `` is the folder where you extracted the CodeFuse-Query package. - By adding `/sparrow-cli` to your PATH, so you can directly run the executable `sparrow`. @@ -69,11 +68,12 @@ At this point, you can execute the `sparrow` command. - Write a Gödel script based on the code data to obtain the desired code data. -- For how to write Gödel scripts, refer to [GödelScript Query Language](./4_godelscript_language.md) +- For how to write Gödel scripts, refer to [GödelScript Query Language](./godelscript_language.en-US.md) ### Execution Example #### Data Extraction + ```java /sparrow-cli/sparrow database create -s -lang -o ``` @@ -90,7 +90,7 @@ At this point, you can execute the `sparrow` command. - Assuming you have the following Gödel script to get all Java method names from a specified repository: -- For specific Gödel script writing, refer to [GödelScript Query Language](./4_godelscript_language.md) +- For specific Gödel script writing, refer to [GödelScript Query Language](./godelscript_language.en-US.md) ```java // script @@ -119,6 +119,7 @@ fn main() { ``` #### Script Execution + ```java /sparrow-cli/sparrow query run -d -gdl -o ``` @@ -143,11 +144,11 @@ public class HelloWorld { String world = tmp.getWorld(); System.out.println(hello + " " + world); } - + public String getHello() { return "Hello"; } - + public String getWorld() { return "World"; } @@ -161,15 +162,14 @@ sparrow query run -d ./db/ -gdl example.gdl -o ./ ``` - `` is the directory where the given Java file is stored. 
- - example.gdl is the given Gödel script sample, saved in the current directory. - - After execution, you can find the example.json file in the current directory. The corresponding script output JSON file content is as follows: + ```java [{"name": "getHello"}, {"name": "getWorld"}, {"name": "main"}] -``` \ No newline at end of file +``` diff --git a/content/zh/docs/codefuse-query/3_install_and_run.md b/docs/docs/developer-docs/CodeFuse-Query/main/install_and_run.zh-CN.md similarity index 81% rename from content/zh/docs/codefuse-query/3_install_and_run.md rename to docs/docs/developer-docs/CodeFuse-Query/main/install_and_run.zh-CN.md index 962b5c4..021ae36 100644 --- a/content/zh/docs/codefuse-query/3_install_and_run.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/install_and_run.zh-CN.md @@ -1,15 +1,18 @@ --- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 title: 快速开始 -slug: 快速开始 -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-quickstart-zh -aliases: -- "/docs/codefuse-query-quickstart-zh" +order: 1 +toc: content --- # 安装、配置、运行 -## 硬件和软件要求 +## 硬件和软件要求 - 硬件:4C8G @@ -17,25 +20,25 @@ aliases: ## Sparrow 安装步骤和指导 -- CodeFuse-Query 下载包是一个 zip 存档,其中包含工具、脚本和各种特定于 CodeFuse-Query 的文件。如果您没有 CodeFuse-Query 许可证,那么下载此存档即表示您同意 [CodeFuse-Query 条款和条件](../LICENSE)。 +- CodeFuse-Query 下载包是一个 zip 存档,其中包含工具、脚本和各种特定于 CodeFuse-Query 的文件。如果您没有 CodeFuse-Query 许可证,那么下载此存档即表示您同意 [CodeFuse-Query 条款和条件](./LICENSE)。 - 目前仅支持 mac,linux 系统下使用 CodeFuse-Query,下载地址为:(目前仅给出示例,开源后给出正式下载地址) - - mac: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) - - linux: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) + - mac: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) + - linux: [CodeFuse-Query 2.0.0](https://github.com/codefuse-ai/CodeFuse-Query/releases/tag/2.0.0) - 您应该始终使用 CodeFuse-Query 捆绑包,确保版本兼容性 ### Tips: -- 
mac系统下直接下载软件包会提示需要验证开发者 +- mac 系统下直接下载软件包会提示需要验证开发者 -![image.png](/images/codefuse-query/macos_cannot_open_godel.png) +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*0_0lSbOt4vEAAAAAAAAAAAAADlHYAQ/original) - 可在安全性设置中进行修改验证 -![image.png](/images/codefuse-query/security_allow_godel_run.png) +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*NSZ4SaVbGDcAAAAAAAAAAAAADlHYAQ/original) - 点击仍然允许 -- 详细步骤可参照:[Mac 官方文档: 如何在 Mac 上安全地打开 App](https://support.apple.com/zh-cn/HT202491) +- 详细步骤可参照:[Mac 官方文档: 如何在 Mac 上安全地打开 App](https://support.apple.com/zh-cn/HT202491) - 或使用`xattr -d com.apple.quarantine`命令,删除 CodeFuse-Query 被 macOS 赋予的外部属性 @@ -53,7 +56,7 @@ xattr -d com.apple.quarantine path/to/file - CodeFuse-Query 解压后,您可以通过以下几种方式运行可执行文件来运行 sparrow 进程: -- 通过执行 `/sparrow-cli/sparrow`,其中 `` 是提取CodeFuse-Query包的文件夹。 +- 通过执行 `/sparrow-cli/sparrow`,其中 `` 是提取 CodeFuse-Query 包的文件夹。 - 通过添加 `/sparrow-cli` 到您的 PATH,以便您可以直接运行可执行文件 sparrow。 @@ -69,11 +72,12 @@ xattr -d com.apple.quarantine path/to/file - 基于代码数据编写 godel 脚本,获取自己想要的代码数据 -- godel 脚本如何编写参照 [GödelScript 查询语言](./4_godelscript_language.md) +- godel 脚本如何编写参照 [GödelScript 查询语言](./godelscript_language.zh-CN.md) ### 执行样例 #### 数据抽取 + ```java /sparrow-cli/sparrow database create -s -lang -o ``` @@ -86,11 +90,11 @@ xattr -d com.apple.quarantine path/to/file - 在数据抽取步骤,获得脚本执行需要的数据库 `` -#### 编写godel脚本 +#### 编写 godel 脚本 - 假设具备如下 godel 脚本, 获取指定仓库的所有 java 方法名 -- godel 脚本具体编写可参照 [GödelScript 查询语言](./4_godelscript_language.md) +- godel 脚本具体编写可参照 [GödelScript 查询语言](./godelscript_language.zh-CN.md) ```java // script @@ -119,6 +123,7 @@ fn main() { ``` #### 脚本执行 + ```java /sparrow-cli/sparrow query run -d -gdl -o ``` @@ -133,7 +138,7 @@ fn main() { #### 例子 -若存在以下java代码 +若存在以下 java 代码 ```java public class HelloWorld { @@ -167,6 +172,7 @@ sparrow query run -d ./db/ -gdl example.gdl -o ./ - 执行完毕后可在当前目录下找到 example.json 文件 对应的脚本输出 json 文件内容如下 + ```java [{"name": "getHello"}, {"name": "getWorld"}, diff --git 
a/content/en/docs/codefuse-query/2_introduction.en.md b/docs/docs/developer-docs/CodeFuse-Query/main/introduction.en-US.md similarity index 84% rename from content/en/docs/codefuse-query/2_introduction.en.md rename to docs/docs/developer-docs/CodeFuse-Query/main/introduction.en-US.md index a4475e3..0ceadaa 100644 --- a/content/en/docs/codefuse-query/2_introduction.en.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/introduction.en-US.md @@ -1,53 +1,69 @@ --- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 title: Introduction -slug: Introduction -description: 介绍主要功能 -url: docs/codefuse-query-introduction -aliases: -- "/docs/codefuse-query-introduction" +order: 0 +toc: content --- # Introduction + CodeFuse-Query is a code data platform that supports structured analysis of various programming languages. The core idea is to transform all code into data using various language parsers and to store this data in a structured format within a code database. Data analysis is then performed according to business needs using a custom query language, as shown in the diagram below: -![image.png](/images/codefuse-query/introduction01.png) +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*a-DPSbstzhAAAAAAAAAAAAAADlHYAQ/original) ## 2.1 Architecture of CodeFuse-Query + Overall, the CodeFuse-Query code data platform is divided into three main parts: the code data model, the code query DSL (Domain-Specific Language), and platform productization services. The main workflow is illustrated in the following diagram: -![image.png](/images/codefuse-query/introduction02.png) +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*F2FURp24VVEAAAAAAAAAAAAADlHYAQ/original) ### Code Datafication and Standardization: COREF + We have defined a model for code datafication and standardization called COREF, which requires all code to be converted to this model through various language extractors. 
COREF mainly includes the following information: **COREF** = AST (Abstract Syntax Tree) + ASG (Abstract Semantic Graph) + CFG (Control Flow Graph) + PDG (Program Dependency Graph) + Call Graph + Class Hierarchy + Documentation (Documentation/Commentary Information) Note: As the computational complexity of each type of information varies, not all languages' COREF information includes all of the above. The basic information mainly includes AST, ASG, Call Graph, Class Hierarchy, and Documentation, while other information (CFG and PDG) is still under development and will be gradually supported. + ### Code Query DSL + Based on the generated COREF code data, CodeFuse-Query uses a custom DSL language called **Gödel** for querying, thereby fulfilling code analysis requirements. Gödel is a logic-based reasoning language, whose underlying implementation is based on the logical reasoning language Datalog. By describing "facts" and "rules," the program can continuously derive new facts. Gödel is also a declarative language, focusing more on describing "what is needed" and leaving the implementation to the computational engine. Since code has already been converted to relational data (COREF data stored in the form of relational tables), one might wonder why not use SQL directly, or use an SDK instead of learning a new DSL language. Because Datalog's computation is monotonic and terminating. Simply put, Datalog sacrifices expressiveness to achieve higher performance, and Gödel inherits this feature. - Compared to SDKs, Gödel's main advantage is its ease of learning and use. As a declarative language, users do not need to focus on intermediate computations and can simply describe their needs as they would with SQL. - Compared to SQL, Gödel's advantages are stronger descriptive capabilities and faster computation speed, for example, describing recursive algorithms and multi-table joint queries, which are difficult for SQL. 
+ ### Platformization and Productization + CodeFuse-Query includes the **Sparrow CLI** and the online service **Query Centre**. Sparrow CLI contains all components and dependencies, such as extractors, data models, compilers, etc., and users can completely generate and query code data locally using Sparrow CLI (for how to use Sparrow CLI, please see Section 3: Installation, Configuration, Running). If users have online query needs, they can use the Query Centre to experiment. + ## 2.2 Languages Supported by CodeFuse-Query for Analysis + As of October 31, 2023, CodeFuse-Query supports data analysis for 11 programming languages. Among these, support for 5 languages (Java, JavaScript, TypeScript, XML, Go) is very mature, while support for the remaining 6 languages (Objective-C, C++, Python3, Swift, SQL, Properties) is in beta and has room for further improvement. The specific support status is shown in the table below: -| Language | Status | Number of Nodes in the COREF Model | -| ------------- | ------ | ---------------------------------- | -| Java | Mature | 162 | -| XML | Mature | 12 | -| TS/JS | Mature | 392 | -| Go | Mature | 40 | -| OC/C++ | Beta | 53/397 | -| Python3 | Beta | 93 | -| Swift | Beta | 248 | -| SQL | Beta | 750 | -| Properties | Beta | 9 | +| Language | Status | Number of Nodes in the COREF Model | +| ---------- | ------ | ---------------------------------- | +| Java | Mature | 162 | +| XML | Mature | 12 | +| TS/JS | Mature | 392 | +| Go | Mature | 40 | +| OC/C++ | Beta | 53/397 | +| Python3 | Beta | 93 | +| Swift | Beta | 248 | +| SQL | Beta | 750 | +| Properties | Beta | 9 | Note: The maturity level of the language status above is determined based on the types of information included in COREF and the actual implementation. Except for OC/C++, all languages support complete AST information and Documentation. For example, COREF for Java also supports ASG, Call Graph, Class Hierarchy, and some CFG information. 
+ ## 2.3 Use Cases of CodeFuse-Query + ### Querying Code Features + A developer wants to know which String type variables are used in Repo A, so they write a Gödel script as follows and submit it to the CodeFuse-Query system for results. + ```rust // script use coref::java::* @@ -64,31 +80,46 @@ fn main() { output(out()) } ``` + Similar needs: Queries for classes, functions, variables, return values, call graphs, class inheritance, etc. ### Outputting Static Analysis Capabilities + A security team member sets up **a system** to cross-verify that log data and code data are consistent. To complete a certain analysis task, they plan to derive static data D1 through Gödel queries, merge with dynamic data D2, and combine analysis to reach conclusion C. After verifying the technical feasibility on CodeFuse-Query, they integrate the system using the standard API provided by CodeFuse-Query. Similar needs: Using static analysis as a system checkpoint, improving testing efficiency, merging the analyzed data into a documentation. + ### Code Rule Checker + A team lead finds that the team often introduces similar bugs, Bug A, **and decides to establish a code rule and its checker** to be applied during CodeReview. After writing an analysis query on the CodeFuse-Query platform and testing that it meets requirements, they codify the query as a code rule and roll it out to the CodeReview/CI phase. Since then, this bug has never occurred again. Similar needs: Writing static defect scanning rules to intercept code risks. + ### Analyzing Code Characteristics + A developer from the R&D department wants to know the current proportion of Spring and Spring Boot projects in the code repository to quantify the promotion of the new framework. By writing a Gödel Query to describe different project analysis features, they **queried 110,000 code repositories at once** and obtained all the code data after a few dozen minutes, happily moving on to their KPIs. 
Similar needs: Application profiling, code profiling, architectural analysis. + ### Getting Statistical Data + A researcher finds that traditional code complexity metrics struggle to accurately measure the complexity of the code. Inspired by international advanced experiences and a moment of insight, they design a set of complexity metrics and algorithms. After implementing it with Gödel and finding it already highly performant with little optimization, they quickly apply it to over 10 languages and more than 110,000 repositories. They now have an in-depth understanding of the overall complexity of the code repositories, unlike before when they had to parse the code and analyze the syntax tree themselves, **which is so much more convenient**. Similar needs: Code statistics, code metrics, algorithm design, academic research. + ### Architectural Analysis + An architect recently promoted a new message middleware based on txt files, and existing analysis platforms couldn't support analyzing dependencies in such systems. By quickly modeling the message format with Gödel, they soon obtain the dependency relationships between different components in the system. Similar needs: System overview, architecture governance, lineage analysis. + ### Model Validation + A developer designs a system that requires users to play games before claiming coupons. They describe **the model's validation logic** with Gödel, then use the CodeFuse-Query system to **ensure that both current and future system implementations** fully comply with the model. No longer worried about potential financial losses from the game! Similar needs: System verification, network validation, permission verification. 
+ ## 2.4 Application Areas of CodeFuse-Query + Currently, CodeFuse-Query at Ant Group already supports **CodeFuse large language model data cleaning**, **code metrics evaluation**, **R&D risk control**, **privacy security analysis**, **code intelligence**, **terminal package size management**, and other scenarios with implemented applications, serving over a million monthly calls. -![image.png](/images/codefuse-query/introduction03.png) +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*2p6VToMNiPUAAAAAAAAAAAAADlHYAQ/original) ### High-Quality Code Data Cleaning - CodeFuse Large Code Model + The CodeFuse Large Code Model is a model by Ant Group for handling code-related issues and has been open-sourced. For the CodeFuse large language model, the quality of the training data directly affects the model's inference results. Low-quality code data can directly contaminate the language model's output, for example: the model might learn incorrect code patterns, generating erroneous code; if the data only contains code in a single programming language, the model might not adapt well to code in other languages. To control the quality of code data entering the model and thereby improve the model's inferencing capabilities, we have drawn upon the Ant Group program analysis team's years of practical experience coupled with industry consensus to clarify the definition of high-quality code. We have also implemented automated, large-scale code data cleaning using existing program analysis technologies. CodeFuse-Query provides the following data cleaning capabilities for the CodeFuse Large Code Model: @@ -96,23 +127,27 @@ CodeFuse-Query provides the following data cleaning capabilities for the CodeFus - High-quality code data cleaning: Clean code data, including vulnerability scanning for 7 languages (Python, Java, JavaScript, TypeScript, Go, C, C++), filtering by language type/star number, filtering out data with 0 valid lines of code, etc. 
We have currently accumulated about **2TB** of cleaned code data from GitHub and internally at Ant Group. - Code Profiling: Implements high-performance, multi-dimensional automatic tagging for large-scale code, supporting **10** languages (Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go), **77** common tags, **40** Ant-specific tags, totaling **117** tags. The current auto-tagging performance can reach **40MB/s**. - Other Atomic Abilities - - Advanced code feature extraction, including extraction of AST (Abstract Syntax Tree), DFG (Data Flow Graph), etc. The AST information has been used for SFT training with about 97% accuracy. - - Code snippet identification, used for extracting code from text data, convenient for formatting or adding Markdown: - - Text extraction of code: Extracting code block information from text, parsing main languages, function and class definitions, only verifying a binary problem, that is, verifying whether the text contains code blocks with about 83% accuracy. - - Identifying the programming language of a code snippet: Identifying the programming language of any code snippet, supporting 30+ languages, with about 80% accuracy. - - Code comment pair extraction: Supports extracting method-level comment-code pair information, covering **15** most popular languages on GitHub, used for Text To Code/Code To Text SFT training. + - Advanced code feature extraction, including extraction of AST (Abstract Syntax Tree), DFG (Data Flow Graph), etc. The AST information has been used for SFT training with about 97% accuracy. + - Code snippet identification, used for extracting code from text data, convenient for formatting or adding Markdown: + - Text extraction of code: Extracting code block information from text, parsing main languages, function and class definitions, only verifying a binary problem, that is, verifying whether the text contains code blocks with about 83% accuracy. 
+ - Identifying the programming language of a code snippet: Identifying the programming language of any code snippet, supporting 30+ languages, with about 80% accuracy. + - Code comment pair extraction: Supports extracting method-level comment-code pair information, covering **15** most popular languages on GitHub, used for Text To Code/Code To Text SFT training. + ### Code Data Metrics - Guangmu + Guangmu is an internal product at Ant Group aimed at different R&D personnel and team managers, providing objective data and analysis results to assess code capabilities. Guangmu offers individual code capability assessment reports, daily code capability metric data analysis, team code capability management, and code excellence award displays, all aimed at helping Ant Group's R&D engineers continuously improve code quality, reduce code debt, and enhance R&D efficiency in the long run. CodeFuse-Query provides Guangmu with two types of capabilities: - Code Evaluation Metrics: Code complexity, code annotation rate, standard development volume, etc. - Code Excellence Metrics: Code reuse degree. + ### Change Analysis - Youku Server-Side R&D Efficiency + The Youku Quality Assurance team started exploring server-side precision testing in 2023. After six months of technical sedimentation and system building, they established a precision testing system capable of **change content identification, change impact analysis, testing capability recommendation, and test coverage assessment**. In this process, CodeFuse-Query can provide capabilities including: - Analyzing the impacted objects based on code change content (file + line number): methods, entry points (HTTP entry, HSF entry), call routes (all call routes from the entry to the changed method), database operations (tables, types of operations). 
- Enhancing the effectiveness and readiness of change analysis impact by combining the precise analysis capabilities of online dynamic call routes (method routes) and CodeFuse-Query static analysis call routes. -To date, Youku has integrated all core applications through CodeFuse-Query and based on static analysis data collection, has built a complete server-side code and traffic knowledge base. \ No newline at end of file +To date, Youku has integrated all core applications through CodeFuse-Query and based on static analysis data collection, has built a complete server-side code and traffic knowledge base. diff --git a/docs/docs/developer-docs/CodeFuse-Query/main/introduction.zh-CN.md b/docs/docs/developer-docs/CodeFuse-Query/main/introduction.zh-CN.md new file mode 100644 index 0000000..2b994e9 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-Query/main/introduction.zh-CN.md @@ -0,0 +1,154 @@ +--- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 +title: 基本介绍 +order: 0 +toc: content +--- + +# 概述 + +CodeFuse-Query 是一个支持对 **各种编程语言** 进行 **结构化分析** 的 **代码数据平台**。核心思想是利用各种语言解析器将所有代码转化为数据,并将其结构化存储到代码数据库中。通过使用自定义查询语言,按照业务需求进行数据分析。如下图所示: +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*a-DPSbstzhAAAAAAAAAAAAAADlHYAQ/original) + +## 2.1 CodeFuse-Query 的架构 + +从整体上来说,CodeFuse-Query 代码数据平台分为三大部分:代码数据模型、代码查询 DSL、平台产品化服务。主要工作流程如下图所示: + +### ![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*F2FURp24VVEAAAAAAAAAAAAADlHYAQ/original) + +### 代码数据化和标准化:COREF + +我们定义了一种代码数据化和标准化的模型:COREF,要求所有代码都要能通过各种语言抽取器转化到该模型。 +COREF 主要包含以下几种信息: +**COREF** = AST (抽象语法树) + ASG(抽象语义图) + CFG(控制流图) + PDG(程序依赖图)+ Call Graph(函数调用图) + Class Hierarchy (类继承关系)+ Documentation(文档/注释信息) +注:由于每种信息的计算难度不一,所以并不是所有语言的 COREF 信息均包含以上全部信息,基础信息主要有 AST、ASG、Call Graph、Class Hierarchy 和 Documentation,其他信息( CFG 和 PDG )仍在建设中,后续会逐步支持。 + +### 代码查询 DSL + +基于生成的 COREF 代码数据,CodeFuse-Query 使用一种自定义的 DSL 语言 **Gödel** 来进行查询,从而完成代码分析需求。 +Gödel 
是一种逻辑推理语言,它的底层实现是基于逻辑推理语言 Datalog,通过描述“事实”和“规则”, 程序可以不断地推导出新的事实。Gödel 也是一个声明式语言,相较于命令式编程,声明式编程更加着重描述“要什么”,而把如何实现交给计算引擎。 +既然代码已经转化为关系型数据(COREF 数据以关系型数据表的形式存储),相信大家会有疑问,为什么不直接用 SQL,或者是直接使用 SDK,而是又要专门去学习一个新的 DSL 语言呢?因为 Datalog 的计算具备单调性和终止性,简单理解就是,Datalog 是在牺牲了表达能力的前提下获得了更高的性能,而 Gödel 继承了这个特点。 + +- 相比较 SDK,Gödel 的主要优点是易学易用,声明式的描述,用户不需要关注中间的运算过程,只需要像 SQL 一样简单描述清楚需求即可。 +- 相比较 SQL,Gödel 的优点主要是描述能力更强、计算速度更快,例如描述递归算法和多表联合查询,而这些对于 SQL 来说都是比较困难的。 + +### 平台化、产品化 + +CodeFuse-Query 包括**Sparrow CLI** 和 CodeFuse-Query**在线服务 Query 中心**。Sparrow CLI 包含了所有组件和依赖,例如抽取器,数据模型,编译器等,用户完全可以通过使用 Sparrow CLI 在本地进行代码数据生成和查询(Sparrow CLI 的使用方式请见 第 3 节 安装、配置、运行)。如果用户有在线查询的需求,可以使用 Query 中心进行实验。 + +## 2.2 CodeFuse-Query 支持的分析语言 + +截至 2023-10-31 为止,CodeFuse-Query 支持对 11 种编程语言进行数据分析。其中对 5 种编程语言( Java、JavaScript、TypeScript、XML、Go )的支持度非常成熟,对剩余 6 种编程语言(Objective-C、C++、Python3、Swift、SQL、Properties )的支持度处于 beta 阶段,还有进一步提升和完善的空间,具体的支持情况见下表: + +| 语言 | 状态 | COREF 模型节点数 | +| ---------- | ---- | ---------------- | +| Java | 成熟 | 162 | +| XML | 成熟 | 12 | +| TS/JS | 成熟 | 392 | +| Go | 成熟 | 40 | +| OC/C++ | beta | 53/397 | +| Python3 | beta | 93 | +| Swift | beta | 248 | +| SQL | beta | 750 | +| Properties | beta | 9 | + +注:以上语言状态的成熟程度判断标准是根据 COREF 包含的信息种类和实际落地情况来进行判定,除了 OC/C++外,所有语言均支持了完整的 AST 信息和 Documentation 信息,以 Java 为例,COREF for Java 还支持了 ASG、Call Graph、Class Hierarchy、以及部分 CFG 信息。 + +## 2.3 CodeFuse-Query 的使用场景 + +### 查询代码特征 + +小开发同学想知道 Repo A 里面使用了哪些 String 型的变量,所以他写了一个 Godel 如下,交给 CodeFuse-Query 系统给他返回了结果。 + +```rust +// script +use coref::java::* + +fn out(var: string) -> bool { + for(v in Variable(JavaDB::load("coref_java_src.db"))) { + if (v.getType().getName() = "String" && var = v.getName()) { + return true + } + } +} + +fn main() { + output(out()) +} +``` + +类似需求:查询:类,函数,变量,返回值,调用图,类继承等等。 + +### 输出静态分析能力 + +小安全是 XX 团队的安全同学,他做了**一套系统**交叉验证日志数据和代码数据是否一致。为了完成某个分析任务,他计划通过写 Godel 查询出来静态数据 D1,合并动态数据 D2,联合分析得出结论 C。小安全通过在 CodeFuse-Query 上面编写 Godel Query 测试技术上可行之后,使用 CodeFuse-Query 提供的标准 API 将系统对接了起来。
+类似需求:通过静态分析进行系统的卡点,提高测试的效率,通过分析出来的数据合并成说明文档。 + +### 代码规则检查器 + +小 TL 同学发现团队总是写出很多类似的 Bug A,**他想针对 Bug A 制定一个代码规则和其检查器**,并在 CodeReview 阶段做个卡点。小 TL 通过在 CodeFuse-Query 平台上面编写了一段分析 Query,在平台上面测试符合要求,把这段分析 Query 固化下来作为一个代码规则,并上线到了 CodeReview/CI 阶段。从此这个 Bug 再也没发生过了。 +类似需求:编写静态缺陷扫描规则进行代码风险拦截。 + +### 分析代码特性 + +研发部同学小框架想知道目前代码仓库中 Spring 工程和 Spring Boot 工程比例。 好量化新框架的推广情况。小框架通过编写 Godel Query 描述不同项目分析特征,**然后一次性 Query 了 11 万个代码仓库**,过了几十分钟后就拿到了所有代码的数据,开开心心做 KPI 去了。 +类似需求:应用画像,代码画像,架构分析。 + +### 获取统计数据 + +小研究发现传统的代码复杂度指标很难准确地衡量代码的复杂情况,通过学习国际先进经验加上自我灵光一闪,设计了一套复杂度指标和算法。通过 Godel 实现出来以后,**发现不怎么优化就已经性能非常高了**,很快就应用到了 10 几种语言,11+万个仓库当中去了。马上就对代码仓库整体的复杂度有了深入的了解。相比较以前需要自己解析代码,分析语法树,对接系统,**不知道方便了多少。** +类似需求:代码统计,代码度量,算法设计,学术研究。 + +### 架构分析 + +小架构同学最近推行了一种新的基于 txt 文件的消息中间件,目前已有的分析平台都不能支持分析此类系统的上下游依赖。小架构通过 Godel**快速建模了该消息格式**,并马上获取到了目前系统中不同组件的依赖关系。 +类似需求:系统 Overview,架构治理,血缘分析。 + +### 模型验证 + +小促销设计的系统里面要求用户一定是先玩游戏再领券。他通过 Godel 描述了**该模型的验证逻辑**,然后通过 CodeFuse-Query 系统**保障当前以及未来系统的代码实现**,都是完全符合该模型的。从此再不担心游戏出资损~ +类似需求:系统验证,网络验证,权限验证 + +## 2.4 CodeFuse-Query 的应用领域 + +目前,CodeFuse-Query 在蚂蚁集团已经支持 **CodeFuse 大语言模型数据清洗**、**代码度量评估**、**研发风险控制**、**隐私安全分析**、**代码智能**、**终端包大小治理**等多个场景的落地应用,服务月均调用量超过百万。 +![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*2p6VToMNiPUAAAAAAAAAAAAADlHYAQ/original) + +### 高质量代码数据清洗 - CodeFuse 代码大模型 + +CodeFuse 代码大模型是蚂蚁集团对外开源的处理代码相关问题的模型,对于 CodeFuse 大语言模型而言,训练的数据质量直接影响模型的推理结果。低质量的代码数据会直接污染语言模型的输出,例如:模型可能会学习到错误的代码模式,从而生成错误的代码;数据中只包含某种编程语言的代码,模型可能无法很好地适应其他编程语言的代码。 +为了把控进入模型的代码数据质量,进而提升模型的推理能力。我们基于蚂蚁程序分析团队多年的实践积累结合业界共识,梳理了高质量代码的定义方式,并利用已有程序分析技术实现了自动化、大规模的代码数据清洗。 +CodeFuse-Query 为 CodeFuse 代码大模型提供了以下数据清洗能力: + +- 高质量代码数据清洗:对代码数据进行清洗,包括对 Python,Java,JavaScript,TypeScript,Go,C,C++ 7 种语言进行漏洞扫描,对语言种类 / star 数进行筛选,过滤有效代码行数为 0 的数据等。目前已沉淀清洗后的 GitHub 和蚂蚁内部代码数据总共约 **2TB**。 +- 代码画像:实现对大规模代码进行高性能多维度的自动标注,支持 Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go 等 **10** 种语言,**77** 种通用标签,**40** 种蚂蚁特有标签,共 **117** 种标签。目前自动标注性能能够达到 **40MB/s**。 +- 其他原子能力 + - 
高级代码特征提取,包括提取 AST(抽象语法树),DFG(数据流图)数据等。目前 AST 信息已用于 SFT 训练,准确率 97% 左右。 + - 代码片段识别,用于针对文本数据中的代码进行提取,方便进行代码格式化或加上 Markdown 格式: + - 文本提取代码:从文本中提取代码块信息,支持主流语言的解析,函数及类定义,仅验证二分类问题,就是说仅验证文本是否含有代码块准确率 83% 左右。 + - 识别代码片段的编程语言种类:识别任意代码片段的编程语言种类,支持 30+ 种语言,准确率 80%左右。 + - 代码注释对提取:支持提取方法级别的注释-代码对信息,覆盖 **15 种** GitHub 最流行的语言,用于 Text To Code/Code To Text 的 SFT 训练。 + +### 代码数据指标 - 广目 + +广目是蚂蚁内部一款面向不同职能的研发同学和团队管理者,对代码力进行评估、展示客观数据和分析结果的数据产品。 +广目提供了个人代码力评估报告、日常代码力指标数据分析、团队代码力管理、代码评优荣誉展示等功能,旨在帮助蚂蚁研发工程师不断提升代码品质、减少代码负债,更长远的提升研发效能。 +CodeFuse-Query 为广目提供的能力分为两部分: + +- 代码评估指标:代码复杂度、代码注释率、标准开发量等 +- 代码评优指标:代码复用度 + +### 变更分析-优酷服务端研发效能 + +优酷质量保障团队从 2023 年开始针对服务端精准测试的探索,经过半年的技术沉淀和体系搭建,形成了具备**变更内容识别、变更影响分析、测试能力推荐、测试覆盖评估**的精准测试体系。 +在此过程中,CodeFuse-Query 能提供的能力主要有: + +- 根据代码变更内容(文件+行号),分析出影响的对象:方法、入口(http 入口、hsf 入口)、调用链路(从入口到变更方法的所有调用链路)、数据库操作(表、操作类型) +- 结合线上动态调用链路(方法链路)、CodeFuse-Query 静态分析调用链路的影响面精准分析能力,提升变更分析影响面的有效性、准确率 + +到目前为止,优酷已通过 CodeFuse-Query 接入所有核心应用,并基于静态分析采集数据,构建了服务端完整的代码知识库和流量知识库。 diff --git a/content/en/docs/codefuse-query/5_toolchain.en.md b/docs/docs/developer-docs/CodeFuse-Query/main/toolchain.en-US.md similarity index 94% rename from content/en/docs/codefuse-query/5_toolchain.en.md rename to docs/docs/developer-docs/CodeFuse-Query/main/toolchain.en-US.md index 803ee8d..9ab545b 100644 --- a/content/en/docs/codefuse-query/5_toolchain.en.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/toolchain.en-US.md @@ -1,22 +1,31 @@ --- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + index: true + order: -1 title: Toolchain -slug: Toolchain -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-toolchain -aliases: -- "/docs/codefuse-query-toolchain" +order: -1 +toc: content --- - # Developing Plugins (VSCode) + ## Installation + ### Install from VSCode marketplace (Recommended) + [VSCode Extension](https://marketplace.visualstudio.com/items?itemName=CodeFuse-Query.codefuse-query-extension) + ### Install from local via VSIX pack + 1. Download the plugin. 2. 
Manually install from vsix: -![image.png](/images/codefuse-query/toolchain01.png) + ![image.png](/images/codefuse-query/toolchain01.png) 3. Or use the command directly from the terminal to install: + ```bash code --install-extension [extension vsix file path] ``` @@ -24,28 +33,46 @@ code --install-extension [extension vsix file path] ## Environment Preparation - Sparrow CLI, refer to Section 3 Installation, Configuration, and Running. + ## Extension Features + This extension provides the following feature modules: - COREF AST Viewer - Gödel Language Server - Gödel Language Runner + ### COREF AST Viewer + The following features need to be enabled in the extension settings. Currently, it only supports the Java language. + #### Convert Java Files into Tree-Like COREF Nodes + ![](/images/codefuse-query/toolchain02.gif) + #### Locate COREF Nodes and Code Positions Interactively + ![](/images/codefuse-query/toolchain03.gif) + #### View Node APIs and Copy Nodes in Lib API Viewer + ![](/images/codefuse-query/toolchain04.gif) + #### Lib API Viewer: Querying and Copying Usage + ![](/images/codefuse-query/toolchain05.gif) + ### Gödel Language Server Features + The following features need to be enabled after setting up the extension. Syntax highlighting is still available without setting related items. + #### Error Information Tips + Error information automatically updates with code changes. ![](/images/codefuse-query/toolchain06.gif) + #### Symbol Information Tips and Completion + Completion suggestions that include local variables and global symbols. Keywords provide corresponding usage examples; global symbol information offers more detailed internal information, such as member variables, member methods, and static methods. ![](/images/codefuse-query/toolchain07.gif) @@ -56,34 +83,49 @@ Completion suggestions that include local variables and global symbols. 
Keywords - `::` followed by symbol information and completion - Annotation usage example tips - Global symbol type information (internal structure, member methods, static methods) + #### Go to Definition + You can jump to definitions with a right-click or `ctrl`/`command`+`left click` to go directly to the exact symbol definition location. ![](/images/codefuse-query/toolchain08.gif) + #### Code Snippets (Snippets) + The extension provides some code snippets to quickly write Gödel 1.0/script code. ![](/images/codefuse-query/toolchain09.gif) + ### GödelScript Runner + Use after setting the Sparrow CLI path in the extension. The database needs to be loaded before running the script. For how to generate a database, refer to Section 3.4, Running, in the data extraction part. + #### Running Scripts + ![panel.gif](/images/codefuse-query/toolchain10.gif) There are four different script running buttons provided: + 1. Right-click to execute at the script you want to run. 2. Choose `Run GödelScript` on the extension `GodelScript Runner` panel. 3. Choose `Run` on the extension `GodelScript Runner Setting` panel. 4. Click the run button at the top right of the extension `GodelScript Runner Setting` panel. + #### Database Folder Loading + 1. Right-click at the script you want to run and choose the folder containing the database to load. 2. Choose `Load Database Directory` on the extension `GodelScript Runner` panel. 3. Choose `Database` on the extension `GodelScript Runner Setting` panel. 4. Click the database load button at the top right of the extension `GodelScript Runner Setting` panel. + ## Extension Settings + ### COREF AST Viewer Settings - `corefASTViewer.sparrowCliRoot` - Specify the root directory of Sparrow CLI, referring to Section 3 of the installation part. + ### Gödel Language Server Settings + When the extension starts, a prompt will pop up if any one of the following two items is not set. 
Clicking the `configure` button will redirect to the respective configuration page. - `godelScript.executablePath` @@ -95,4 +137,4 @@ When the extension starts, a prompt will pop up if any one of the following two # Smart Assistant -Stay tuned for the opening! \ No newline at end of file +Stay tuned for the opening! diff --git a/docs/docs/developer-docs/CodeFuse-Query/main/toolchain.zh-CN.md b/docs/docs/developer-docs/CodeFuse-Query/main/toolchain.zh-CN.md new file mode 100644 index 0000000..0614ee4 --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-Query/main/toolchain.zh-CN.md @@ -0,0 +1,139 @@ +--- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 +title: VSCode插件 +order: 3 +toc: content +--- + +# 开发插件(VSCode) + +## 安装 + +### 从 VSCode 官方插件市场安装(推荐) + +[插件地址](https://marketplace.visualstudio.com/items?itemName=CodeFuse-Query.codefuse-query-extension) + +### 使用 VSIX 安装包安装 + +1. 下载插件 +2. 手动从 vsix 安装: + ![image.png](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*fOE-T5L4f8gAAAAAAAAAAAAADlHYAQ/original) +3. 
或者使用指令直接从终端安装: + +```bash +code --install-extension [扩展vsix文件路径] +``` + +## 环境准备 + +- Sparrow CLI ,参照 3 安装、配置、运行 + +## 扩展特性 + +本扩展提供了以下功能模块: + +- COREF AST Viewer +- Gödel Language Server +- Gödel Language Runner + +### COREF AST Viewer + +以下功能需要在扩展设置中设置相关项后启用。目前仅支持于 Java 语言 + +#### Java 文件转成树状的 COREF Node + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*P9CQQp1q2wsAAAAAAAAAAAAADlHYAQ/original) + +#### Node 与代码位置的相互定位 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*x-VqT74thusAAAAAAAAAAAAADlHYAQ/original) + +#### 在 Lib API Viewer 查看 Node 的 API,Node 复制 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*qlKjS6cZzl0AAAAAAAAAAAAADlHYAQ/original) + +#### Lib API Viewer:查询与复制使用 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*2Uu8QYcfdbwAAAAAAAAAAAAADlHYAQ/original) + +### Gödel Language Server Features + +以下功能均需要在设置扩展后启用。不设置相关项的情况下,语法高亮仍然可用。 + +#### 错误信息提示 + +错误信息会随着代码的更新而自动更新。 +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*Rh2RT6KJRBIAAAAAAAAAAAAADlHYAQ/original) + +#### 符号信息提示和补全 + +包含 local 变量和全局符号信息的补全提示,关键字等信息会提供对应的使用样例,全局符号信息会提供更详细的内部信息,如包含的成员变量、成员方法、静态方法。 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*Q7NoSYqc8ScAAAAAAAAAAAAADlHYAQ/original) + +- 关键字补全和使用样例提示 +- local 变量类型信息和符号补全 +- `.` 跟随的符号信息和补全 +- `::` 跟随的符号信息和补全 +- 注解使用样例提示 +- 全局符号类型信息 (内部结构,成员方法,静态方法) + +#### 跳转到定义 + +可以通过右键跳转定义或者`ctrl`/`command`+`left click`直接跳转到准确的符号定义位置。 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*Ocg5SpI9mMMAAAAAAAAAAAAADlHYAQ/original) + +#### 代码片段 (Snippets) + +扩展提供了一些代码片段补齐以供快速编写 Gödel 1.0/script 代码。 + +![](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*rBq7Sb9HHG4AAAAAAAAAAAAADlHYAQ/original) + +### GödelScript Runner + +需要在扩展中设置 sparrow cli 路径后使用。运行脚本之前需要先加载数据库。关于如何生成数据库 参考 3.4.章节 运行 中的数据抽取部分。 + +#### 运行脚本 + +![panel.gif](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*PM2_QpHmb3AAAAAAAAAAAAAADlHYAQ/original) +提供了四种不同的脚本运行按钮: + +1. 在要运行的脚本处右键执行。 +2. 
在 extension `GodelScript Runner` 面板上选择 `Run GödelScript`。 +3. 在 extension `GodelScript Runner Setting` 面板上选择 `Run`。 +4. 在 extension `GodelScript Runner Setting` 面板右上角点击运行按钮。 + +#### 数据库文件夹加载 + +1. 在要运行的脚本处右键选择包含数据库的文件夹进行加载。 +2. 在 extension `GodelScript Runner` 面板上选择 `Load Database Directory`。 +3. 在 extension `GodelScript Runner Setting` 面板上选择 `Database`。 +4. 在 extension `GodelScript Runner Setting` 面板右上角点击数据库加载按钮。 + +## 扩展设置 + +### COREF AST Viewer 设置 + +- `corefASTViewer.sparrowCliRoot` + - 指定 Sparrow CLI 的根目录,参照第 3 章节的安装部分 + +### Gödel Language Server 设置 + +扩展启动时,以下两项中存在任意一项未被设置,则会弹出提示。点击`configure`按钮会跳转至相应配置页面。 + +- `godelScript.executablePath` + - 用于指定 GödelScript 的可执行文件路径,默认为空。需要时请替换为实际的 GödelScript 可执行文件的绝对路径。 + - 如果已经下载 Sparrow CLI ,则 GödelScript 可执行文件为 `[sparrow cli root]/godel-script/usr/bin/godel`。 +- `godelScript.libraryDirectoryPath` + - 用于指定 GödelScript 的库文件夹路径,默认为空。需要时请替换为 GödelScript 库文件夹绝对路径。 + - 如果已经下载 Sparrow CLI ,则库文件夹路径为 `[sparrow cli root]/lib-1.0`。 + +# 智能助手 + +待开放,敬请期待! diff --git a/content/en/docs/codefuse-query/user_case.en.md b/docs/docs/developer-docs/CodeFuse-Query/main/user_case.en-US.md similarity index 82% rename from content/en/docs/codefuse-query/user_case.en.md rename to docs/docs/developer-docs/CodeFuse-Query/main/user_case.en-US.md index ff2eef0..b789bb5 100644 --- a/content/en/docs/codefuse-query/user_case.en.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/user_case.en-US.md @@ -1,15 +1,21 @@ --- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 title: User Case -slug: 用户案例 -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-usercase -aliases: -- "/docs/codefuse-query-usercase" +order: 4 +toc: content --- # Use Cases + ## Querying Code Features + A developer wants to know which String type variables are used in Repo A, so he writes a Gödel script as follows and submits it to the CodeFuse-Query system for results.
+ ```rust // script use coref::java::* @@ -26,17 +32,25 @@ fn main() { output(out()) } ``` + Similar needs: querying for classes, functions, variables, return values, call graphs, class inheritance, etc. + ## Code Rule Checker + A team leader found that the team always wrote many bugs similar to Bug A. **He wanted to establish a code rule for Bug A and its checker** and do a check at the CodeReview stage. Through writing a query analysis on the CodeFuse-Query platform, and after testing it on the platform to meet the requirements, he solidified this analysis query as a code rule and launched it to the CodeReview/CI stage. Since then, this bug has never happened again. Similar needs: writing static defect scanning rules for code risk interception. + ## Obtaining Statistical Data + A researcher found that traditional code complexity metrics are difficult to accurately measure code complexity. By learning from international advanced experience and a stroke of genius, he designed a set of complexity metrics and algorithms. After implementing it with Gödel, **he found that without much optimization, the performance was already very high**, and it was quickly applied to more than 10 languages and over 110,000 repositories. He immediately had an in-depth understanding of the overall complexity of code repositories. Compared to the past, when he had to parse code and analyze syntax trees himself, and interface with systems, **it's hard to know how much more convenient it has become**. Similar needs: code statistics, code metrics, algorithm design, academic research. # Application Fields + Currently, CodeFuse-Query at Ant Group has already supported the implementation of multiple scenarios such as **CodeFuse large language model data cleaning**, **code metric assessment**, **R&D risk control**, **privacy security analysis**, **code intelligence**, **end-package size governance**, etc., with a monthly service call volume exceeding one million. 
+ ## High-Quality Code Data Cleaning - CodeFuse Code Large Model + The CodeFuse code large model is a model for dealing with code-related issues open-sourced by Ant Group. For the CodeFuse large language model, the quality of the training data directly affects the inference results of the model. Low-quality code data will directly pollute the output of the language model. For example, the model may learn incorrect code patterns, thereby generating incorrect code. If the data only contains code in a certain programming language, the model may not adapt well to the code of other programming languages. To control the quality of code data entering the model and thereby improve the inferential capability of the model, we have sorted out the definition of high-quality code based on years of practical experience of the Ant code analysis team combined with industry consensus, and implemented automated, large-scale code data cleaning using existing program analysis technology. CodeFuse-Query provides the following data cleaning capabilities for the CodeFuse code large model: @@ -44,12 +58,14 @@ CodeFuse-Query provides the following data cleaning capabilities for the CodeFus - High-quality code data cleaning: clean code data, including vulnerability scanning for Python, Java, JavaScript, TypeScript, Go, C, C++ 7 languages, filtering by language type/star count, filtering out data with 0 effective code lines, etc. Currently, about **2TB** of cleaned GitHub and Ant internal code data has been accumulated. - Code Portrait: Implement high-performance, multi-dimensional automatic annotation of large-scale code, supporting Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go, and other **10** languages, **77** common tags, **40** Ant-specific tags, a total of **117** tags. Current auto-annotation performance can reach **40MB/s**. 
- Other atomic capabilities - - Advanced code feature extraction, including AST (Abstract Syntax Tree), DFG (Data Flow Graph) data extraction, etc. Currently, AST information has been used for SFT training, with an accuracy of about 97%. - - Code snippet identification, used for extracting code from text data, convenient for code formatting or adding Markdown format: - - Text extraction code: extract code block information from the text, support parsing of mainstream languages, function and class definitions, only validate binary classification, which is to verify whether the text contains code blocks, accuracy is about 83%. - - Identify the programming language type of code snippets: identify the programming language type of any code snippet, support 30+ languages, accuracy is about 80%. - - Code comment pair extraction: support extraction of method-level comment-code pair information, cover **15 kinds** of GitHub's most popular languages, used for Text To Code/Code To Text SFT training. + - Advanced code feature extraction, including AST (Abstract Syntax Tree), DFG (Data Flow Graph) data extraction, etc. Currently, AST information has been used for SFT training, with an accuracy of about 97%. + - Code snippet identification, used for extracting code from text data, convenient for code formatting or adding Markdown format: + - Text extraction code: extract code block information from the text, support parsing of mainstream languages, function and class definitions, only validate binary classification, which is to verify whether the text contains code blocks, accuracy is about 83%. + - Identify the programming language type of code snippets: identify the programming language type of any code snippet, support 30+ languages, accuracy is about 80%. + - Code comment pair extraction: support extraction of method-level comment-code pair information, cover **15 kinds** of GitHub's most popular languages, used for Text To Code/Code To Text SFT training. 
+ ## Change Analysis - Youku Server-side R&D Efficiency + From 2023, the Youku quality assurance team started exploring precise testing for the server-side. After half a year of technical accumulation and system building, a precise testing system with **change content identification, change impact analysis, testing capability recommendation, testing coverage assessment** was formed. In this process, the capabilities that CodeFuse-Query can provide mainly include: diff --git a/content/zh/docs/codefuse-query/user_case.md b/docs/docs/developer-docs/CodeFuse-Query/main/user_case.zh-CN.md similarity index 52% rename from content/zh/docs/codefuse-query/user_case.md rename to docs/docs/developer-docs/CodeFuse-Query/main/user_case.zh-CN.md index 48722e9..4a9ac3e 100644 --- a/content/zh/docs/codefuse-query/user_case.md +++ b/docs/docs/developer-docs/CodeFuse-Query/main/user_case.zh-CN.md @@ -1,15 +1,21 @@ --- +store: + title: CodeFuse-Query + version: main +group: + title: 🌱 CodeFuse-Query + order: -1 title: 用户案例 -slug: 用户案例 -description: CodeFuse介绍主要功能 -url: /docs/codefuse-query-usercase-zh -aliases: -- "/docs/codefuse-query-usercase-zh" +order: 4 +toc: content --- # 使用场景 + ## 查询代码特征 + 小开发同学想知道 Repo A 里面使用了哪些 String 型的变量,所以他写了一个 Gödel 如下,交给 CodeFuse-Query 系统给他返回了结果。 + ```rust // script use coref::java::* @@ -26,34 +32,44 @@ fn main() { output(out()) } ``` + 类似需求:查询:类,函数,变量,返回值,调用图,类继承等等。 + ## 代码规则检查器 + 小 TL 同学发现团队总是写出很多类似的 Bug A,**他想针对 Bug A 制定一个代码规则和其检查器**,并在 CodeReview 阶段做个卡点。小 TL 通过在 CodeFuse-Query 平台上面编写了一段分析 Query,在平台上面测试符合要求,把这段分析 Query 固化下来作为一个代码规则,并上线到了 CodeReview/CI 阶段。从此这个 Bug 再也没发生过了。 类似需求:编写静态缺陷扫描规则进行代码风险拦截。 + ## 获取统计数据 + 小研究发现传统的代码复杂度指标很难准确地衡量代码的复杂情况,通过学习国际先进经验加上自我灵光一闪,设计了一套复杂度指标和算法。通过 Gödel 实现出来以后,**发现不怎么优化就已经性能非常高了**,很快就应用到了 10 几种语言,11+万个仓库当中去了。马上就对代码仓库整体的复杂度有了深入的了解。相比较以前需要自己解析代码,分析语法树,对接系统,**不知道方便了多少。** 类似需求:代码统计,代码度量,算法设计,学术研究。 -# 应用领域 -目前,CodeFuse-Query在蚂蚁集团已经支持 **CodeFuse大语言模型数据清洗**、**代码度量评估**、**研发风险控制**、**隐私安全分析**、**代码智能**、**终端包大小治理 
**等多个场景的落地应用,服务月均调用量超过百万。 -## 高质量代码数据清洗 - CodeFuse代码大模型 -CodeFuse代码大模型是蚂蚁集团对外开源的处理代码相关问题的模型,对于CodeFuse大语言模型而言,训练的数据质量直接影响模型的推理结果。低质量的代码数据会直接污染语言模型的输出,例如:模型可能会学习到错误的代码模式,从而生成错误的代码;数据中只包含某种编程语言的代码,模型可能无法很好地适应其他编程语言的代码。 +# 应用领域 + +目前,CodeFuse-Query 在蚂蚁集团已经支持 **CodeFuse 大语言模型数据清洗**、**代码度量评估**、**研发风险控制**、**隐私安全分析**、**代码智能**、**终端包大小治理 **等多个场景的落地应用,服务月均调用量超过百万。 + +## 高质量代码数据清洗 - CodeFuse 代码大模型 + +CodeFuse 代码大模型是蚂蚁集团对外开源的处理代码相关问题的模型,对于 CodeFuse 大语言模型而言,训练的数据质量直接影响模型的推理结果。低质量的代码数据会直接污染语言模型的输出,例如:模型可能会学习到错误的代码模式,从而生成错误的代码;数据中只包含某种编程语言的代码,模型可能无法很好地适应其他编程语言的代码。 为了把控进入模型的代码数据质量,进而提升模型的推理能力。我们基于蚂蚁程序分析团队多年的实践积累结合业界共识,梳理了高质量代码的定义方式,并利用已有程序分析技术实现了自动化、大规模的代码数据清洗。 -CodeFuse-Query为CodeFuse代码大模型提供了以下数据清洗能力: +CodeFuse-Query 为 CodeFuse 代码大模型提供了以下数据清洗能力: - 高质量代码数据清洗:对代码数据进行清洗,包括对 Python,Java,JavaScript,TypeScript,Go,C,C++ 7 种语言进行漏洞扫描,对语言种类 / star 数进行筛选,过滤有效代码行数为 0 的数据等。目前已沉淀清洗后的 GitHub 和蚂蚁内部代码数据总共约 **2TB**。 - 代码画像:实现对大规模代码进行高性能多维度的自动标注,支持 Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go 等 **10** 种语言,**77** 种通用标签,**40** 种蚂蚁特有标签,共 **117** 种标签。目前自动标注性能能够达到 **40MB/s**。 - 其他原子能力 - - 高级代码特征提取,包括提取 AST(抽象语法树),DFG(数据流图)数据等。目前 AST 信息已用于 SFT 训练,准确率 97% 左右。 - - 代码片段识别,用于针对文本数据中的代码进行提取,方便进行代码格式化或加上 Markdown 格式: - - 文本提取代码:从文本中提取代码块信息,支持主流语言的解析,函数及类定义,仅验证二分类问题,就是说仅验证文本是否含有代码块准确率 83% 左右。 - - 识别代码片段的编程语言种类:识别任意代码片段的编程语言种类,支持 30+ 种语言,准确率80%左右。 - - 代码注释对提取:支持提取方法级别的注释-代码对信息,覆盖 **15 种** GitHub 最流行的语言,用于 Text To Code/Code To Text 的 SFT 训练。 + - 高级代码特征提取,包括提取 AST(抽象语法树),DFG(数据流图)数据等。目前 AST 信息已用于 SFT 训练,准确率 97% 左右。 + - 代码片段识别,用于针对文本数据中的代码进行提取,方便进行代码格式化或加上 Markdown 格式: + - 文本提取代码:从文本中提取代码块信息,支持主流语言的解析,函数及类定义,仅验证二分类问题,就是说仅验证文本是否含有代码块准确率 83% 左右。 + - 识别代码片段的编程语言种类:识别任意代码片段的编程语言种类,支持 30+ 种语言,准确率 80%左右。 + - 代码注释对提取:支持提取方法级别的注释-代码对信息,覆盖 **15 种** GitHub 最流行的语言,用于 Text To Code/Code To Text 的 SFT 训练。 + ## 变更分析-优酷服务端研发效能 -优酷质量保障团队从2023年开始针对服务端精准测试的探索,经过半年的技术沉淀和体系搭建,形成了具备**变更内容识别、变更影响分析、测试能力推荐、测试覆盖评估**的精准测试体系。 -在此过程中,CodeFuse-Query能提供的能力主要有: -- 
根据代码变更内容(文件+行号),分析出影响的对象:方法、入口(http入口、hsf入口)、调用链路(从入口到变更方法的所有调用链路)、数据库操作(表、操作类型) -- 结合线上动态调用链路(方法链路)、CodeFuse-Query静态分析调用链路的影响面精准分析能力,提升变更分析影响面的有效性、准备率 +优酷质量保障团队从 2023 年开始针对服务端精准测试的探索,经过半年的技术沉淀和体系搭建,形成了具备**变更内容识别、变更影响分析、测试能力推荐、测试覆盖评估**的精准测试体系。 +在此过程中,CodeFuse-Query 能提供的能力主要有: + +- 根据代码变更内容(文件+行号),分析出影响的对象:方法、入口(http 入口、hsf 入口)、调用链路(从入口到变更方法的所有调用链路)、数据库操作(表、操作类型) +- 结合线上动态调用链路(方法链路)、CodeFuse-Query 静态分析调用链路的影响面精准分析能力,提升变更分析影响面的有效性、准备率 -到目前为止,优酷已通过CodeFuse-Query接入所有核心应用,并基于静态分析采集数据,构建了服务端完整的代码知识库和流量知识库。 \ No newline at end of file +到目前为止,优酷已通过 CodeFuse-Query 接入所有核心应用,并基于静态分析采集数据,构建了服务端完整的代码知识库和流量知识库。 diff --git a/content/en/docs/overview/b10.codefuse-evalution.md b/docs/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution.en-US.md similarity index 68% rename from content/en/docs/overview/b10.codefuse-evalution.md rename to docs/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution.en-US.md index e81e980..d0cde22 100644 --- a/content/en/docs/overview/b10.codefuse-evalution.md +++ b/docs/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution.en-US.md @@ -1,25 +1,34 @@ ---- -title: "CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model" -description: 介绍主要功能 -url: "/docs/codefuse-evalution" -aliases: -- "/docs/codefuse-evalution" ---- - - -# CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model - - - -CodeFuseEval is a Code Generation benchmark that combines the multi-tasking scenarios of CodeFuse Model with the benchmarks of HumanEval-x and MBPP. This benchmark is designed to evaluate the performance of models in various multi-tasking tasks, including code completion, code generation from natural language, test case generation, cross-language code translation, and code generation from Chinese commands, among others.Continuously open, stay tuned ! - -

    - English Introduction -

    +--- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-evalution + version: main +group: + title: 🌱 CodeFuse-evalution + index: true + order: -1 +title: CodeFuse-evalution +order: -1 +toc: content +--- + +# CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model + + + +CodeFuseEval is a Code Generation benchmark that combines the multi-tasking scenarios of CodeFuse Model with the benchmarks of HumanEval-x and MBPP. This benchmark is designed to evaluate the performance of models in various multi-tasking tasks, including code completion, code generation from natural language, test case generation, cross-language code translation, and code generation from Chinese commands, among others.Continuously open, stay tuned ! + +

    + English Introduction +

    diff --git a/docs/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution.zh-CN.md b/docs/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution.zh-CN.md new file mode 100644 index 0000000..a90b9bf --- /dev/null +++ b/docs/docs/developer-docs/CodeFuse-evalution/main/codefuse-evalution.zh-CN.md @@ -0,0 +1,29 @@ +--- +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: CodeFuse-evalution + version: main +group: + title: 🌱 CodeFuse-evalution + index: true + order: -1 +title: 代码大语言模型的多任务评估基准 +order: -1 +toc: content +--- + + + +CodeFuseEval 在 HumanEval-x、MBPP 的基准上,结合 CodeFuse 大模型多任务场景,开发的编程领域多任务的评测基准, 可用于评估模型在代码补全,自然语言生成代码,测试用例生成、跨语言代码翻译,中文指令生成代码等多类任务的性能。持续开放中,敬请期待! + +![img](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*7CZtSpgj3K4AAAAAAAAAAAAADlHYAQ/original) diff --git a/content/en/docs/codefuse-evalution/1_quickstart.md b/docs/docs/developer-docs/CodeFuse-evalution/main/quickstart.en-US.md similarity index 74% rename from content/en/docs/codefuse-evalution/1_quickstart.md rename to docs/docs/developer-docs/CodeFuse-evalution/main/quickstart.en-US.md index cbad2e6..8627dd9 100644 --- a/content/en/docs/codefuse-evalution/1_quickstart.md +++ b/docs/docs/developer-docs/CodeFuse-evalution/main/quickstart.en-US.md @@ -1,258 +1,280 @@ ---- -title: QuickStart -description: 介绍主要功能 -url: docs/codefuse-evalution-quickstart -aliases: -- "/docs/codefuse-evalution-quickstart" ---- - - - - - -## Generation environment: -CodeFuse-13B: Python 3.8 or above,PyTorch 1.12 or above, with a recommendation for 2.0 or above, Transformers 4.24.0 or above ,CUDA 11.4 or above (for GPU users and flash-attention users, this option should be considered). - -CodeFuse-CodeLlama-34B:python>=3.8,pytorch>=2.0.0,transformers==4.32.0,Sentencepiece,CUDA 11. - -## Evaluation Environment -The evaluation of the generated codes involves compiling and running in multiple programming languages. 
The versions of the programming language environments and packages we use are as follows: - -| Dependency | Version | -| ---------- |----------| -| Python | 3.10.9 | -| JDK | 18.0.2.1 | -| Node.js | 16.14.0 | -| js-md5 | 0.7.3 | -| C++ | 11 | -| g++ | 7.5.0 | -| Boost | 1.75.0 | -| OpenSSL | 3.0.0 | -| go | 1.18.4 | -| cargo | 1.71.1 | - -In order to save everyone the trouble of setting up the environments for these languages, we create a Docker image with the required environments and codefuseEval. -```bash -docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest -``` - -If you are familiar with docker, you can build the image from `codefuseEval/docker/Dockerfile` or configure the Dockerfile as you like it: - -```bash -cd codefuseEval/docker -docker build [OPTIONS] . -``` - -After obtaining the image, you can build a container using the following command: - -```bash -docker run -it --gpus all --mount type=bind,source=,target= [OPTIONS] -``` - -## Check result Command: -We provide the script to check the result for provided code LLMs. Please use following scripts to check corresponding results and the environment . - -```bash -bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python -bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-13B/humaneval_result_python.jsonl humaneval_python -``` - -## How to use CodeFuseEval -1. Download the model and update current model infomation in ckpt_config.json. Mainly update 「path」parameter in corresponding model and version. -2. Run following generation comand to generate result. -``` -bash codefuseEval/script/generation.sh MODELNAME MODELVERSION EVALDATASET OUTFILE - -eg: -bash codefuseEval/script/generation.sh CodeFuse-13B v1 humaneval_python result/test.jsonl -``` -3. Run following evaluation command to evaluate the generated result for corresponding model and version. 
-``` -bash codefuseEval/script/evaluation.sh -eg: -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python -``` - - -## Evaluation - -We recommend evaluating in [the provided image](#evaluation-environment). To evaluate the generated samples, save generated codes in the following JSON list format: - -``` -{"task_id": "../..", "generation: "..."} -{"task_id": "../..", "generation: "..."} -... -``` - -and evaluate them using the following script under the root directory of the repository (please execute with caution, the generated codes might have unexpected behaviours though with very low possibility. See the warnings in [execution.py](execution.py) and uncomment the execution lines at your own risk): - -### Evaluation Data -Data are stored in ``codefuseEval/data``, using JSON list format. We first integrated humaneval-X dataset. - -* ``task_id``: indicates the target language and ID of the problem. Language is one of ["Python", "Java", "JavaScript", "CPP", "Go"]. -* ``prompt``: the function declaration and docstring, used for code generation. -* ``declaration``: only the function declaration, used for code translation. -* ``canonical_solution``: human-crafted example solutions. -* ``test``: hidden test samples, used for evaluation -* ``example_test``: public test samples (appeared in prompt), used for evaluation. -* ``prompt_text``: prompt text -* ``prompt_explain``: prompt explanation -* ``func_title``: code function title -* ``prompt_text_chinese``: Chinese prompt - - -### Evaluation Metrics -In addition to the unbiased pass@k indicators currently provided in [Codex](https://arxiv.org/abs/2107.03374), we will also integrate the relevant indicators of huggingface open source with [CodeBLEU](https://arxiv.org/abs/2009.10297) for integration. 
-The main indicators currently recommended for users are as follows: -* ``codebleu`` -* ``pass@k`` -* ``bleu`` -* ``bleurt`` - -For other related metrics, you can check the code of the metric or the evaluation code to meet your requirements. - -At the same time, we supplemented the indicators of the total and average generation time of the model for the dataset `total_time_cost` and `Average time cost` - -Output during each generation, making it convenient for users to measure the generation performance of the model in the same environment. This indicator is passive output, and it will be output every time it is generated. - -### Evaluation Command: -``` -bash codefuseEval/script/evaluation.sh -eg: -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python -``` - -At the same time, we currently provide the following flags, which can directly bring the sample answers in the test data set as generated answers for testing. - -* ``TEST_GROUDTRUTH`` default False - -When TEST_GROUDTRUTH is True, the self-test mode is turned on, PROBLEM_FILE will be read, and the sample answer will be substituted as the generated answer for testing. - -When TEST_GROUDTRUTH is False, open the evaluation mode, read RESULT_FILE and PROBLEM_FILE, and substitute the generated answer for testing. - - -## More Infomation - -### Evaluation self model and dataset - -1. Registry your evaluate dataset. -* Download evaluation dataset to store in `codefuseEval/data` or other directory. Dataset must be jsonl. -* Setup information dataset `EVAL_DATASET`,`DATASET_SUPPORT` and `DATASET_LANGUAGE` in `codefuseEval/util.py` for dataset path, dataset task_mode and generation code language -2. Registry your evaluate model. -* Download evaluation model to store in `codefuseEval/model` or other directory. -* Write your evaluation model processor code in `codefuseEval/processor` package. - -We designed an infrastructure called Processor. 
Its main purpose is to handle the differences between different models. It mainly needs to complete three abstract functions: -* ``load_model_tokenizer``:Due to differences in model loading parameters and tokenizer terminators, models need to use different parameters for adaptation and loading. The current function is mainly to help users load and adapt different models. -* ``process_before``: Since prompt adapts to different prompt styles according to different types of evaluation tasks or different models selected by users, the 「process_before」function is extracted mainly to help users process prompts. -* ``process_after``:Due to the diversity of model generation results, in order to adapt to the evaluation framework, the generated result data can be spliced into appropriate use cases for automated operation. The current function mainly processes the generated results to adapt to the evaluation data set and results based on the task type and data set conditions. - -You can extend the `BaseProcessor` in `codefuseEval/processor/base.py` and implement above functions - -* Setup information model in `ckpt_config.json`. For Example as follow -``` -{ - "CodeFuse-13B": { //model name - "v1": { //model version - "path": "/mnt/model/CodeFuse13B-evol-instruction-4K/", // model path - "processor_class": "codefuseEval.process.codefuse13b.Codefuse13BProcessor", // model processor - "tokenizer": { // tokenizer params to token input string. - "truncation": true, - "padding": true, - "max_length": 600 - }, - "generation_config": { //generation config params. - "greedy": { //If JsonObject, it is a decode mode, you can set 「decode_mode」param to load params defined in the decode_mode. 
- "do_sample": false, - "num_beams": 1, - "max_new_tokens": 512 - }, - "beams": { - "do_sample": false, - "num_beams": 5, - "max_new_tokens": 600, - "num_return_sequences": 1 - }, - "dosample": { - "da_sample": true - }, - "temperature": 0.2, //If not JsonObject, it is a default param, we will set in generation_config default. You can cover param in decode_mode same name param. - "max_new_tokens": 600, - "num_return_sequences": 1, - "top_p": 0.9, - "num_beams": 1, - "do_sample": true - }, - "batch_size": 1, // batch size for generate - "sample_num": 1, // The number of samples generated by a single piece of data - "decode_mode": "beams" // choose decode mode defined in generation_config - } - } -``` - -### Check dataset Command: -To check whether the reference values provided by the evaluation data set are correct, -we provide the following command to check the dataset. - -CodeCompletion -```bash -bash codefuseEval/script/check_dataset.sh humaneval_python - -bash codefuseEval/script/check_dataset.sh humaneval_java - -bash codefuseEval/script/check_dataset.sh humaneval_js - -bash codefuseEval/script/check_dataset.sh humaneval_rust - -bash codefuseEval/script/check_dataset.sh humaneval_go - -bash codefuseEval/script/check_dataset.sh humaneval_cpp -``` -NL2Code -```bash -bash codefuseEval/script/check_dataset.sh mbpp -``` -CodeTrans -``` -bash codefuseEval/script/check_dataset.sh codeTrans_python_to_java - -bash codefuseEval/script/check_dataset.sh codeTrans_python_to_cpp - -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_java - -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_python - -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_python - -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_cpp -``` -CodeScience -``` -bash codefuseEval/script/check_dataset.sh codeCompletion_matplotlib - -bash codefuseEval/script/check_dataset.sh codeCompletion_numpy - -bash codefuseEval/script/check_dataset.sh codeCompletion_pandas - -bash 
codefuseEval/script/check_dataset.sh codeCompletion_pytorch - -bash codefuseEval/script/check_dataset.sh codeCompletion_scipy - -bash codefuseEval/script/check_dataset.sh codeCompletion_sklearn - -bash codefuseEval/script/check_dataset.sh codeCompletion_tensorflow - -bash codefuseEval/script/check_dataset.sh codeInsertion_matplotlib - -bash codefuseEval/script/check_dataset.sh codeInsertion_numpy - -bash codefuseEval/script/check_dataset.sh codeInsertion_pandas - -bash codefuseEval/script/check_dataset.sh codeInsertion_pytorch - -bash codefuseEval/script/check_dataset.sh codeInsertion_scipy - -bash codefuseEval/script/check_dataset.sh codeInsertion_sklearn - -bash codefuseEval/script/check_dataset.sh codeInsertion_tensorflow -``` \ No newline at end of file +--- +store: + title: CodeFuse-evalution + version: main +group: + title: 🌱 CodeFuse-evalution + order: -1 +title: QuickStart +order: 0 +toc: content +--- + +## Generation environment: + +CodeFuse-13B: Python 3.8 or above,PyTorch 1.12 or above, with a recommendation for 2.0 or above, Transformers 4.24.0 or above ,CUDA 11.4 or above (for GPU users and flash-attention users, this option should be considered). + +CodeFuse-CodeLlama-34B:python>=3.8,pytorch>=2.0.0,transformers==4.32.0,Sentencepiece,CUDA 11. + +## Evaluation Environment + +The evaluation of the generated codes involves compiling and running in multiple programming languages. The versions of the programming language environments and packages we use are as follows: + +| Dependency | Version | +| ---------- | -------- | +| Python | 3.10.9 | +| JDK | 18.0.2.1 | +| Node.js | 16.14.0 | +| js-md5 | 0.7.3 | +| C++ | 11 | +| g++ | 7.5.0 | +| Boost | 1.75.0 | +| OpenSSL | 3.0.0 | +| go | 1.18.4 | +| cargo | 1.71.1 | + +In order to save everyone the trouble of setting up the environments for these languages, we create a Docker image with the required environments and codefuseEval. 
+ +```bash +docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest +``` + +If you are familiar with docker, you can build the image from `codefuseEval/docker/Dockerfile` or configure the Dockerfile as you like it: + +```bash +cd codefuseEval/docker +docker build [OPTIONS] . +``` + +After obtaining the image, you can build a container using the following command: + +```bash +docker run -it --gpus all --mount type=bind,source=,target= [OPTIONS] +``` + +## Check result Command: + +We provide the script to check the result for provided code LLMs. Please use following scripts to check corresponding results and the environment . + +```bash +bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python +bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-13B/humaneval_result_python.jsonl humaneval_python +``` + +## How to use CodeFuseEval + +1. Download the model and update current model infomation in ckpt_config.json. Mainly update 「path」parameter in corresponding model and version. +2. Run following generation comand to generate result. + +``` +bash codefuseEval/script/generation.sh MODELNAME MODELVERSION EVALDATASET OUTFILE + +eg: +bash codefuseEval/script/generation.sh CodeFuse-13B v1 humaneval_python result/test.jsonl +``` + +3. Run following evaluation command to evaluate the generated result for corresponding model and version. + +``` +bash codefuseEval/script/evaluation.sh +eg: +bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python +``` + +## Evaluation + +We recommend evaluating in [the provided image](#evaluation-environment). To evaluate the generated samples, save generated codes in the following JSON list format: + +``` +{"task_id": "../..", "generation: "..."} +{"task_id": "../..", "generation: "..."} +... 
+``` + +and evaluate them using the following script under the root directory of the repository (please execute with caution, the generated codes might have unexpected behaviours though with very low possibility. See the warnings in [execution.py]() and uncomment the execution lines at your own risk): + +### Evaluation Data + +Data are stored in `codefuseEval/data`, using JSON list format. We first integrated humaneval-X dataset. + +- `task_id`: indicates the target language and ID of the problem. Language is one of ["Python", "Java", "JavaScript", "CPP", "Go"]. +- `prompt`: the function declaration and docstring, used for code generation. +- `declaration`: only the function declaration, used for code translation. +- `canonical_solution`: human-crafted example solutions. +- `test`: hidden test samples, used for evaluation +- `example_test`: public test samples (appeared in prompt), used for evaluation. +- `prompt_text`: prompt text +- `prompt_explain`: prompt explanation +- `func_title`: code function title +- `prompt_text_chinese`: Chinese prompt + +### Evaluation Metrics + +In addition to the unbiased pass@k indicators currently provided in [Codex](https://arxiv.org/abs/2107.03374), we will also integrate the relevant indicators of huggingface open source with [CodeBLEU](https://arxiv.org/abs/2009.10297) for integration. +The main indicators currently recommended for users are as follows: + +- `codebleu` +- `pass@k` +- `bleu` +- `bleurt` + +For other related metrics, you can check the code of the metric or the evaluation code to meet your requirements. + +At the same time, we supplemented the indicators of the total and average generation time of the model for the dataset `total_time_cost` and `Average time cost` + +Output during each generation, making it convenient for users to measure the generation performance of the model in the same environment. This indicator is passive output, and it will be output every time it is generated. 
+ +### Evaluation Command: + +``` +bash codefuseEval/script/evaluation.sh +eg: +bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python +``` + +At the same time, we currently provide the following flags, which can directly use the sample answers in the test data set as generated answers for testing. + +- `TEST_GROUDTRUTH` default False + +When TEST_GROUDTRUTH is True, the self-test mode is turned on, PROBLEM_FILE will be read, and the sample answer will be substituted as the generated answer for testing. + +When TEST_GROUDTRUTH is False, the evaluation mode is turned on; RESULT_FILE and PROBLEM_FILE will be read, and the generated answer will be substituted for testing. + +## More Information + +### Evaluating your own model and dataset + +1. Register your evaluation dataset. + +- Download the evaluation dataset and store it in `codefuseEval/data` or another directory. The dataset must be in jsonl format. +- Set up the dataset information `EVAL_DATASET`, `DATASET_SUPPORT` and `DATASET_LANGUAGE` in `codefuseEval/util.py` for the dataset path, dataset task_mode and generated code language + +2. Register your evaluation model. + +- Download the evaluation model and store it in `codefuseEval/model` or another directory. +- Write your evaluation model processor code in the `codefuseEval/processor` package. + +We designed an infrastructure called Processor. Its main purpose is to handle the differences between different models. It mainly needs to complete three abstract functions: + +- `load_model_tokenizer`: Due to differences in model loading parameters and tokenizer terminators, models need to use different parameters for adaptation and loading. The current function is mainly to help users load and adapt different models. +- `process_before`: Since prompt adapts to different prompt styles according to different types of evaluation tasks or different models selected by users, the 「process_before」function is extracted mainly to help users process prompts.
+- `process_after`:Due to the diversity of model generation results, in order to adapt to the evaluation framework, the generated result data can be spliced into appropriate use cases for automated operation. The current function mainly processes the generated results to adapt to the evaluation data set and results based on the task type and data set conditions. + +You can extend the `BaseProcessor` in `codefuseEval/processor/base.py` and implement above functions + +- Setup information model in `ckpt_config.json`. For Example as follow + +``` +{ + "CodeFuse-13B": { //model name + "v1": { //model version + "path": "/mnt/model/CodeFuse13B-evol-instruction-4K/", // model path + "processor_class": "codefuseEval.process.codefuse13b.Codefuse13BProcessor", // model processor + "tokenizer": { // tokenizer params to token input string. + "truncation": true, + "padding": true, + "max_length": 600 + }, + "generation_config": { //generation config params. + "greedy": { //If JsonObject, it is a decode mode, you can set 「decode_mode」param to load params defined in the decode_mode. + "do_sample": false, + "num_beams": 1, + "max_new_tokens": 512 + }, + "beams": { + "do_sample": false, + "num_beams": 5, + "max_new_tokens": 600, + "num_return_sequences": 1 + }, + "dosample": { + "da_sample": true + }, + "temperature": 0.2, //If not JsonObject, it is a default param, we will set in generation_config default. You can cover param in decode_mode same name param. + "max_new_tokens": 600, + "num_return_sequences": 1, + "top_p": 0.9, + "num_beams": 1, + "do_sample": true + }, + "batch_size": 1, // batch size for generate + "sample_num": 1, // The number of samples generated by a single piece of data + "decode_mode": "beams" // choose decode mode defined in generation_config + } + } +``` + +### Check dataset Command: + +To check whether the reference values provided by the evaluation data set are correct, +we provide the following command to check the dataset. 
+ +CodeCompletion + +```bash +bash codefuseEval/script/check_dataset.sh humaneval_python + +bash codefuseEval/script/check_dataset.sh humaneval_java + +bash codefuseEval/script/check_dataset.sh humaneval_js + +bash codefuseEval/script/check_dataset.sh humaneval_rust + +bash codefuseEval/script/check_dataset.sh humaneval_go + +bash codefuseEval/script/check_dataset.sh humaneval_cpp +``` + +NL2Code + +```bash +bash codefuseEval/script/check_dataset.sh mbpp +``` + +CodeTrans + +``` +bash codefuseEval/script/check_dataset.sh codeTrans_python_to_java + +bash codefuseEval/script/check_dataset.sh codeTrans_python_to_cpp + +bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_java + +bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_python + +bash codefuseEval/script/check_dataset.sh codeTrans_java_to_python + +bash codefuseEval/script/check_dataset.sh codeTrans_java_to_cpp +``` + +CodeScience + +``` +bash codefuseEval/script/check_dataset.sh codeCompletion_matplotlib + +bash codefuseEval/script/check_dataset.sh codeCompletion_numpy + +bash codefuseEval/script/check_dataset.sh codeCompletion_pandas + +bash codefuseEval/script/check_dataset.sh codeCompletion_pytorch + +bash codefuseEval/script/check_dataset.sh codeCompletion_scipy + +bash codefuseEval/script/check_dataset.sh codeCompletion_sklearn + +bash codefuseEval/script/check_dataset.sh codeCompletion_tensorflow + +bash codefuseEval/script/check_dataset.sh codeInsertion_matplotlib + +bash codefuseEval/script/check_dataset.sh codeInsertion_numpy + +bash codefuseEval/script/check_dataset.sh codeInsertion_pandas + +bash codefuseEval/script/check_dataset.sh codeInsertion_pytorch + +bash codefuseEval/script/check_dataset.sh codeInsertion_scipy + +bash codefuseEval/script/check_dataset.sh codeInsertion_sklearn + +bash codefuseEval/script/check_dataset.sh codeInsertion_tensorflow +``` diff --git a/content/zh/docs/codefuse-evalution/1_quickstart.md 
b/docs/docs/developer-docs/CodeFuse-evalution/main/quickstart.zh-CN.md similarity index 68% rename from content/zh/docs/codefuse-evalution/1_quickstart.md rename to docs/docs/developer-docs/CodeFuse-evalution/main/quickstart.zh-CN.md index 0241859..74fd7eb 100644 --- a/content/zh/docs/codefuse-evalution/1_quickstart.md +++ b/docs/docs/developer-docs/CodeFuse-evalution/main/quickstart.zh-CN.md @@ -1,248 +1,280 @@ ---- -title: 快速使用 -description: 介绍主要功能 -url: docs/codefuse-evalution-quickstart-zh -aliases: -- "/docs/codefuse-evalution-quickstart-zh" ---- - -## 推理环境: -CodeFuse-13B: python 3.8及以上版本,pytorch 2.0及以上版本,transformers 4.24.0及以上版本,CUDA 11.4及以上; - -CodeFuse-CodeLlama-34B: python 3.8及以上版本,pytorch2.0及以上版本,transformers==4.32.0 ,Sentencepiece,CUDA 11.4及以上。 - -## 评测执行环境 - -评测生成的代码需要使用多种语言编译、运行。我们使用的各编程语言依赖及所用包的版本如下: - -| 依赖 | 版本 | -| ------- |----------| -| Python | 3.10.9 | -| JDK | 18.0.2.1 | -| Node.js | 16.14.0 | -| js-md5 | 0.7.3 | -| C++ | 11 | -| g++ | 7.5.0 | -| Boost | 1.75.0 | -| OpenSSL | 3.0.0 | -| go | 1.18.4 | -| cargo | 1.71.1 | - - -为了省去使用者配置这些语言环境的麻烦,我们构建了一个Docker镜像,并在其中配置了所需要的环境,你可以按照下面的指令拉取使用 -```bash -docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest -``` - -如果您熟悉Dockerfile,也可以从`codefuseEval/docker/Dockerfile`构建镜像,或者修改之以定制自己的配置: - -```bash -cd codefuseEval/docker -docker build [OPTIONS] . -``` - -获取镜像后,使用如下命令创建容器: - -```bash -docker run -it --gpus all --mount type=bind,source=,target= [OPTIONS] -``` - -## 检查推理结果指令 -我们提供脚本来检查所提供代码 LLM 的结果。请使用以下脚本检查相应的推理结果。 -``` -bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python -bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-13B/humaneval_result_python.jsonl humaneval_python -``` - -## 如何使用CodeFuseEval -1. 下载模型并更新 ckpt config.json 中的当前模型信息。 主要更新对应型号和版本中的「path」参数。 -2. 
运行以下生成命令以生成结果。 -``` -bash codefuseEval/script/generation.sh MODELNAME MODELVERSION EVALDATASET OUTFILE - -eg: -bash codefuseEval/script/generation.sh CodeFuse-13B v1 humaneval_python result/test.jsonl -``` -3. 运行以下评估命令来评估相应模型版本的生成结果。 -``` -bash codefuseEval/script/evaluation.sh -eg: -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python -``` - -## 评测说明 - -我们推荐使用给定的[评测环境](#评测环境)进行评测。在评测前,将生成的代码以如下JSON列表形式存储: - -``` -{"task_id": "../..", "generation: "..."} -{"task_id": "../..", "generation: "..."} -... -``` - -### 评测数据集 -样本使用JSON列表格式存储在``codefuseEval/data``中,根据用户所需的下游任务情况,每条样本包含 - -* ``task_id``: 题目的目标语言与ID。语言为["Python", "Java", "JavaScript", "CPP", "Go"]中之一。 -* ``prompt``: 函数声明与描述,用于代码生成。 -* ``declaration``: 仅有函数声明,用于代码翻译。 -* ``canonical_solution``: 手写的示例解答。 -* ``test``: 隐藏测例,用于评测。 -* ``example_test``: 公共测试样本,用于评估生成代码。 -* ``prompt_text``: prompt文本情况。 -* ``prompt_explain``: prompt信息说明。 -* ``func_title``: 生成函数头信息。 -* ``prompt_text_chinese``: 中文prompt信息。 - -### 评测指标 -除了目前提供的[Codex](https://arxiv.org/abs/2107.03374) 中提出的无偏 pass@k 指标之外,我们还将huggingface开源的相关指标与[CodeBLEU](https://arxiv.org/abs/2009.10297)提出的相似性指标进行集成。 -目前建议用户主要使用的指标如下: -* ``codebleu``: codebleu相似性评测指标。 -* ``pass@k``: 无偏pass@k的评测指标。 -* ``bleu``: 文本相似性指标bleu -* ``bleurt``: 文本语义相似性指标bleurt -* ``total_time_cost``: 基于被评数据集、模型推理总耗时 -* ``Average time cost``: 基于被评数据集单个任务、模型推理平均耗时 - - -### 评测命令: -``` -bash codefuseEval/script/evaluation.sh -eg: -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python -``` - -并在本仓库的根目录下使用如下指令(请谨慎执行,生成的代码可能有极低概率产生意外行为。在[execution.py](execution.py)中查看警告并取消执行代码的注释,风险自负): - -同时我们当前提供如下的标志位,可以直接将测试数据集中的示例解答作为生成答案带入进行测试。 -* ``TEST_GROUDTRUTH`` 取值为True或False - -当TEST_GROUDTRUTH为True时,开启self-test模式,将读取PROBLEM_FILE,将示例解答作为生成答案代入进行测试。 -TEST_GROUDTRUTH为False时,开启评测模式,读取RESULT_FILE和将读取PROBLEM_FILE,将生成答案代入进行测试 - -## 更多信息 - -### 使用自己的数据集评估自己的模型 -如果你想用自己的数据集评估自己的模型,可以参考以下步骤: -1. 
注册自己的数据集 -* 下载评估数据集并存储在`codefuseEval/data`或其他目录中。 数据集必须是jsonl格式。 -* 针对于数据集路径、数据集任务模式task_mode和使用数据集后生成结果的代码语言情况,需要在`codefuseEval/util.py`中的`EVAL_DATASET`、`DATASET_SUPPORT`和`DATASET_LANGUAGE`变量中进行设置。 -2. 注册你的评测模型 -* 下载评估模型并存储在`codefuseEval/model`或其他目录中。 -* 在`codefuseEval/processor`包中编写评估模型处理器代码。 -#### 处理适配器 - -我们设计了一个名为Processor的基础结构,用户可以自己根据推理模型的情况创建自己需要的处理器, 主要目的是为了处理不同模型的区别情况进行处理,主要需要完成3个抽象函数: -``` -load_model_tokenizer: 由于模型加载参数的区别以及tokenizer的终止符的区别,模型需要使用不同的参数进行适配加载,当前函数主要是为了帮助用户加载适配不同的模型 -process_before:由于prompt根据用户不同的选择评测任务的类型或不同模型来适配不同的prompt样式,因此抽取出process_before函数主要用来帮助用户处理prompt -process_after:由于模型生成结果多样性,为了适配评测框架,方便生成结果数据可以拼接成合适的用例进行自动化运行,当前函数主要是根据任务类型和数据集情况,处理生成结果适配评测数据集和结果进行评测 -``` -您可以在`codefuseEval/processor/base.py`中查看`BaseProcessor`情况,创建自己模型的处理器,并实现上述函数功能 - -* 在`ckpt_config.json`中设置信息模型。 举例如下 -``` -{ - "CodeFuse-13B": { //模型名称 - "v1": { //模型版本 - "path": "/mnt/model/CodeFuse13B-evol-instruction-4K/", // 模型路径 - "processor_class": "codefuseEval.process.codefuse13b.Codefuse13BProcessor", // 模型处理器路径 - "tokenizer": { // 将prompt token化时tokenizer传入的参数 - "truncation": true, - "padding": true, - "max_length": 600 - }, - "generation_config": { //生成配置参数 - "greedy": { //如果是JsonObject,当前配置的是解码策略,可以通过设置下方「decode_mode」参数来加载生成配置参数中定义的不同的解码策略。 - "do_sample": false, - "num_beams": 1, - "max_new_tokens": 512 - }, - "beams": { - "do_sample": false, - "num_beams": 5, - "max_new_tokens": 600, - "num_return_sequences": 1 - }, - "dosample": { - "da_sample": true - }, - "temperature": 0.2, //如果不是 JsonObject,它是一个默认参数,我们将在 Generation_config 中设置默认值。 你可以通过读取解码策略中同名参数的方式覆盖当前参数的默认值。 - "max_new_tokens": 600, - "num_return_sequences": 1, - "top_p": 0.9, - "num_beams": 1, - "do_sample": true - }, - "batch_size": 1, // 单次生成的batch size大小 - "sample_num": 1, // 单条评测数据生成的样本数 - "decode_mode": "beams" // 选择在 Generation_config 中定义的解码模式 - } - } -``` - -### 检查数据集命令 -为了检查评估数据集提供的参考值是否正确,我们提供以下命令来检查数据集,针对于已经集成的数据集情况,检查数据集的命令如下所示 - -代码补全 -```bash -bash codefuseEval/script/check_dataset.sh 
humaneval_python - -bash codefuseEval/script/check_dataset.sh humaneval_java - -bash codefuseEval/script/check_dataset.sh humaneval_js - -bash codefuseEval/script/check_dataset.sh humaneval_rust - -bash codefuseEval/script/check_dataset.sh humaneval_go - -bash codefuseEval/script/check_dataset.sh humaneval_cpp -``` -自然语言生成代码 -```bash -bash codefuseEval/script/check_dataset.sh mbpp -``` -代码翻译 -``` -bash codefuseEval/script/check_dataset.sh codeTrans_python_to_java - -bash codefuseEval/script/check_dataset.sh codeTrans_python_to_cpp - -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_java - -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_python - -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_python - -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_cpp -``` -科学计算 -``` -bash codefuseEval/script/check_dataset.sh codeCompletion_matplotlib - -bash codefuseEval/script/check_dataset.sh codeCompletion_numpy - -bash codefuseEval/script/check_dataset.sh codeCompletion_pandas - -bash codefuseEval/script/check_dataset.sh codeCompletion_pytorch - -bash codefuseEval/script/check_dataset.sh codeCompletion_scipy - -bash codefuseEval/script/check_dataset.sh codeCompletion_sklearn - -bash codefuseEval/script/check_dataset.sh codeCompletion_tensorflow - -bash codefuseEval/script/check_dataset.sh codeInsertion_matplotlib - -bash codefuseEval/script/check_dataset.sh codeInsertion_numpy - -bash codefuseEval/script/check_dataset.sh codeInsertion_pandas - -bash codefuseEval/script/check_dataset.sh codeInsertion_pytorch - -bash codefuseEval/script/check_dataset.sh codeInsertion_scipy - -bash codefuseEval/script/check_dataset.sh codeInsertion_sklearn - -bash codefuseEval/script/check_dataset.sh codeInsertion_tensorflow -``` \ No newline at end of file +--- +store: + title: CodeFuse-evalution + version: main +group: + title: 🌱 CodeFuse-evalution + order: -1 +title: 快速开始 +order: 0 +toc: content +--- + +## 推理环境: + +CodeFuse-13B: python 3.8 
及以上版本,pytorch 2.0 及以上版本,transformers 4.24.0 及以上版本,CUDA 11.4 及以上; + +CodeFuse-CodeLlama-34B: python 3.8 及以上版本,pytorch2.0 及以上版本,transformers==4.32.0 ,Sentencepiece,CUDA 11.4 及以上。 + +## 评测执行环境 + +评测生成的代码需要使用多种语言编译、运行。我们使用的各编程语言依赖及所用包的版本如下: + +| 依赖 | 版本 | +| ------- | -------- | +| Python | 3.10.9 | +| JDK | 18.0.2.1 | +| Node.js | 16.14.0 | +| js-md5 | 0.7.3 | +| C++ | 11 | +| g++ | 7.5.0 | +| Boost | 1.75.0 | +| OpenSSL | 3.0.0 | +| go | 1.18.4 | +| cargo | 1.71.1 | + +为了省去使用者配置这些语言环境的麻烦,我们构建了一个 Docker 镜像,并在其中配置了所需要的环境,你可以按照下面的指令拉取使用 + +```bash +docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest +``` + +如果您熟悉 Dockerfile,也可以从`codefuseEval/docker/Dockerfile`构建镜像,或者修改之以定制自己的配置: + +```bash +cd codefuseEval/docker +docker build [OPTIONS] . +``` + +获取镜像后,使用如下命令创建容器: + +```bash +docker run -it --gpus all --mount type=bind,source=,target= [OPTIONS] +``` + +## 检查推理结果指令 + +我们提供脚本来检查所提供代码 LLM 的结果。请使用以下脚本检查相应的推理结果。 + +``` +bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python +bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-13B/humaneval_result_python.jsonl humaneval_python +``` + +## 如何使用 CodeFuseEval + +1. 下载模型并更新 ckpt_config.json 中的当前模型信息。 主要更新对应型号和版本中的「path」参数。 +2. 运行以下生成命令以生成结果。 + +``` +bash codefuseEval/script/generation.sh MODELNAME MODELVERSION EVALDATASET OUTFILE + +eg: +bash codefuseEval/script/generation.sh CodeFuse-13B v1 humaneval_python result/test.jsonl +``` + +3. 运行以下评估命令来评估相应模型版本的生成结果。 + +``` +bash codefuseEval/script/evaluation.sh +eg: +bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python +``` + +## 评测说明 + +我们推荐使用给定的[评测环境](#评测环境)进行评测。在评测前,将生成的代码以如下 JSON 列表形式存储: + +``` +{"task_id": "../..", "generation": "..."} +{"task_id": "../..", "generation": "..."} +...
+``` + +### 评测数据集 + +样本使用 JSON 列表格式存储在`codefuseEval/data`中,根据用户所需的下游任务情况,每条样本包含 + +- `task_id`: 题目的目标语言与 ID。语言为["Python", "Java", "JavaScript", "CPP", "Go"]中之一。 +- `prompt`: 函数声明与描述,用于代码生成。 +- `declaration`: 仅有函数声明,用于代码翻译。 +- `canonical_solution`: 手写的示例解答。 +- `test`: 隐藏测例,用于评测。 +- `example_test`: 公共测试样本,用于评估生成代码。 +- `prompt_text`: prompt 文本情况。 +- `prompt_explain`: prompt 信息说明。 +- `func_title`: 生成函数头信息。 +- `prompt_text_chinese`: 中文 prompt 信息。 + +### 评测指标 + +除了目前提供的[Codex](https://arxiv.org/abs/2107.03374) 中提出的无偏 pass@k 指标之外,我们还将 huggingface 开源的相关指标与[CodeBLEU](https://arxiv.org/abs/2009.10297)提出的相似性指标进行集成。 +目前建议用户主要使用的指标如下: + +- `codebleu`: codebleu 相似性评测指标。 +- `pass@k`: 无偏 pass@k 的评测指标。 +- `bleu`: 文本相似性指标 bleu +- `bleurt`: 文本语义相似性指标 bleurt +- `total_time_cost`: 基于被评数据集、模型推理总耗时 +- `Average time cost`: 基于被评数据集单个任务、模型推理平均耗时 + +### 评测命令: + +``` +bash codefuseEval/script/evaluation.sh +eg: +bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python +``` + +并在本仓库的根目录下使用如下指令(请谨慎执行,生成的代码可能有极低概率产生意外行为。在[execution.py]()中查看警告并取消执行代码的注释,风险自负): + +同时我们当前提供如下的标志位,可以直接将测试数据集中的示例解答作为生成答案带入进行测试。 + +- `TEST_GROUDTRUTH` 取值为 True 或 False + +当 TEST_GROUDTRUTH 为 True 时,开启 self-test 模式,将读取 PROBLEM_FILE,将示例解答作为生成答案代入进行测试。 +TEST_GROUDTRUTH 为 False 时,开启评测模式,读取 RESULT_FILE 和将读取 PROBLEM_FILE,将生成答案代入进行测试 + +## 更多信息 + +### 使用自己的数据集评估自己的模型 + +如果你想用自己的数据集评估自己的模型,可以参考以下步骤: + +1. 注册自己的数据集 + +- 下载评估数据集并存储在`codefuseEval/data`或其他目录中。 数据集必须是 jsonl 格式。 +- 针对于数据集路径、数据集任务模式 task_mode 和使用数据集后生成结果的代码语言情况,需要在`codefuseEval/util.py`中的`EVAL_DATASET`、`DATASET_SUPPORT`和`DATASET_LANGUAGE`变量中进行设置。 + +2. 
注册你的评测模型 + +- 下载评估模型并存储在`codefuseEval/model`或其他目录中。 +- 在`codefuseEval/processor`包中编写评估模型处理器代码。 + +#### 处理适配器 + +我们设计了一个名为 Processor 的基础结构,用户可以自己根据推理模型的情况创建自己需要的处理器, 主要目的是为了处理不同模型的区别情况进行处理,主要需要完成 3 个抽象函数: + +``` +load_model_tokenizer: 由于模型加载参数的区别以及tokenizer的终止符的区别,模型需要使用不同的参数进行适配加载,当前函数主要是为了帮助用户加载适配不同的模型 +process_before:由于prompt根据用户不同的选择评测任务的类型或不同模型来适配不同的prompt样式,因此抽取出process_before函数主要用来帮助用户处理prompt +process_after:由于模型生成结果多样性,为了适配评测框架,方便生成结果数据可以拼接成合适的用例进行自动化运行,当前函数主要是根据任务类型和数据集情况,处理生成结果适配评测数据集和结果进行评测 +``` + +您可以在`codefuseEval/processor/base.py`中查看`BaseProcessor`情况,创建自己模型的处理器,并实现上述函数功能 + +- 在`ckpt_config.json`中设置信息模型。 举例如下 + +``` +{ + "CodeFuse-13B": { //模型名称 + "v1": { //模型版本 + "path": "/mnt/model/CodeFuse13B-evol-instruction-4K/", // 模型路径 + "processor_class": "codefuseEval.process.codefuse13b.Codefuse13BProcessor", // 模型处理器路径 + "tokenizer": { // 将prompt token化时tokenizer传入的参数 + "truncation": true, + "padding": true, + "max_length": 600 + }, + "generation_config": { //生成配置参数 + "greedy": { //如果是JsonObject,当前配置的是解码策略,可以通过设置下方「decode_mode」参数来加载生成配置参数中定义的不同的解码策略。 + "do_sample": false, + "num_beams": 1, + "max_new_tokens": 512 + }, + "beams": { + "do_sample": false, + "num_beams": 5, + "max_new_tokens": 600, + "num_return_sequences": 1 + }, + "dosample": { + "da_sample": true + }, + "temperature": 0.2, //如果不是 JsonObject,它是一个默认参数,我们将在 Generation_config 中设置默认值。 你可以通过读取解码策略中同名参数的方式覆盖当前参数的默认值。 + "max_new_tokens": 600, + "num_return_sequences": 1, + "top_p": 0.9, + "num_beams": 1, + "do_sample": true + }, + "batch_size": 1, // 单次生成的batch size大小 + "sample_num": 1, // 单条评测数据生成的样本数 + "decode_mode": "beams" // 选择在 Generation_config 中定义的解码模式 + } + } +``` + +### 检查数据集命令 + +为了检查评估数据集提供的参考值是否正确,我们提供以下命令来检查数据集,针对于已经集成的数据集情况,检查数据集的命令如下所示 + +代码补全 + +```bash +bash codefuseEval/script/check_dataset.sh humaneval_python + +bash codefuseEval/script/check_dataset.sh humaneval_java + +bash codefuseEval/script/check_dataset.sh humaneval_js + +bash codefuseEval/script/check_dataset.sh 
humaneval_rust + +bash codefuseEval/script/check_dataset.sh humaneval_go + +bash codefuseEval/script/check_dataset.sh humaneval_cpp +``` + +自然语言生成代码 + +```bash +bash codefuseEval/script/check_dataset.sh mbpp +``` + +代码翻译 + +``` +bash codefuseEval/script/check_dataset.sh codeTrans_python_to_java + +bash codefuseEval/script/check_dataset.sh codeTrans_python_to_cpp + +bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_java + +bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_python + +bash codefuseEval/script/check_dataset.sh codeTrans_java_to_python + +bash codefuseEval/script/check_dataset.sh codeTrans_java_to_cpp +``` + +科学计算 + +``` +bash codefuseEval/script/check_dataset.sh codeCompletion_matplotlib + +bash codefuseEval/script/check_dataset.sh codeCompletion_numpy + +bash codefuseEval/script/check_dataset.sh codeCompletion_pandas + +bash codefuseEval/script/check_dataset.sh codeCompletion_pytorch + +bash codefuseEval/script/check_dataset.sh codeCompletion_scipy + +bash codefuseEval/script/check_dataset.sh codeCompletion_sklearn + +bash codefuseEval/script/check_dataset.sh codeCompletion_tensorflow + +bash codefuseEval/script/check_dataset.sh codeInsertion_matplotlib + +bash codefuseEval/script/check_dataset.sh codeInsertion_numpy + +bash codefuseEval/script/check_dataset.sh codeInsertion_pandas + +bash codefuseEval/script/check_dataset.sh codeInsertion_pytorch + +bash codefuseEval/script/check_dataset.sh codeInsertion_scipy + +bash codefuseEval/script/check_dataset.sh codeInsertion_sklearn + +bash codefuseEval/script/check_dataset.sh codeInsertion_tensorflow +``` diff --git a/content/en/docs/overview/b4.MFTCoder.md b/docs/docs/developer-docs/MFTCoder/main/MFTCoder.en-US.md similarity index 89% rename from content/en/docs/overview/b4.MFTCoder.md rename to docs/docs/developer-docs/MFTCoder/main/MFTCoder.en-US.md index d9f51e9..a0a05ed 100644 --- a/content/en/docs/overview/b4.MFTCoder.md +++ b/docs/docs/developer-docs/MFTCoder/main/MFTCoder.en-US.md 
@@ -1,9 +1,20 @@ --- -title: "MFTCoder: High Accuracy and Efficiency Multi-task Fine-Tuning Framework" -slug: MFTCoder -description: 介绍主要功能 -aliases: -- "/docs/mftcoder" +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + index: true + order: -1 +title: MFTCoder +order: -1 +toc: content ---
    @@ -14,13 +25,10 @@ aliases:

    -[[中文]](/docs/mftcoder-zh) [**English**] -
    - - ## Contents + - [News](#News) - [Articles](#Articles) - [Introduction](#Introduction) @@ -30,8 +38,8 @@ aliases: - [Datasets](#Datasets) - [Star History](#Star-History) - ## News + 🔥🔥🔥 [2024/01/17] We released MFTCoder v0.3.0, mainly for MFTCoder-accelerate. It now supports new models like Mixtral(MoE), DeepSeek-coder, chatglm3. It supports FSDP as an option. It also supports Self-paced Loss as a solution for convergence balance in Multitask Fine-tuning. 🔥🔥🔥 [2024/01/17] [CodeFuse-DeepSeek-33B](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B) has been released, achieving a pass@1 (greedy decoding) score of 78.7% on HumanEval. It lists as top-1 LLM on Bigcode Leaderboard in terms of win-rate, the official result is going to be published later. @@ -51,8 +59,9 @@ aliases: 🔥🔥 [2023/08/26]We released MFTCoder-v0.1.0 which supports finetuning Code Llama, Llama, Llama2, StarCoder, ChatGLM2, CodeGeeX2, Qwen, and GPT-NeoX models with LoRA/QLoRA. ### HumanEval Performance -| Model | HumanEval(Pass@1) | Date | -|:----------------------------|:-----------------:|:-------:| + +| Model | HumanEval(Pass@1) | Date | +| :------------------------------- | :---------------: | :-----: | | **CodeFuse-DeepSeek-33B** | **78.7%** | 2024/01 | | **CodeFuse-CodeLlama-34B** | **74.4%** | 2023/09 | | **CodeFuse-CodeLlama-34B-4bits** | **73.8%** | 2023/09 | @@ -69,8 +78,8 @@ aliases: | StarCoder-15B | 33.6% | 2023/05 | | QWen-14B | 32.3% | 2023/10 | - ## Articles + [MFT Arxiv paper](https://arxiv.org/abs/2311.02303) ## Introduction @@ -80,16 +89,19 @@ aliases: **MFTCoder** is an open-source project of CodeFuse for accurate and efficient Multi-task Fine-tuning(MFT) on Large Language Models(LLMs), especially on Code-LLMs(large language model for code tasks). Moreover, we open source Code LLM models and code-related datasets along with the MFTCoder framework. 
-In MFTCoder, we released two codebases for finetuning Large Language Models: -- **```MFTCoder-accelerate```** is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. We highly recommend you try this framework and make your fintuning accurate and efficient. -- ```MFTCoder-atorch``` is based on the [ATorch frameworks](https://github.com/intelligent-machine-learning/dlrover), which is a fast distributed training framework of LLM. +In MFTCoder, we released two codebases for finetuning Large Language Models: + +- **`MFTCoder-accelerate`** is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. We highly recommend you try this framework and make your finetuning accurate and efficient. +- `MFTCoder-atorch` is based on the [ATorch frameworks](https://github.com/intelligent-machine-learning/dlrover), which is a fast distributed training framework of LLM. The aim of this project is to foster collaboration and share advancements in large language models, particularly within the domain of code development. ### Frameworks -![img.jpg](/images/mftcoder/img.jpg) + +![img.jpg](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*p8ahSYMtrwsAAAAAAAAAAAAADlHYAQ/original) ### Highlights + :white_check_mark: **Multi-task**: Train models on multiple tasks while maintaining a balance between them. The models can even generalize to new, previously unseen tasks. :white_check_mark: **Multi-model**: It integrates state-of-the-art open-source models such as gpt-neox, llama, llama-2, baichuan, Qwen, chatglm2, and more. (These finetuned models will be released in the near future.) @@ -99,6 +111,7 @@ The aim of this project is to foster collaboration and share advancements in lar :white_check_mark: **Efficient fine-tuning**: It supports LoRA, QLoRA as well as Full-parameters training, enabling fine-tuning of large models with minimal resources. 
The training speed meets the demands of almost all fine-tuning scenarios. The main components of this project include: + - Support for both SFT (Supervised FineTuning) and MFT (Multi-task FineTuning). The current MFTCoder achieves data balance among multiple tasks, and future releases will achieve a balance between task difficulty and convergence speed during training. - Support for QLoRA instruction fine-tuning, LoRA fine-tuning as well as Full-parameters fine-tuning. - Support for most mainstream open-source large models, particularly those relevant to Code-LLMs, such as DeepSeek-coder, Mistral, Mixtral, Chatglm3, Code-LLaMA, Starcoder, Codegeex2, Qwen, GPT-Neox, and more. @@ -106,15 +119,17 @@ The main components of this project include: - Release of 2 high-quality code-related instruction fine-tuning datasets: [Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k) and [CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k). - Release of many Code LLMs, please refer to organizations: [codefuse-ai on huggingface](https://huggingface.co/codefuse-ai) or [codefuse-ai on modelscope](https://modelscope.cn/organization/codefuse-ai). - ## Contributing + Contributions are welcome! If you have any suggestions, ideas, bug reports, or new model/feature supported, please open an issue or submit a pull request. ## Citation + If you find our work useful or helpful for your R&D works, please feel free to cite our paper as below. 
+ ``` @article{mftcoder2023, - title={MFTCoder: Boosting Code LLMs with Multitask Fine-Tuning}, + title={MFTCoder: Boosting Code LLMs with Multitask Fine-Tuning}, author={Bingchang Liu and Chaoyu Chen and Cong Liao and Zi Gong and Huan Wang and Zhichao Lei and Ming Liang and Dajun Chen and Min Shen and Hailian Zhou and Hang Yu and Jianguo Li}, year={2023}, journal={arXiv preprint arXiv}, @@ -122,4 +137,3 @@ If you find our work useful or helpful for your R&D works, please feel free to c eprint={2311.02303} } ``` - diff --git a/docs/docs/developer-docs/MFTCoder/main/MFTCoder.zh-CN.md b/docs/docs/developer-docs/MFTCoder/main/MFTCoder.zh-CN.md new file mode 100644 index 0000000..cd5a8d2 --- /dev/null +++ b/docs/docs/developer-docs/MFTCoder/main/MFTCoder.zh-CN.md @@ -0,0 +1,128 @@ +--- +nav: + title: 文档 + order: -1 + second: + title: 开发者文档 + order: -1 +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + index: true + order: -1 +title: MFTCoder +order: -1 +toc: content +--- + +
    + +

    + 🤗 HuggingFace + • 🤖 魔搭 +

    + +
    + +## 目录 + +- [新闻](#新闻) +- [文章](#文章) +- [项目简介](#项目简介) +- [环境](#环境) +- [训练](#训练) +- [模型](#模型) +- [数据集](#数据集) + +## 新闻 + +🔥🔥🔥 [2024/01/17] **MFTCoder-v0.3.0**发布。新增对 Mixtral(MoE), DeepSeek 等模型的支持;新增支持 FSDP(Fully Sharded Data Parallel);新增 Self-paced Loss, 支持多任务收敛均衡。 感兴趣详见微信公众号 CodeFuse 的文章[MFTCoder 重磅升级 v0.3.0 发布](https://mp.weixin.qq.com/s/xI3f0iUKq9TIIKZ_kMtcQg) + +🔥🔥🔥 [2024/01/17] 开源了[CodeFuse-DeepSeek-33B](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B)模型,在 HumanEval pass@1(greedy decoding)上可以达到 78.7%。该模型在 Big Code 榜单的结果近期发布,请关注公众号获取最新信息。 + +🔥🔥🔥 [2024/01/17] 开源了[CodeFuse-Mixtral-8x7B](https://huggingface.co/codefuse-ai/CodeFuse-Mixtral-8x7B)模型,在 HumanEval pass@1(greedy decoding)上可以达到 56.1%。感兴趣详见微信公众号 CodeFuse 的文章[MFTCoder 提升 Mixtral-8x7B 混合专家模型的代码能力实践](https://mp.weixin.qq.com/s/xI3f0iUKq9TIIKZ_kMtcQg) + +🔥🔥 [2023/11/07] [MFTCoder 论文](https://arxiv.org/abs/2311.02303)在 Arxiv 公布,介绍了多任务微调的技术细节。 + +🔥🔥 [2023/10/20] 开源了[CodeFuse-QWen-14B](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B)模型,在 HumanEval pass@1(greedy decoding)上可以达到 48.8%。相比较与基座模型 Qwen-14b 提升 16%。感兴趣详见微信公众号 CodeFuse[文章](https://mp.weixin.qq.com/s/PCQPkvbvfxSPzsqjOILCDw) + +🔥🔥 [2023/09/27] 开源了[CodeFuse-StarCoder-15B](https://huggingface.co/codefuse-ai/CodeFuse-StarCoder-15B)模型,在 HumanEval pass@1(greedy decoding)上可以达到 54.9%。 + +🔥🔥 [2023/09/26] [CodeFuse-CodeLlama-34B-4bits](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B-4bits)量化版本发布,量化后模型在 HumanEval pass@1 指标为 73.8% (贪婪解码)。 + +🔥🔥 [2023/09/07]MFTCoder 微调的模型**CodeFuse-CodeLlama-34B**在[HumanEval Benchmarks](https://github.com/openai/human-eval)的 Python **Pass@1** 取得了**74.4%**(greedy decoding)的开源 SOTA 成绩。 + +🔥🔥 [2023/08/26]MFTCoder-v0.1.0 支持使用 LoRA/QLoRA 对 Code Llama、Llama、Llama2、StarCoder、ChatGLM2、CodeGeeX2、Qwen 和 GPT-NeoX 模型进行微调。 + +### HumanEval 表现 + +| 模型 | HumanEval(Pass@1) | 日期 | +| :------------------------------- | :---------------: | :-----: | +| **CodeFuse-DeepSeek-33B** | **78.7%** | 2024/01 | +| **CodeFuse-CodeLlama-34B** 
| **74.4%** | 2023/09 | +| **CodeFuse-CodeLlama-34B-4bits** | **73.8%** | 2023/09 | +| WizardCoder-Python-34B-V1.0 | 73.2% | 2023/08 | +| GPT-4(zero-shot) | 67.0% | 2023/03 | +| PanGu-Coder2 15B | 61.6% | 2023/08 | +| **CodeFuse-Mixtral-8x7B** | **56.1%** | 2024/01 | +| **CodeFuse-StarCoder-15B** | **54.9%** | 2023/08 | +| CodeLlama-34b-Python | 53.7% | 2023/08 | +| **CodeFuse-QWen-14B** | **48.8%** | 2023/10 | +| CodeLlama-34b | 48.8% | 2023/08 | +| GPT-3.5(zero-shot) | 48.1% | 2022/11 | +| OctoCoder | 46.2% | 2023/08 | +| StarCoder-15B | 33.6% | 2023/05 | +| QWen-14B | 32.3% | 2023/10 | + +## 文章 + +🔥 [CodeFuse-MFTCoder 提升 CodeGeeX2-6B 代码能力](https://mp.weixin.qq.com/s/kWMtHIoe3ytN8pRVi_CHZg) + +🔥 [CodeFuse-MFTCoder 提升 Qwen-14B 代码能力](https://mp.weixin.qq.com/s/PCQPkvbvfxSPzsqjOILCDw) + +## 项目简介 + +**国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架;** + +**Codefuse-MFTCoder** 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。 + +### 项目框架 + +![img_1.jpg](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*zc9pRJ-hdZMAAAAAAAAAAAAADlHYAQ/original) + +### 项目优势 + +:white_check_mark: **多任务**:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去; + +:white_check_mark: **多模型**:支持最新的多个开源模型,包括 gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2 等; + +:white_check_mark: **多框架**:既支持主流开源的 Accelerate+DeepSpeed/FSDP,也支持新开源的[ATorch 框架](https://github.com/intelligent-machine-learning/dlrover); + +:white_check_mark: **高效微调**:支持 LoRA 和 QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景; + +本项目主要内容如下: + +- 同时支持单任务 SFT(Supervised FineTuning)和 MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等 +- 支持 QLoRA 低成本高效指令微调、LoRA 高效指令微调、全量参数高精度微调。 +- 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如 DeepSeek-coder, Mistral, Mixtral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA 等。 +- 支持 lora 与 base model 进行权重合并,推理更便捷。 +- 整理并开源 2 
个指令微调数据集:[Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k)和[CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k)。 +- 开源多个[Codefuse 系列指令微调模型权重],具体参见我们的 huggingface 组织和 modelscope 组织下的模型:[codefuse-ai huggingface](https://huggingface.co/codefuse-ai) or [codefuse-ai 魔搭](https://modelscope.cn/organization/codefuse-ai)。 + | + +## 引用 + +如果你觉得我们的工作对你有帮助,请引用我们的论文 + +``` +@article{mftcoder2023, + title={MFTCoder: Boosting Code LLMs with Multitask Fine-Tuning}, + author={Bingchang Liu and Chaoyu Chen and Cong Liao and Zi Gong and Huan Wang and Zhichao Lei and Ming Liang and Dajun Chen and Min Shen and Hailian Zhou and Hang Yu and Jianguo Li}, + year={2023}, + journal={arXiv preprint arXiv}, + archivePrefix={arXiv}, + eprint={2311.02303} +} +``` diff --git a/content/en/docs/mftcoder/3_accelerate.md b/docs/docs/developer-docs/MFTCoder/main/accelerate.en-US.md similarity index 75% rename from content/en/docs/mftcoder/3_accelerate.md rename to docs/docs/developer-docs/MFTCoder/main/accelerate.en-US.md index cd4977e..b5047a2 100644 --- a/content/en/docs/mftcoder/3_accelerate.md +++ b/docs/docs/developer-docs/MFTCoder/main/accelerate.en-US.md @@ -1,353 +1,381 @@ ---- -title: "MFTCoder-accelerate: Training Framework with Accelerate and DeepSpeed/FSDP" -description: 介绍主要功能 -url: /docs/mftcoder-accelerate -aliases: -- "/docs/mftcoder-accelerate" ---- - - -[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai) - - GitHub - - -[[中文]](/docs/mftcoder-accelerate-zh) [**English**] - -## 1. 
Updates - -🔥 MFTCoder-accelerate supports Full-parameters/LoRA using accelerate + FSDP Framework; - -🔥 MFTCoder-accelerate supports MFT/SFT on more new mainstream open-source base models: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; - -🔥 MFTCoder-accelerate supports Self-Paced Loss for Convergence Balance; - -🔥 MFTCoder-accelerate supports Full-parameters/QLoRA/LoRA using accelerate + DeepSpeed Framework; - -🔥 MFTCoder-accelerate supports Multitask Fine-Tuning(MFT), which is able to balance diffenrent tasks in data level. - -🔥 MFTCoder-accelerate supports finetuning most of mainstream open-source base models: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen. - -## 2. Data Format -### 2.1 Training Data Format -The training data is required to be a uniformed JSONL format, in which each line of data has the following "chatML"-style JSON format. The "chat_rounds" field is required, and other fields can be added or removed based on specific needs. -The reason why we selected "chatML" style as our training and inference data format is that "chatML" style is compatible with both "conversation" and "instruction/response" scenarios. - -For the keys of roles in "chat_rounds", you could use "system/human/bot" tuple or "system/user/assistant" tuple. - -```json -{ - "id":0, - "data_name":"code-helper", - "chat_rounds":[ - { - "role": "system", - "content": "You are a expert in coding and help answer code questions" - }, - { - "role": "human", - "content": "Write a python function of quick sort" - }, - { - "role": "bot", - "content": "Below is the function of quick sort: ..." - }, - { - "role": "human", - "content": "Explain the code" - }, - { - "role": "bot", - "content": "OK, this code ..." - } - ] -} -``` - -### 2.2 Default Inference Data Format -Inference data format is the real string format consumed by tokenizers and then LLMs. It is also the string format to which the training data is converted before tokenization. 
-The default inference data format contains strings concatenated by conversation data(system, human and bot contents) in the training data format. -It is used as the data "seen"(before tokenization) by the model in training process. -It is used as input during the inference process as well. -Here is an example format of the inference string: - -``` -""" -system -System instruction -human -User 1st round input -bot -Assistant 1st round output{EOS_TOKEN} -human -User 2nd round input -bot -Assistant 2nd round output{EOS_TOKEN} -... -... -... -human -User nth round input -bot -{Assistant output to be genreated}{EOS_TOKEN} -""" -``` -When applying inference, you always make your input string end with ```bot\n``` to request the model generating answers. - - - -## 3. Model Training -Currently, the "MFTCoder-accelerate" codebase supports Full-parameters/LoRA/QLoR along with Multi-Task FineTuning(MFT). -In theory, this project can be used to train any publicly available model in the HuggingFace Format. - -Here are some excellent pre-trained models weights available on Huggingface that can be finetuned with this codebase: - -🤗 [Latest code pre-trained SOTA, CodeLlama-34b-Python](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) : code-llama-34b, code-llama-34b-python, a new SOTA base model. - -🤗 [Best 10B level pre-trained Code LLM, Starcoder:](https://huggingface.co/bigcode/starcoder) wizardCoder-15B, PanGu-coder2, and other previous SOTA were trained on it. - -🤗 [Multilingual powerhouse, Qwen-7b](https://huggingface.co/Qwen/Qwen-7B): Suitable for multilingual tasks, including Chinese tasks, for instruction fine-tuning. 
- -**mftcoder_accelerate directory structure** -``` -mftcoder_accelerate - | - src - configs - | - data - | - model - | - *pefts* - | - tokenizer - | - utils - | - evals -``` -我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化, 详见```src```目录下的实现。 - -训练入口文件是```mftcoder_accelerate/src/pefts/mft_accelerate.py``` - -参数配置存储在```mftcoder_accelerate/src/configs```目录下,方便统一管理和更改。 - -**_所以,在你开启训练之前,请进入src目录_** -``` -cd mftcoder_accelerate/src -``` - -You can find the implementations in the ```mftcoder_accelerate/src``` directory. -The entry directory for fine-tuning training is ```mftcoder_accelerate/src```, and the entry file for training is ```mftcoder_accelerate/src/pefts/mft_accelerate.py```. -Configurations are stored in the ```mftcoder_accelerate/src/configs``` directory for easy management and modification. - -**_As a result, before you start training, you should first change your dir by_** -``` -cd mftcoder_accelerate/src -``` - -### 3.1 Tokenization -During training, we concatenate multi-turn dialogues into the following format (also known as the inference data format mentioned before) and then tokenize it. - -In default format, ```human\n``` starts the user's input (i.e., prompt),```bot\n``` starts the assistant's output (i.e., response) - -```{EOS_TOKEN}``` represents the proper eos_token. -We have different eos_tokens in ```src/pefts/model_mapping.py``` which fits different base models. - -Here is a visionable example of the training data after formatting: -``` -f"human\n{input1}bot\n{target1}{EOS_TOKEN}\nhuman\n{input2}bot\ntarget2{EOS_TOKEN}\n" -``` -During the calculation of loss, we use a ```loss mask``` to ensure that the loss from the input part does not contribute to parameter updates. Only the loss from the ```target{EOS_TOKEN}``` part is used for updating parameters. -This approach takes full advantage of the benefits of model parallelism, making training more efficient. It also leverages the characteristic of decoder-only models with left-to-right attention. 
-By including all target parts from multiple turns in a single training iteration, the training process becomes more efficient. - - -### 3.2 LoRA/QLoRA - -#### Intro -You can refer to the Lora paper for details about LoRA:[LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/pdf/2106.09685.pdf) - -You can refer to the Qlora paper for details about QLoRA:[QLORA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/pdf/2305.14314.pdf) - -QLoRA (Quantized LoRA) is a method that combines 4-bit nf4 quantization and additional adapters to achieve a balance between reducing GPU memory consumption and approaching the performance of full-parameter fine-tuning. - -According to the QLoRA paper, this method enables fine-tuning of a 33B model on a single V100 GPU while achieving performance close to that of full-parameter fine-tuning. - -To perform LoRA/QLoRA fine-tuning, you can execute the following command: - -#### Launch via Deepspeed -DeepSpeed config in accelerate_ds_config.yaml. -```bash -accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "DeepSpeed" -``` -or -DeepSpeed config in command line arguments -```bash -sh ds_single_launch.sh -``` - -#### Launch via FSDP -FSDP config in accelerate_fsdp_config.yaml. -```bash -accelerate launch --config_file accelerate_fsdp_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "FSDP" -``` -or -FSDP config in command line arguments -```bash -sh ds_single_launch.sh -``` - -#### Traing Arguments -All arguments allowed in ***_train_config.josn are defined in ```arguments.py```. - -Frequently used arguments are provided in ```configs/***_train_config``` and explained as follows. You can modify these parameters according to your needs: - -- **load_raw_dataset**: Need to be true at present. Only JSONL format is supported. 
- -- **data_paths**: Input data paths in a String of list format, e.g., "[path1,path2,path3]". Each path represents a task directory and each task directory contains one or more JSONL data files. - -- **output_dir**: Training output directory to store checkpoints, Lora adapter, etc. - -- **tb_dir**: TensorBoard directory to store logs, metrics, etc. - -- **model_type**: Type of the model to train, e.g., "mixtral | llama | starcoder | chatglm2 | qwen | gpt_neox". - -- **attn_implementation**: "flash_attention_2" or "eager" or "sdpa", worked when model is supported by transformers officially - -- **peft_type**: null or "lora" or "qlora". null for full-params training - -- **lora_rank**: Rank value for Lora. - -- **lora_alpha**: Alpha value for Lora. - -- **lora_dropout**: Dropout rate for Lora. - -- **target_modules**: List of target modules in lora, we have default values if None - -- **quantization**: "4bit" for QLoRA/ null for LoRA and Full-params training. - -- **pretrained_model_path**: Local/Shared disk path or model name on HuggingFace for the pre-trained model. - -- **weighted_loss_mode**: Loss weighting method for multitask training. "case3" is recommended at present, "self-paced" is supported but need tuning of hyperparameters. - -- **padding_mode**: The way tokenized data is set. "padding" means padding for each sample to seq_length, "pack" means putting samples into seq_length as many as possible. - -- **num_train_epochs**: Number of training epochs. - -- **per_device_train_batch_size**: Batch size per GPU for training. - -- **per_device_eval_batch_size**: Batch size per GPU for evaluation. - -- **gradient_accumulation_steps**: Number of gradient accumulation steps. Global batch size is calculated as num_gpus * per_device_train_batch_size * gradient_accumulation_steps. - -- **learning_rate**: Initial Learning rate. For full-parameter fine-tuning, it is recommended to use a smaller value such as 1e-5 or 5e-6. 
For QLoRA, a larger learning rate is generally used, such as 1e-4 or 2e-4. - -- **min_lr**: Minimum learning rate. Usually set to one-tenth of the learning rate. - -- **seq_length**: Maximum input sequence length during training. - -- **log_interval**: Log training loss every ```log_interval``` steps. - -- **checkpointing_steps**: Save a checkpoint every ```checkpointing_steps``` steps. - -- **evaluation_steps**: Evaluate on the validation set every ```evaluation_steps``` steps. - -- **early_stopping**: Enable early stopping or not. - -- **early_stopping_stall_num**: Number of evaluation points without improvement which triggers early stopping. - -- **lr_scheduler_type**: Type of learning rate scheduler. "cosine" is a good choice already. - -- **num_warmup_steps**: Number of warm-up steps to gradually increase the learning rate. - -- **seed**: Random seed for reproducibility. - -- **saving_limit**: ckpt saving limit num, must be set in Full-parameter training. - -- **role_markers**: {"system": "\system\n", "user": "\human\n", "assistant": "\bot\n} as default(null). You could set your preferred role_markers as the templates startting "system", "user" and "assistant". e.g. {"system": "### System:\n", "user": "### Instruction:\n", "assistant": "### Response:\n"} - - -## 4. Model Usage - -### 4.1 Merge Adaptor weights -Using LoRA or QLoRA for training, this project only saves the weights and configuration files of the adapters. 
-To merge the adapter weights with the base model: -``` -python pefts/merge_base_and_lora_to_hf.py \ - --base_model_or_path model_path \ - --adaptor_path lora_adapter_path \ - --model_type model_type \ - --merged_output_path output_path -``` - -### 4.2 Inference demo -Here is the script for inference on models trained by MFTCoder since v0.3.0, which is compatible with most HuggingFace models: -```python -from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, -) -model_name_or_path = "codefuse-ai/CodeFuse-Deepseek-33B" -tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, padding_side="left") -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("<|end▁of▁sentence|>") -tokenizer.pad_token_id = tokenizer.eos_token_id -model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True) - -HUMAN_ROLE_START_TAG = "human\n" -BOT_ROLE_START_TAG = "bot\n" -texts = ["write a python function of quick sort."] -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] - -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") -outputs = model.generate( - inputs=inputs["input_ids"], - attention_mask=inputs["attention_mask"], - max_new_tokens=512, - top_p=0.95, - temperature=0.1, - do_sample=True, - eos_token_id=tokenizer.eos_token_id, - pad_token_id=tokenizer.pad_token_id - ) -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) -print(gen_text) -``` - - -Indeed, the parameters top_p, temperature, repetition_penalty, do_sample, etc., have a significant impact on the model's generation output. -You can modify these parameters based on your specific use case. 
- -In code generation scenarios, if you are using the sampling mode (do_sample=True), the following parameter settings can yield good results for the Pass@1 metric: - -top_p: Set a higher value, such as 0.95, to retain highly probable generated words. This helps ensure more accurate and fluent generation results. - -temperature: Set a lower value, such as 0.1, to reduce randomness. Lower temperature values make the generation output more deterministic. - -These parameter combinations can control the diversity of the generated outputs while maintaining naturalness. Additionally, you can adjust other related parameters, such as repetition_penalty, to reduce repetition in the generated results. - -If you choose the non-sampling mode (do_sample=False), you can consider the following parameter settings: - -beam_num: Set a smaller value such as 1 or 3. ```beam_num=1``` represents greedy decoding, which selects the most probable single generated word. ```beam_num=3``` represents beam search mode, which considers multiple potential generation paths and chooses the best path among them. - -## 5. FAQ -#### Q1:What should I do when cuda OOM happens? -If OOM happened,you can reduce parameters such as per_device_train_batch_size and seq_length. Since you are dealing with large models (6B, 13B, 34B, 70B, etc.), you are already using gradient checkpointing technology by default, which significantly reduces GPU memory consumption. -However, this may slightly slow down the training speed. - -#### Q2:install packages -Please refer to init_env.sh and requirements.txt -We highly recommend you install Flash Attention 2 (flash_attn>=2.1.0, 2.3.6 used by us) first to get memory-efficient and fast training. - -#### Q3:How should I specify the GPUs for training? 
-You can specify the visiable GPUs as below: -```bash -CUDA_VISIBLE_DEVICES=0,1 accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json -``` - -#### Q4:Whats is a recommended Distributed Training? -For LoRA/QLoRA, we recommend DeepSpeed(ZeRO2) as the underlying framework, because it is easy and stable to use, moreover it is more compatable for different settings. -And FSDP does not support Quantization(integer type in training). - -For Full-parameter finetuning, FSDP is usually faster, and may help you with very large models by sharding parameters and gradients. \ No newline at end of file +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: Training Framework with Accelerate and DeepSpeed/FSDP +order: 2 +toc: content +--- + +[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai)  + +GitHub + + +## 1. Updates + +🔥 MFTCoder-accelerate supports Full-parameters/LoRA using accelerate + FSDP Framework; + +🔥 MFTCoder-accelerate supports MFT/SFT on more new mainstream open-source base models: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; + +🔥 MFTCoder-accelerate supports Self-Paced Loss for Convergence Balance; + +🔥 MFTCoder-accelerate supports Full-parameters/QLoRA/LoRA using accelerate + DeepSpeed Framework; + +🔥 MFTCoder-accelerate supports Multitask Fine-Tuning(MFT), which is able to balance diffenrent tasks in data level. + +🔥 MFTCoder-accelerate supports finetuning most of mainstream open-source base models: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen. + +## 2. Data Format + +### 2.1 Training Data Format + +The training data is required to be a uniformed JSONL format, in which each line of data has the following "chatML"-style JSON format. The "chat_rounds" field is required, and other fields can be added or removed based on specific needs. 
+The reason why we selected "chatML" style as our training and inference data format is that "chatML" style is compatible with both "conversation" and "instruction/response" scenarios. + +For the keys of roles in "chat_rounds", you could use "system/human/bot" tuple or "system/user/assistant" tuple. + +```json +{ + "id": 0, + "data_name": "code-helper", + "chat_rounds": [ + { + "role": "system", + "content": "You are a expert in coding and help answer code questions" + }, + { + "role": "human", + "content": "Write a python function of quick sort" + }, + { + "role": "bot", + "content": "Below is the function of quick sort: ..." + }, + { + "role": "human", + "content": "Explain the code" + }, + { + "role": "bot", + "content": "OK, this code ..." + } + ] +} +``` + +### 2.2 Default Inference Data Format + +Inference data format is the real string format consumed by tokenizers and then LLMs. It is also the string format to which the training data is converted before tokenization. +The default inference data format contains strings concatenated by conversation data(system, human and bot contents) in the training data format. +It is used as the data "seen"(before tokenization) by the model in training process. +It is used as input during the inference process as well. +Here is an example format of the inference string: + +``` +""" +system +System instruction +human +User 1st round input +bot +Assistant 1st round output{EOS_TOKEN} +human +User 2nd round input +bot +Assistant 2nd round output{EOS_TOKEN} +... +... +... +human +User nth round input +bot +{Assistant output to be genreated}{EOS_TOKEN} +""" +``` + +When applying inference, you always make your input string end with `bot\n` to request the model generating answers. + +## 3. Model Training + +Currently, the "MFTCoder-accelerate" codebase supports Full-parameters/LoRA/QLoR along with Multi-Task FineTuning(MFT). +In theory, this project can be used to train any publicly available model in the HuggingFace Format. 
+ +Here are some excellent pre-trained models weights available on Huggingface that can be finetuned with this codebase: + +🤗 [Latest code pre-trained SOTA, CodeLlama-34b-Python](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) : code-llama-34b, code-llama-34b-python, a new SOTA base model. + +🤗 [Best 10B level pre-trained Code LLM, Starcoder:](https://huggingface.co/bigcode/starcoder) wizardCoder-15B, PanGu-coder2, and other previous SOTA were trained on it. + +🤗 [Multilingual powerhouse, Qwen-7b](https://huggingface.co/Qwen/Qwen-7B): Suitable for multilingual tasks, including Chinese tasks, for instruction fine-tuning. + +**mftcoder_accelerate directory structure** + +``` +mftcoder_accelerate + | + src + configs + | + data + | + model + | + *pefts* + | + tokenizer + | + utils + | + evals +``` + +我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化, 详见`src`目录下的实现。 + +训练入口文件是`mftcoder_accelerate/src/pefts/mft_accelerate.py` + +参数配置存储在`mftcoder_accelerate/src/configs`目录下,方便统一管理和更改。 + +**_所以,在你开启训练之前,请进入 src 目录_** + +``` +cd mftcoder_accelerate/src +``` + +You can find the implementations in the `mftcoder_accelerate/src` directory. +The entry directory for fine-tuning training is `mftcoder_accelerate/src`, and the entry file for training is `mftcoder_accelerate/src/pefts/mft_accelerate.py`. +Configurations are stored in the `mftcoder_accelerate/src/configs` directory for easy management and modification. + +**_As a result, before you start training, you should first change your dir by_** + +``` +cd mftcoder_accelerate/src +``` + +### 3.1 Tokenization + +During training, we concatenate multi-turn dialogues into the following format (also known as the inference data format mentioned before) and then tokenize it. + +In default format, `human\n` starts the user's input (i.e., prompt),`bot\n` starts the assistant's output (i.e., response) + +`{EOS_TOKEN}` represents the proper eos_token. +We have different eos_tokens in `src/pefts/model_mapping.py` which fits different base models. 
+
+Here is a concrete, readable example of the training data after formatting:
+
+```
+f"human\n{input1}bot\n{target1}{EOS_TOKEN}\nhuman\n{input2}bot\ntarget2{EOS_TOKEN}\n"
+```
+
+During the calculation of loss, we use a `loss mask` to ensure that the loss from the input part does not contribute to parameter updates. Only the loss from the `target{EOS_TOKEN}` part is used for updating parameters.
+This approach takes full advantage of the benefits of model parallelism, making training more efficient. It also leverages the characteristic of decoder-only models with left-to-right attention.
+By including all target parts from multiple turns in a single training iteration, the training process becomes more efficient.
+
+### 3.2 LoRA/QLoRA
+
+#### Intro
+
+You can refer to the Lora paper for details about LoRA:[LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/pdf/2106.09685.pdf)
+
+You can refer to the Qlora paper for details about QLoRA:[QLORA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/pdf/2305.14314.pdf)
+
+QLoRA (Quantized LoRA) is a method that combines 4-bit nf4 quantization and additional adapters to achieve a balance between reducing GPU memory consumption and approaching the performance of full-parameter fine-tuning.
+
+According to the QLoRA paper, this method enables fine-tuning of a 33B model on a single V100 GPU while achieving performance close to that of full-parameter fine-tuning.
+
+To perform LoRA/QLoRA fine-tuning, you can execute the following command:
+
+#### Launch via Deepspeed
+
+DeepSpeed config in accelerate_ds_config.yaml.
+
+```bash
+accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "DeepSpeed"
+```
+
+or
+DeepSpeed config in command line arguments
+
+```bash
+sh ds_single_launch.sh
+```
+
+#### Launch via FSDP
+
+FSDP config in accelerate_fsdp_config.yaml.
+
+```bash
+accelerate launch --config_file accelerate_fsdp_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "FSDP"
+```
+
+or
+FSDP config in command line arguments
+
+```bash
+sh fsdp_single_launch.sh
+```
+
+#### Training Arguments
+
+All arguments allowed in \*\*\*\_train_config.json are defined in `arguments.py`.
+
+Frequently used arguments are provided in `configs/***_train_config` and explained as follows. You can modify these parameters according to your needs:
+
+- **load_raw_dataset**: Need to be true at present. Only JSONL format is supported.
+
+- **data_paths**: Input data paths in a String of list format, e.g., "[path1,path2,path3]". Each path represents a task directory and each task directory contains one or more JSONL data files.
+
+- **output_dir**: Training output directory to store checkpoints, Lora adapter, etc.
+
+- **tb_dir**: TensorBoard directory to store logs, metrics, etc.
+
+- **model_type**: Type of the model to train, e.g., "mixtral | llama | starcoder | chatglm2 | qwen | gpt_neox".
+
+- **attn_implementation**: "flash_attention_2" or "eager" or "sdpa", worked when model is supported by transformers officially
+
+- **peft_type**: null or "lora" or "qlora". null for full-params training
+
+- **lora_rank**: Rank value for Lora.
+
+- **lora_alpha**: Alpha value for Lora.
+
+- **lora_dropout**: Dropout rate for Lora.
+
+- **target_modules**: List of target modules in lora, we have default values if None
+
+- **quantization**: "4bit" for QLoRA/ null for LoRA and Full-params training.
+
+- **pretrained_model_path**: Local/Shared disk path or model name on HuggingFace for the pre-trained model.
+
+- **weighted_loss_mode**: Loss weighting method for multitask training. "case3" is recommended at present, "self-paced" is supported but need tuning of hyperparameters.
+
+- **padding_mode**: The way tokenized data is set.
"padding" means padding for each sample to seq_length, "pack" means putting samples into seq_length as many as possible. + +- **num_train_epochs**: Number of training epochs. + +- **per_device_train_batch_size**: Batch size per GPU for training. + +- **per_device_eval_batch_size**: Batch size per GPU for evaluation. + +- **gradient_accumulation_steps**: Number of gradient accumulation steps. Global batch size is calculated as num*gpus * per*device_train_batch_size * gradient_accumulation_steps. + +- **learning_rate**: Initial Learning rate. For full-parameter fine-tuning, it is recommended to use a smaller value such as 1e-5 or 5e-6. For QLoRA, a larger learning rate is generally used, such as 1e-4 or 2e-4. + +- **min_lr**: Minimum learning rate. Usually set to one-tenth of the learning rate. + +- **seq_length**: Maximum input sequence length during training. + +- **log_interval**: Log training loss every `log_interval` steps. + +- **checkpointing_steps**: Save a checkpoint every `checkpointing_steps` steps. + +- **evaluation_steps**: Evaluate on the validation set every `evaluation_steps` steps. + +- **early_stopping**: Enable early stopping or not. + +- **early_stopping_stall_num**: Number of evaluation points without improvement which triggers early stopping. + +- **lr_scheduler_type**: Type of learning rate scheduler. "cosine" is a good choice already. + +- **num_warmup_steps**: Number of warm-up steps to gradually increase the learning rate. + +- **seed**: Random seed for reproducibility. + +- **saving_limit**: ckpt saving limit num, must be set in Full-parameter training. + +- **role_markers**: {"system": "\system\n", "user": "\human\n", "assistant": "\bot\n} as default(null). You could set your preferred role_markers as the templates startting "system", "user" and "assistant". e.g. {"system": "### System:\n", "user": "### Instruction:\n", "assistant": "### Response:\n"} + +## 4. 
Model Usage + +### 4.1 Merge Adaptor weights + +Using LoRA or QLoRA for training, this project only saves the weights and configuration files of the adapters. +To merge the adapter weights with the base model: + +``` +python pefts/merge_base_and_lora_to_hf.py \ + --base_model_or_path model_path \ + --adaptor_path lora_adapter_path \ + --model_type model_type \ + --merged_output_path output_path +``` + +### 4.2 Inference demo + +Here is the script for inference on models trained by MFTCoder since v0.3.0, which is compatible with most HuggingFace models: + +```python +from transformers import ( + AutoTokenizer, + AutoModelForCausalLM, +) +model_name_or_path = "codefuse-ai/CodeFuse-Deepseek-33B" +tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, padding_side="left") +tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("<|end▁of▁sentence|>") +tokenizer.pad_token_id = tokenizer.eos_token_id +model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True) + +HUMAN_ROLE_START_TAG = "human\n" +BOT_ROLE_START_TAG = "bot\n" +texts = ["write a python function of quick sort."] +texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] + +inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") +outputs = model.generate( + inputs=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + max_new_tokens=512, + top_p=0.95, + temperature=0.1, + do_sample=True, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id + ) +gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) +print(gen_text) +``` + +Indeed, the parameters top_p, temperature, repetition_penalty, do_sample, etc., have a significant impact on the model's generation output. +You can modify these parameters based on your specific use case. 
+
+In code generation scenarios, if you are using the sampling mode (do_sample=True), the following parameter settings can yield good results for the Pass@1 metric:
+
+top_p: Set a higher value, such as 0.95, to retain highly probable generated words. This helps ensure more accurate and fluent generation results.
+
+temperature: Set a lower value, such as 0.1, to reduce randomness. Lower temperature values make the generation output more deterministic.
+
+These parameter combinations can control the diversity of the generated outputs while maintaining naturalness. Additionally, you can adjust other related parameters, such as repetition_penalty, to reduce repetition in the generated results.
+
+If you choose the non-sampling mode (do_sample=False), you can consider the following parameter settings:
+
+beam_num: Set a smaller value such as 1 or 3. `beam_num=1` represents greedy decoding, which selects the most probable single generated word. `beam_num=3` represents beam search mode, which considers multiple potential generation paths and chooses the best path among them.
+
+## 5. FAQ
+
+#### Q1:What should I do when cuda OOM happens?
+
+If OOM happens, you can reduce parameters such as per_device_train_batch_size and seq_length. Since you are dealing with large models (6B, 13B, 34B, 70B, etc.), you are already using gradient checkpointing technology by default, which significantly reduces GPU memory consumption.
+However, this may slightly slow down the training speed.
+
+#### Q2:install packages
+
+Please refer to init_env.sh and requirements.txt
+We highly recommend you install Flash Attention 2 (flash_attn>=2.1.0, 2.3.6 used by us) first to get memory-efficient and fast training.
+
+#### Q3:How should I specify the GPUs for training?
+ +You can specify the visiable GPUs as below: + +```bash +CUDA_VISIBLE_DEVICES=0,1 accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json +``` + +#### Q4:Whats is a recommended Distributed Training? + +For LoRA/QLoRA, we recommend DeepSpeed(ZeRO2) as the underlying framework, because it is easy and stable to use, moreover it is more compatable for different settings. +And FSDP does not support Quantization(integer type in training). + +For Full-parameter finetuning, FSDP is usually faster, and may help you with very large models by sharding parameters and gradients. diff --git a/docs/docs/developer-docs/MFTCoder/main/accelerate.zh-CN.md b/docs/docs/developer-docs/MFTCoder/main/accelerate.zh-CN.md new file mode 100644 index 0000000..09eba46 --- /dev/null +++ b/docs/docs/developer-docs/MFTCoder/main/accelerate.zh-CN.md @@ -0,0 +1,327 @@ +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: Accelerate + DeepSpeed/FSDP 框架篇 +order: 2 +toc: content +--- + +[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai)  + +GitHub + + +## 1. 更新 + +🔥 MFTCoder-accelerate 新增支持 accelerate + FSDP 框架, 支持全量微调和 LoRA; + +🔥 MFTCoder-accelerate 支持最新更多主流开源模型: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; + +🔥 MFTCoder-accelerate 新增 self-paced Loss, 用于收敛均衡; + +🔥 MFTCoder-accelerate 支持使用 accelerate + DeepSpeed 框架下支持 全量参数/QLoRA/LoRA 微调; + +🔥 MFTCoder-accelerate 在训练中支持了多任务微调 MFT, 可以同时平衡多个任务的训练,训练的模型支持多任务推理; + +🔥 MFTCoder-accelerate 在训练中支持多种模型基座: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen 等 + +## 2. 
数据格式 + +### 2.1 训练数据格式 + +训练数据为 jsonl 格式,每一行的数据格式如下,其中 chat_rounds 字段是必需的,可以根据实际需求添加或删除其他字段。 +可以参考项目中的 xxx.jsonl 文件。 + +```json +{ + "id": 0, + "data_name": "code-helper", + "chat_rounds": [ + { + "role": "system", + "content": "你是一个智能代码助手,可以回复用户与代码相关的问题" + }, + { + "role": "human", + "content": "写一个快速排序" + }, + { + "role": "bot", + "content": "以下是一个快速排序算法xxxxxx" + }, + { + "role": "human", + "content": "解释一下这段代码" + }, + { + "role": "bot", + "content": "好的,这段代码xxx" + } + ] +} +``` + +### 2.2 推理数据格式 + +推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入 prompt 拼接的方式: + +``` +""" +system +这是System指令 +human +这是第1轮用户输入的问题 +bot +这是第1轮模型生成的内容{EOS_TOKEN} +human +这是第2轮用户输入的问题 +bot +这是第2轮模型生成的内容{EOS_TOKEN} +... +... +... +human +这是第n轮用户输入的问题 +bot +{模型现在要生成的内容}{EOS_TOKEN} +""" +``` + +## 3. 模型训练 + +目前支持全量参数(Full-parameters)指令微调、QLoRA 指令微调,LoRA 指令微调。 +一些优秀的代码预训练模型权重,理论上,HuggingFace 上开源的模型,均可使用本项目进行训练: + +🤗 [最新代码预训练 SOTA,CodeLlama](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) :code-llama-34b, code-llama-34b-python, 新的 SOTA 基座。 + +🤗 [10B 级别最佳代码预训练模型 Starcoder](https://huggingface.co/bigcode/starcoder) wizardCoder-15B, PanGu-coder2 等前 SOTA 的基座模型。 + +🤗 [多语言能手 Qwen-7b](https://huggingface.co/Qwen/Qwen-7B) :适用于多语言任务,也适用中文任务。进行指令微调时。 + +**mftcoder_accelerate 文件结构** + +``` +mftcoder_accelerate + | + src + configs + | + data + | + model + | + *pefts* + | + tokenizer + | + utils + | + evals +``` + +我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化, 详见`src`目录下的实现。 + +训练入口文件是`mftcoder_accelerate/src/pefts/mft_accelerate.py` + +参数配置存储在`mftcoder_accelerate/src/configs`目录下,方便统一管理和更改。 + +**_所以,在你开启训练之前,请进入 src 目录_** + +``` +cd mftcoder_accelerate/src +``` + +### 3.1 数据 tokenization + +训练时,我们将多轮对话拼接成如下格式(也是上文中的推理数据格式),然后进行 tokenize。 +其中,默认情况下: + +`human\n`作为 human/user 的起始符,`bot\n`作为 bot/assistant 的起始符,`{EOS_TOKEN}` 表示 eos_token。 +其中 eos_token 可以根据不同模型修改替换。不同角色的起始符可以配置,用来实现不同的对话/问答模版。 + +``` +"human\n{input1}bot\n{target1}{EOS_TOKEN}human\n{input2}bot\n{target2}{EOS_TOKEN}\n" +``` + +在计算 loss 时,我们通过 loss mask 
的方式,input 部分的 loss 不参与参数更新,只有“target{EOS_TOKEN}”部分的 loss 参与参数更新。 +这种方式充分利用了模型并行计算的优势,训练更加高效,同时也充分利用了 decoder-only 模型从左到右 attention 的特性,一次性将多轮对话中的每个 target 部分都参与了训练,训练更充分高效。 + +### 3.2 LoRA/QLoRA 微调 + +#### LoRA/QLoRA 微调简介 + +关于 LoRA 的详细介绍可参考论文:[LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/pdf/2106.09685.pdf) + +关于 QLoRA 的详细介绍可参考论文:[QLORA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/pdf/2305.14314.pdf) + +QLoRA 通过 4-bit 的 nf4 量化,且加入更多 adapter,在大幅减少显存消耗的同时,尽可能逼近全量参数微调的效果。 +QLoRA 论文指出,该方法可以在一张 V100 上对 33B 的模型进行微调,并且性能逼近全量参数微调。 + +执行如下命令即可进行 Lora/QLora/全量 微调: + +#### Launch via Deepspeed + +DeepSpeed 配置在 accelerate_ds_config.yaml 中。 + +```bash +accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "DeepSpeed" +``` + +或者 + +DeepSpeed 配置在脚本中通过命令行输入。 + +```bash +sh ds_single_launch.sh +``` + +#### Launch via FSDP + +FSDP 配置在 accelerate_fsdp_config.yaml 中。 + +```bash +accelerate launch --config_file accelerate_fsdp_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "FSDP" +``` + +或者 + +FSDP 配置在脚本中通过命令行输入。 + +```bash +sh fsdp_single_launch.sh +``` + +#### 训练参数 + +_**训练需要的参数配置在`configs/*_train_config`中,主要参数说明如下:**_ + +- **load_raw_dataset**: 需要保持 true,后续会支持其它模式数据,当前仅支持 jsonl 输入 +- **data_paths**: "[path1,path2,path3]" 输入数据地址,字符串,开头结尾用[],中间用`,`间隔不同 path,每个 path 是一个目录,目录的最后一级名字作为任务名称,下面包含 1 到多个 jsonl 数据 +- **output_dir**:训练输出目录,存储 checkpoint(全量训练时)、lora_adaptor(Lora 或者 Qlora 时)等 +- **tb_dir**: 存储 tensorboard 等 +- **model_type**: "mixtral|mistral|deepseek|llama|starcoder|chatglm2|qwen|gpt_neox" +- **attn_implementation**: "flash_attention_2" 或者 "eager" +- **peft_type**: lora 或者 qlora 或者 null(全量微调) +- **lora_rank**: lora rank +- **lora_alpha**: lora alpha +- **lora_dropout**: lora dropout +- **target_modules**: List[str], lora 目标模块,如果 null,会使用默认,参考 model_mapping.py +- **quantization**: 是否量化,"4bit", 
"8bit" 或者 null, qlora 推荐 4bit 量化 +- **pretrained_model_path**:预训练模型的本地目录,或者在 huggingface 上的模型名称。 +- **weighted_loss_mode**: 多任务 loss 加权模式, "case3"是当前推荐。 +- **padding_mode**: 数据的样本组织方式, "padding"是将每个原始样本填充到 seq_length, "pack"是将尽量多的样本打包到每个 seq_length 的序列中。 +- **num_train_epochs**:训练的轮次。如果数据量足够大,一般建议只训 1-2 个 epoch。 +- **per_device_train_batch_size**:每张显卡 train 的 batch size。 +- **per_device_eval_batch_size**:每张显卡 eval 的 batch size。 +- **gradient_accumulation_steps**:梯度累计步数。global batch=num*gpus * per*device_train_batch_size * gradient_accumulation_steps。 +- **learning_rate**:学习率。全量参数微调的时候,建议小一些,1e-5 或 5e-6。qlora 中的学习率设置更大一些,一般为 1e-4、2e-4。 +- **min_lr**: 最低学习率, 一般是 learning_rate 的十分之一 +- **seq_length**:训练时的最大长度。按照自己的设备进行设置,越长需要占用越多显存。 +- **log_interval**:每隔多少步统计一次 train loss。 +- **checkpointing_steps**:每隔多少步保存一个模型。 +- **evaluation_steps**:每隔多少步在验证集上 evaluate 一次。 +- **early_stopping** : 是否执行 early_stop +- **early_stopping_stall_num**: 多少个 eval point 不继续收敛,则停止训练 +- **lr_scheduler_type**:学习率变化策略。常用"cosine" +- **warmup_steps**:warm up 步数。学习率经过多少步,增长到指定的数值。 +- **seed**:随机种子,用于复现实验结果。 +- **saving_limit**:整数,ckpt 存储数量上限, 全量训练必须设置。默认 null 即不限制数量。 +- **role_markers**: null,即使用{"system": "\system\n", "user": "\human\n", "assistant": "\bot\n"}。 你可以自定义 "system", "user" and "assistant"的模板, 用于定制自己的问答或者对话模板,比如 {"system": "### System:\n", "user": "### Instruction:\n", "assistant": "### Response:\n"} + +## 4. 
模型使用 + +### 4.1 权重合并 + +如果使用 LoRA 或者 QLoRA 进行训练,本项目仅保存 adapter 的权重和配置文件,需要将 adapter 权重与 base model 进行合并。 +可以使用如下 merge_base_and_lora_to_hf.py 脚本。 + +``` +python pefts/merge_base_and_lora_to_hf.py \ + --base_model_or_path model_path \ + --adaptor_path lora_adapter_path \ + --model_type model_type \ + --merged_output_path output_path +``` + +### 4.2 模型推理 + +我们提供了单轮对话和多轮对话的如下脚本,该脚本可同时兼容大部分 huggingface 格式的模型。 + +```python +from transformers import ( + AutoTokenizer, + AutoModelForCausalLM, +) +model_name_or_path = "codefuse-ai/CodeFuse-Deepseek-33B" +tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, padding_side="left") +tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("<|end▁of▁sentence|>") +tokenizer.pad_token_id = tokenizer.eos_token_id +model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True) + +HUMAN_ROLE_START_TAG = "human\n" +BOT_ROLE_START_TAG = "bot\n" +texts = ["write a python function of quick sort."] +texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] + +inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") +outputs = model.generate( + inputs=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + max_new_tokens=512, + top_p=0.95, + temperature=0.1, + do_sample=True, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id + ) +gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) +print(gen_text) +``` + +生成脚本中的 top_p、temperature、repetition_penalty、do_sample 等参数对模型的生成效果影响较大,可按照自己的使用场景进行调试修改。 +实践中,在代码生成场景中,如果采样模式,do_sample=True, top_p=0.95, temperature=0.1 是 pass@1 指标的不错选择; +如果非采样模式, do_sample=False, beam_num=1 或者 3 是不错的选择,其中 beam_num=1 即为 greedy decoding。 + +## 5. FAQ + +#### 问题 1:OOM 如何解决? 
+ +如果发生 OOM,可以缩小 per_device_train_batch_size、seq_length 等参数来缓解。由于面对的模型普遍较大(6b, 13b, 34b, 70b 等)我们已经默认使用 gradient_checkpointing 技术,可以大幅降低显存占用,但训练速度会稍慢一些。 + +#### 问题 2:安装包错误 + +参考 init_env.sh 和 requirements.txt + +#### 问题 3:如何指定使用某些卡训练? + +通过如下方式,即可指定使用 0 和 1 号卡进行训练: + +```bash +CUDA_VISIBLE_DEVICES=0,1 accelerate launch --config_file pefts/accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "deepspeed" +``` + +#### 问题 4:关于 Flash Attention, 该如何配置训练? + +首先,我们强烈建议您安装 Flash Attention 2(FA2),(>=2.1.0, 2.3.6 功能更齐全)。 + +训练参数中"attn_implementation" 设置成 "eager" 可以用 naive attention,也就是未经加速的 attention。 + +训练参数中"attn_implementation" 设置成 "flash_attention_2" 可以用 FA2,速度快,省显存。 + +如果你可以自行安装环境并使用 torch>=2.1.1,可以尝试设置参数"attn_implementation"为 "sdpa"。这样会尝试使用 transformers 兼容的 torch.nn.functional.scaled_dot_product_attention。支持的模型还不全面。 + +#### 问题 5:推荐的分布式框架是怎样的? + +对于 LoRA/QLoRA, 我们推荐使用 DeepSpeed 作为底层分布式框架,它具有易用性和兼容性好的特点,并且速度很快。 +FSDP 不支持 QLoRA, 因为 bitsandbytes 暂不支持 FSDP。 + +对于全量微调,我们推荐使用 FSDP, 因为它在全量训练时可以发挥 fully sharding 的优势,达到更快的训练速度。 + +#### 问题 6:当前支持的模型中,有什么区别 + +国产大模型比如 chatglm2, chatglm3, baichuan2, qwen, aquila2 等,使用的是和模型共同发布的 modeling_xxx.py. +其它被 transformers 官方支持的大模型,由于已经升级支持 flash attention 等,所以全面切换到官方的 modeling 支持训练,之前的自定义 modeling 会被 deprecated diff --git a/docs/docs/developer-docs/MFTCoder/main/atorch.en-US.md b/docs/docs/developer-docs/MFTCoder/main/atorch.en-US.md new file mode 100644 index 0000000..acdff73 --- /dev/null +++ b/docs/docs/developer-docs/MFTCoder/main/atorch.en-US.md @@ -0,0 +1,260 @@ +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: Atorch Framework +order: 3 +toc: content +--- + +[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai)  + +GitHub + + +## 1. Updates + +🔥 MFTCoder supports fine-tuning of the GPTNeoX model under the Atorch framework. 
+ +🔥 MFTCoder supports both fully supervised fine-tuning. + +🔥 MFTCoder supports LoRA using the Atorch Framework. + +## 2. Data Format + +### 2.1 Training Data Format + +The training data is in a uniformed JSONL format, in which each line of data has the following JSON format. The "chat_rounds" field is required, and other fields can be added or removed based on the specific need. + +```json +{ + "id": 0, + "data_name": "code-helper", + "chat_rounds": [ + { + "role": "system", + "content": "You are a expert in coding and help answer code questions", + "chat_round_id": 0 + }, + { + "role": "human", + "content": "Write a python function of quick sort", + "chat_round_id": 1 + }, + { + "role": "bot", + "content": "Below is the function of quick sort: ...", + "chat_round_id": 1 + }, + { + "role": "human", + "content": "Explain the code", + "chat_round_id": 2 + }, + { + "role": "bot", + "content": "OK, this code ...", + "chat_round_id": 2 + } + ] +} +``` + +### 2.2 Inference Data Format + +The inference data contains strings concatenated by conversation data(system, human and bot contents) in the training data format. +It is used as the data "seen"(before tokenization) by the model in training process. +It is used as input during the inference process as well. +Here is an example format of the concatenated string: + +```python +""" +<|role_start|>system<|role_end|>System instruction +<|role_start|>human<|role_end|>Human 1st round input +<|role_start|>bot<|role_end|>Bot 1st round output +<|role_start|>human<|role_end|>Human 2nd round input +<|role_start|>bot<|role_end|>Bot 2nd round output +... +... +... +<|role_start|>human<|role_end|>Human nth round input +<|role_start|>bot<|role_end|>{Bot output to be genreated} +""" +``` + +When applying inference, you always make your input string end with "<|role_start|>bot<|role_end|>" to request the model generating answers. + +## 3. 
Model Training + +Currently, the "MFTCoder/mft_atorch" code repository supports fully instruction fine-tuning, and LoRA instruction fine-tuning. Only the training of the GPTNeoX model is supported. In theory, the pretrained weights of the GPTNeoX model available on HuggingFace can be used for training within this project. + +We have extracted various components used in training to facilitate future extension and optimization. Please refer to the implementation in the main directory for more details. The entry directory for fine-tuning training is `train/`, and the entry file for training is `train/run_train.py`. The parameter configurations are stored in the launch scripts such as `train/run_gpt_*.sh`, making it easier to manage and modify them uniformly. + +### 3.1 Tokenization + +During training, we concatenate multi-turn dialogues into the following format (also known as the inference data format mentioned earlier) and then tokenize it. In this format, <|role_start|>human<|role_end|> represents the human input (i.e., prompt), <|role_start|>bot<|role_end|> represents the bot output, and represents the eos_token. +You can modify and replace the eos_token based on different models' requirements. + +Here is an example of the concatenated format with prompts: + +``` +"<|role_start|>human<|role_end|>input1target1input2target2... +``` + +During the calculation of loss, we use a `loss mask` to ensure that the loss from the input part does not contribute to the parameter updates. Only the loss from the `target` part is used for updating parameters. +This approach takes full advantage of the benefits of model parallelism, making training more efficient. It also leverages the characteristic of decoder-only models with left-to-right attention. +By including all target parts from multiple turns in a single training iteration, the training process becomes more efficient. 
+ +### 3.2 Fully Supervised Fine-Tuning (SFT) + +To perform fully SFT, you can execute the following command: + +```bash +sh run_gpt_mft.sh 10 1 8 5 +``` + +Please note that the four parameters after the launch script have the following meanings: + +- The first parameter is the per GPU batch size. +- The second parameter is the number of tensor parallelism (currently only supports 1). +- The third parameter is the number of data parallelism, which should match the number of GPUs used. +- The fourth parameter is the number of training epochs. + +For other training modes, the same four parameters need to be configured in the launch script. + +### 3.3 LoRA Supervised Fine-Tuning + +To perform LoRA SFT, you can execute the following command: + +```bash +sh run_gpt_mft_peft.sh 10 1 8 5 +``` + +### 3.4 Parameter Explanations + +The main parameter explanations for the `train/run_gpt_*.sh` are as follows. You can modify these parameters according to your needs: + +- **tokenize_mode**: Need to be 'sft' at present. + +- **train_mode**: Need to be 'sft' at present. + +- **load_raw_dataset**: Need to be 'True' at present. Only JSONL format is supported. + +- **data_paths**: "[path1,path2,path3]" Input data addresses, a string enclosed in [], with different paths separated by commas (,). Each path is a directory where the last level of the directory name is considered as the task name. Each task directory contains 1 to multiple jsonl data files. + +- **output_dir**: Training output directory to store checkpoints, lora_adaptor checkpoints, etc. + +- **tensorboard_dir**: Can be temporarily ignored, as the actual tensorboard is stored in the runs directory under output_dir. + +- **model_type**: Currently only supports gpt_neox. + +- **peft_type**: Currently only supports lora. + +- **pretrained_model_path**: Local directory of the pre-trained model. 
+
+- **total_train_batch_size**: The total batch size for training across all GPUs, calculated automatically based on per gpu batch size entered in the script.
+
+- **per_device_valid_batch_size**: The batch size for evaluation on each GPU, calculated automatically based on per gpu batch size entered in the script.
+
+- **gradient_accumulation_steps**: Number of gradient accumulation steps. Global batch size = num_gpus * per_device_train_batch_size * gradient_accumulation_steps.
+
+- **checkpoint_activations**: Enable if running out of GPU memory. Trades time for space by not caching activation states, resulting in two forward passes to save memory.
+
+- **learning_rate**: Learning rate. When fine-tuning the entire model, it is recommended to use a smaller value, such as 1e-5 or 5e-6. For lora, a larger learning rate is generally used, such as 1e-4 or 2e-4.
+
+- **min_lr**: Minimum learning rate, usually one-tenth of the learning_rate.
+
+- **seq_length**: Maximum length during training. Set according to your device, longer lengths require more memory.
+
+- **log_interval**: Frequency of logging training loss.
+
+- **checkpointing_steps**: Frequency of saving a model checkpoint.
+
+- **evalation_steps**: Frequency of evaluating on the validation set.
+
+- **early_stopping_patience**: Number of consecutive eval points without further convergence to stop training.
+
+- **lr_scheduler_type**: Learning rate changing strategy.
+
+- **num_warmup_steps**: Number of warm-up steps for the learning rate to increase to the specified value.
+
+- **seed**: Random seed used for reproducibility of experimental results.
+
+- **train_iters**: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats.
+ +- **valid_iters**: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats. + +- **evaluation_strategy**: Evaluation strategy during training. "steps" means to evaluate every "valid_interval" steps, "epoch" means to evaluate every epoch. Both can be enabled simultaneously. + +- **save_strategy**: Strategy for saving model weights during training. "steps" means to save every "checkpointing_steps" steps. +- **extra_save_by_epoch**: Whether to save an epoch-level checkpoint every epoch. + +- **save_total_limit**: Maximum number of model checkpoints to keep. Generally set to 2, retaining the checkpoint with the lowest valid loss and the latest checkpoint. Note that epoch-level checkpoints will always be retained and are not subject to this limit. + +- **weighted_loss_mode**: Loss weighting method for multi-task training. + +## 4. Model Usage + +### 4.1 Merge Adaptor weights + +Using LoRA or QLoRA for training, this project only saves the weights and configuration files of the adapters. 
+To merge the adapter weights with the base model, see `src/pefts/merge_base_and_lora_to_hf.py` + +### 4.2 Inference demo + +Here is the script for inference on our trained models, which is compatible with most Hugging Face models: + +```python +from transformers import ( + AutoTokenizer, + AutoModelForCausalLM, +) +tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True, use_fast=False, legacy=False) +tokenizer.padding_side = "left" +tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("") +tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("") +model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, trust_remote_code=True) + +HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>" +BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>" +texts = ["write a python function of quick sort."] +texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] + +inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") +outputs = model.generate( + inputs=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + max_new_tokens=512, + top_p=0.95, + temperature=0.1, + do_sample=True, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id + ) +gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) +print(gen_text) +``` + +Indeed, the parameters top_p, temperature, repetition_penalty, do_sample, etc., have a significant impact on the model's generation output. +You can modify these parameters based on your specific use case. + +In code generation scenarios, if you are using the sampling mode (do_sample=True), the following parameter settings can yield good results for the Pass@1 metric: + +top_p: Set a higher value, such as 0.95, to retain highly probable generated words. This helps ensure more accurate and fluent generation results. 
+ +temperature: Set a lower value, such as 0.1, to reduce randomness. Lower temperature values make the generation output more deterministic. + +These parameter combinations can control the diversity of the generated outputs while maintaining naturalness. Additionally, you can adjust other related parameters, such as repetition_penalty, to reduce repetition in the generated results. + +If you choose the non-sampling mode (do_sample=False), you can consider the following parameter settings: + +beam_num: Set a smaller value such as 1 or 3. `beam_num=1` represents greedy decoding, which selects the most probable single generated word. `beam_num=3` represents beam search mode, which considers multiple potential generation paths and chooses the best path among them. + +## 5. FAQ + +### Q1:What should I do when cuda OOM happens? + +If OOM (Out of Memory) occurs, you can mitigate it by reducing parameters such as per GPU batch size (the first argument when starting the training script) and seq_length. You can also set gradient_checkpointing=true, which significantly reduces memory usage but may slow down the training speed. diff --git a/docs/docs/developer-docs/MFTCoder/main/atorch.zh-CN.md b/docs/docs/developer-docs/MFTCoder/main/atorch.zh-CN.md new file mode 100644 index 0000000..d34a2f4 --- /dev/null +++ b/docs/docs/developer-docs/MFTCoder/main/atorch.zh-CN.md @@ -0,0 +1,243 @@ +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: Atorch框架篇 +order: 2 +toc: content +--- + +[![Generic badge](https://img.shields.io/badge/🤗-Huggingface%20Repo-green.svg)](https://huggingface.co/codefuse-ai)  + +GitHub + + +## 1. 更新 + +🔥 MFTCoder 在 Atorch 框架下支持 GPTNeoX 模型的微调; + +🔥 MFTCoder 支持全量的有监督微调; + +🔥 MFTCoder 支持 LoRA 微调; + +## 2. 
数据格式 + +### 2.1 训练数据格式 + +训练数据为 jsonl 格式,每一行的数据格式如下,其中 chat_rounds 字段是必需的,可以根据实际需求添加或删除其他字段。 +可以参考项目中的 xxx.jsonl 文件。 + +```json +{ + "id": 0, + "data_name": "code-helper", + "chat_rounds": [ + { + "role": "system", + "content": "你是一个智能代码助手,可以回复用户与代码相关的问题", + "chat_round_id": 0 + }, + { + "role": "human", + "content": "写一个快速排序", + "chat_round_id": 1 + }, + { + "role": "bot", + "content": "以下是一个快速排序算法xxxxxx", + "chat_round_id": 1 + }, + { + "role": "human", + "content": "解释一下这段代码", + "chat_round_id": 2 + }, + { + "role": "bot", + "content": "好的,这段代码xxx", + "chat_round_id": 2 + } + ] +} +``` + +### 2.2 推理数据格式 + +推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入 prompt 拼接的方式: + +```python +""" +<|role_start|>system<|role_end|>这是System指令 +<|role_start|>human<|role_end|>这是第1轮用户输入的问题 +<|role_start|>bot<|role_end|>这是第1轮模型生成的内容 +<|role_start|>human<|role_end|>这是第2轮用户输入的问题 +<|role_start|>bot<|role_end|>这是第2轮模型生成的内容 +... +... +... +<|role_start|>human<|role_end|>这是第n轮用户输入的问题 +<|role_start|>bot<|role_end|>{模型现在要生成的内容} +""" +``` + +## 3. 模型训练 + +目前 "MFTCoder/mft_atorch" 代码库支持全量参数指令微调和 LoRA 指令微调。 +目前仅支持 GPTNeoX 模型的训练,理论上,HuggingFace 上开源的 GPTNeoX 模型权重,均可使用本项目进行训练。 + +我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化,详见主目录下的实现。微调训练的入口目录是`train/`, 训练入口文件是`train/run_train.py`, 参数配置存储在启动脚本`train/run_gpt_*.sh`等文件中,方便统一管理和更改。 + +### 3.1 数据格式 + +训练时,我们将多轮对话拼接成如下格式,然后进行 tokenize。其中<|role_start|>human<|role_end|>表示 human 输入提示符,<|role_start|>bot<|role_end|>表示 bot 输出提示符,`` 表示 eos_token。 + +``` +"<|role_start|>human<|role_end|>input1target1input2target2... 
+``` + +在计算 loss 时,我们通过 mask 的方式,input 部分的 loss 不参与参数更新,只有“target”部分的 loss 参与参数更新。 +这种方式充分利用了模型并行计算的优势,训练更加高效,且多轮对话中的每个 target 部分都参与了训练,训练更充分。 +否则,就需要把一个 n 轮对话,拆分成 n 条数据,且只计算最后一个 target 的 loss,大大降低了训练效率。 + +### 3.2 全量 SFT + +执行如下命令即可进行全量 SFT: + +```bash +sh run_gpt_mft.sh 10 1 8 5 +``` + +需注意,启动脚本后的四个参数,分别是: + +- 第一个参数是总的 per gpu batch size +- 第二个参数是 tensor parallel 数(暂时只支持 1) +- 第三个参数是 data parallel 数,与所用 GPU 数保持一致 +- 第四个参数是训练 epoch 数 + +后面其他的训练方式启动脚本,也同样需要配置这四个参数 + +### 3.3 LoRA 微调 + +执行如下命令即可进行 Lora 微调: + +```bash +sh run_gpt_mft_peft.sh 10 1 8 5 +``` + +### 3.4 启动脚本中主要参数说明 + +`train/run_gpt_*.sh`中的主要参数说明如下,以下参数可以根据需求进行修改,其他参数建议不做修改: + +- tokenize_mode: 目前仅支持"sft"。 + +- train_mode: 目前仅支持"sft"。 + +- load_raw_dataset: 需要保持"True",后续会支持其它模式数据,当前仅支持 jsonl 输入 + +- data_paths: "[path1,path2,path3]" 输入数据地址,字符串,开头结尾用[],中间用`,`间隔不同 path,每个 path 是一个目录,目录的最后一级名字作为任务名称,下面包含 1 到多个 jsonl 数据。 + +- output_dir: 训练输出目录,存储 checkpoint、lora_adaptor checkpoint 等。 + +- tensorboard_dir: 可以暂时忽略,实际 tensorboard 存储在 output_dir 的 runs 目录下。 + +- model_type: 目前仅支持 gpt_neox。 + +- peft_type: 目前仅支持 lora。 + +- pretrained_model_path: 预训练模型的本地目录。 + +- total_train_batch_size: 所有显卡 train 的 batch size 的总和,会根据启动脚本时输入的 per gpu batch size 自动计算。 + +- per_device_valid_batch_size: 每张显卡 eval 的 batch size,会根据启动脚本时输入的 per gpu batch size 自动计算。 + +- gradient*accumulation_steps: 梯度累计步数。global batch=num_gpus * per*device_train_batch_size * gradient_accumulation_steps。 + +- checkpoint_activations: 如果显存捉襟见肘,可以开启。以时间换空间,模型不缓存激活状态,会进行两次 forward 计算,以节省显存。 + +- learning_rate: 学习率。全量参数微调的时候,建议小一些,1e-5 或 5e-6。qlora 中的学习率设置更大一些,一般为 1e-4、2e-4。 + +- min_lr: 最低学习率, 一般是 learning_rate 的十分之一。 + +- seq_length: 训练时的最大长度。按照自己的设备进行设置,越长需要占用越多显存。 + +- log_interval: 每隔多少步统计一次 train loss。 + +- checkpointing_steps: 每隔多少步保存一个模型。 + +- evalation_steps: 每隔多少步在验证集上 evaluate 一次。 + +- early_stopping_patience: 多少个 eval point 不继续收敛,则停止训练。 + +- lr_scheduler_type: 学习率变化策略。 + +- num_warmup_steps: warm up 步数,学习率经过多少步,增长到指定的数值。 + +- seed: 
随机种子,用于复现实验结果。 + +- train_iters: 可以暂时设为比较小的数,如 10,实际上不会影响训练步数,留作后面拓展读取其他形式数据集的功能。 + +- valid_iters: 可以暂时设为比较小的数,如 10,实际上不会影响训练步数,留作后面拓展读取其他形式数据集的功能。 + +- evaluation_strategy: 训练期间 evaluate 的策略,"steps"表示每隔"valid_interval"步做一次 evaluate,"epoch"表示每隔一个 epoch 做一次 evaluate,支持同时开启。 + +- save_strategy: 训练期间保存模型权重的策略,"steps"表示每隔"checkpointing_steps"步保存一次。 + +- extra_save_by_epoch: 每过一个 epoch 是否要保存一个 epoch 级别的 checkpoint。 + +- save_total_limit: 最多保留的模型 checkpoint 个数,一般设置为 2,会保留 valid loss 最低,以及最新的 checkpoint,注意 epoch 级别的 checkpoint 会一直保留,且不受限制。 + +- weighted_loss_mode: 多任务训练的 loss 加权方式。 + +## 4. 模型使用 + +### 4.1 权重合并 + +如果使用 LoRA 进行训练,本项目仅保存 adapter 的权重和配置文件,需要将 adapter 权重与 base model 进行合并。脚本见`utils/merge_base_and_lora_to_hf.py` + +### 4.2 模型推理 + +我们提供了单轮对话和多轮对话的如下脚本,该脚本可同时兼容大部分 huggingface 格式的模型。 + +```python +from transformers import ( + AutoTokenizer, + AutoModelForCausalLM, +) +tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True, use_fast=False, legacy=False) +tokenizer.padding_side = "left" +tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("") +tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("") +model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, trust_remote_code=True) + +HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>" +BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>" +texts = ["write a python function of quick sort."] +texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts] + +inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") +outputs = model.generate( + inputs=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + max_new_tokens=512, + top_p=0.95, + temperature=0.1, + do_sample=True, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id + ) +gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) +print(gen_text) +``` + +生成脚本中的 
top_p、temperature、repetition_penalty、do_sample 等参数对模型的生成效果影响较大,可按照自己的使用场景进行调试修改。 +实践中,在代码生成场景中,如果采样模式,do_sample=True, top_p=0.95, temperature=0.1 是 pass@1 指标的不错选择; +如果非采样模式, do_sample=False, beam_num=1 或者 3 是不错的选择,其中 beam_num=1 即为 greedy decoding。 + +## 5. FAQ + +#### 问题 1:OOM 如何解决? + +如果发生 OOM,可以缩小 per GPU batch size (启动训练脚本时的第一个参数)、seq_length 等参数来缓解。也可以设 gradient_checkpointing=true,可以大幅降低显存占用,但训练速度会变慢一些。 diff --git a/content/en/docs/mftcoder/1_introduction.md b/docs/docs/developer-docs/MFTCoder/main/introduction.en-US.md similarity index 80% rename from content/en/docs/mftcoder/1_introduction.md rename to docs/docs/developer-docs/MFTCoder/main/introduction.en-US.md index 5bd6e08..58fda12 100644 --- a/content/en/docs/mftcoder/1_introduction.md +++ b/docs/docs/developer-docs/MFTCoder/main/introduction.en-US.md @@ -1,44 +1,48 @@ ---- -title: Introduction -slug: Introduction -description: Introduction Document -url: /docs/mftcoder-introduction -aliases: -- "/docs/mftcoder-introduction" ---- - - - -## Introduction - -**High Accuracy and efficiency Multi-task Fine-tuning framework for Code LLMs.** - -**MFTCoder** is an open-source project of CodeFuse for accurate and efficient Multi-task Fine-tuning(MFT) on Large Language Models(LLMs), especially on Code-LLMs(large language model for code tasks). -Moreover, we open source Code LLM models and code-related datasets along with the MFTCoder framework. - -In MFTCoder, we released two codebases for finetuning Large Language Models: -- **```MFTCoder-accelerate```** is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. We highly recommend you try this framework and make your fintuning accurate and efficient. -- ```MFTCoder-atorch``` is based on the [ATorch frameworks](https://github.com/intelligent-machine-learning/dlrover), which is a fast distributed training framework of LLM. 
- -The aim of this project is to foster collaboration and share advancements in large language models, particularly within the domain of code development. - -### Frameworks -![img.jpg](/images/mftcoder/img.jpg) - -### Highlights -:white_check_mark: **Multi-task**: Train models on multiple tasks while maintaining a balance between them. The models can even generalize to new, previously unseen tasks. - -:white_check_mark: **Multi-model**: It integrates state-of-the-art open-source models such as gpt-neox, llama, llama-2, baichuan, Qwen, chatglm2, and more. (These finetuned models will be released in the near future.) - -:white_check_mark: **Multi-framework**: It provides support for both Accelerate (with Deepspeed and FSDP) and ATorch - -:white_check_mark: **Efficient fine-tuning**: It supports LoRA, QLoRA as well as Full-parameters training, enabling fine-tuning of large models with minimal resources. The training speed meets the demands of almost all fine-tuning scenarios. - -The main components of this project include: -- Support for both SFT (Supervised FineTuning) and MFT (Multi-task FineTuning). The current MFTCoder achieves data balance among multiple tasks, and future releases will achieve a balance between task difficulty and convergence speed during training. -- Support for QLoRA instruction fine-tuning, LoRA fine-tuning as well as Full-parameters fine-tuning. -- Support for most mainstream open-source large models, particularly those relevant to Code-LLMs, such as DeepSeek-coder, Mistral, Mixtral, Chatglm3, Code-LLaMA, Starcoder, Codegeex2, Qwen, GPT-Neox, and more. -- Support for weight merging between the LoRA adaptor and base models, simplifying the inference process. -- Release of 2 high-quality code-related instruction fine-tuning datasets: [Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k) and [CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k). 
-- Release of many Code LLMs, please refer to organizations: [codefuse-ai on huggingface](https://huggingface.co/codefuse-ai) or [codefuse-ai on modelscope](https://modelscope.cn/organization/codefuse-ai). - +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: Introduction +order: 0 +toc: content +--- + +## Introduction + +**High Accuracy and efficiency Multi-task Fine-tuning framework for Code LLMs.** + +**MFTCoder** is an open-source project of CodeFuse for accurate and efficient Multi-task Fine-tuning(MFT) on Large Language Models(LLMs), especially on Code-LLMs(large language model for code tasks). +Moreover, we open source Code LLM models and code-related datasets along with the MFTCoder framework. + +In MFTCoder, we released two codebases for finetuning Large Language Models: + +- **`MFTCoder-accelerate`** is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. We highly recommend you try this framework and make your fintuning accurate and efficient. +- `MFTCoder-atorch` is based on the [ATorch frameworks](https://github.com/intelligent-machine-learning/dlrover), which is a fast distributed training framework of LLM. + +The aim of this project is to foster collaboration and share advancements in large language models, particularly within the domain of code development. + +### Frameworks + +![img.jpg](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*p8ahSYMtrwsAAAAAAAAAAAAADlHYAQ/original) + +### Highlights + +:white_check_mark: **Multi-task**: Train models on multiple tasks while maintaining a balance between them. The models can even generalize to new, previously unseen tasks. + +:white_check_mark: **Multi-model**: It integrates state-of-the-art open-source models such as gpt-neox, llama, llama-2, baichuan, Qwen, chatglm2, and more. (These finetuned models will be released in the near future.) 
+ +:white_check_mark: **Multi-framework**: It provides support for both Accelerate (with Deepspeed and FSDP) and ATorch + +:white_check_mark: **Efficient fine-tuning**: It supports LoRA, QLoRA as well as Full-parameters training, enabling fine-tuning of large models with minimal resources. The training speed meets the demands of almost all fine-tuning scenarios. + +The main components of this project include: + +- Support for both SFT (Supervised FineTuning) and MFT (Multi-task FineTuning). The current MFTCoder achieves data balance among multiple tasks, and future releases will achieve a balance between task difficulty and convergence speed during training. +- Support for QLoRA instruction fine-tuning, LoRA fine-tuning as well as Full-parameters fine-tuning. +- Support for most mainstream open-source large models, particularly those relevant to Code-LLMs, such as DeepSeek-coder, Mistral, Mixtral, Chatglm3, Code-LLaMA, Starcoder, Codegeex2, Qwen, GPT-Neox, and more. +- Support for weight merging between the LoRA adaptor and base models, simplifying the inference process. +- Release of 2 high-quality code-related instruction fine-tuning datasets: [Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k) and [CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k). +- Release of many Code LLMs, please refer to organizations: [codefuse-ai on huggingface](https://huggingface.co/codefuse-ai) or [codefuse-ai on modelscope](https://modelscope.cn/organization/codefuse-ai). 
diff --git a/docs/docs/developer-docs/MFTCoder/main/introduction.zh-CN.md b/docs/docs/developer-docs/MFTCoder/main/introduction.zh-CN.md new file mode 100644 index 0000000..49730ae --- /dev/null +++ b/docs/docs/developer-docs/MFTCoder/main/introduction.zh-CN.md @@ -0,0 +1,40 @@ +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: 基本介绍 +order: 0 +toc: content +--- + +## 项目简介 + +**国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架;** + +**Codefuse-MFTCoder** 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。 + +### 项目框架 + +![img_1.jpg](https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*zc9pRJ-hdZMAAAAAAAAAAAAADlHYAQ/original) + +### 项目优势 + +:white_check_mark: **多任务**:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去; + +:white_check_mark: **多模型**:支持最新的多个开源模型,包括 gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2 等; + +:white_check_mark: **多框架**:既支持主流开源的 Accelerate+DeepSpeed/FSDP,也支持新开源的[ATorch 框架](https://github.com/intelligent-machine-learning/dlrover); + +:white_check_mark: **高效微调**:支持 LoRA 和 QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景; + +本项目主要内容如下: + +- 同时支持单任务 SFT(Supervised FineTuning)和 MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等 +- 支持 QLoRA 低成本高效指令微调、LoRA 高效指令微调、全量参数高精度微调。 +- 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如 DeepSeek-coder, Mistral, Mistral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA 等。 +- 支持 lora 与 base model 进行权重合并,推理更便捷。 +- 整理并开源 2 个指令微调数据集:[Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k)和[CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k)。 +- 开源多个[Codefuse 系列指令微调模型权重],具体参见我们的 huggingface 组织和 modelscope 组织下的模型:[codefuse-ai huggingface](https://huggingface.co/codefuse-ai) or [codefuse-ai 魔搭](https://modelscope.cn/organization/codefuse-ai)。 diff --git a/docs/docs/developer-docs/MFTCoder/main/quickstart.en-US.md b/docs/docs/developer-docs/MFTCoder/main/quickstart.en-US.md new file mode 100644 index 
0000000..7b84901 --- /dev/null +++ b/docs/docs/developer-docs/MFTCoder/main/quickstart.en-US.md @@ -0,0 +1,58 @@ +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: QuickStart +order: 1 +toc: content +--- + +## Requirements + +To begin, ensure that you have successfully installed CUDA (version >= 11.4, preferably 11.7) along with the necessary drivers. Additionally, make sure you have installed torch (version 2.0.1). + +Next, we have provided an init_env.sh script to simplify the installation of required packages. Execute the following command to run the script: + +```bash +sh init_env.sh +``` + +We highly recommend training with flash attention(version >= 2.1.0, preferably 2.3.6), please refer to the following link for installation instructions: https://github.com/Dao-AILab/flash-attention + +## Training + +As mentioned above, we open source two training frameworks. You could refer to their own READMEs for more details as followed. + +If you are familiar with open source `transformers`, `DeepSpeed` or `FSDP`, we highly recommend you try: + +🚀🚀 [**MFTCoder-accelerate: Accelerate + Deepspeed/FSDP Codebase for MFT(Multi-task Finetuning)**](./accelerate.en-US.md) + +If you want to explore some new framework like atorch, you could check: + +🚀 [MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning)](./atorch.en-US.md) + +## Models + +We are excited to release the following two CodeLLMs trained by MFTCoder, now available on both HuggingFace and ModelScope: + +| Model | HuggingFace Links | ModelScope Links | Base Model | Num of examples trained | Batch Size | Seq Length | +| ------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -------------------- | ----------------------- | ---------- | ---------- | +| 🔥 CodeFuse-DeepSeek-33B | 
[h-link](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-DeepSeek-33B) | DeepSeek-coder-33B | 60 万 | 80 | 4096 | +| 🔥 CodeFuse-Mixtral-8x7B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-Mixtral-8x7B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-Mixtral-8x7B) | Mixtral-8x7B | 60 万 | 80 | 4096 | +| 🔥 CodeFuse-CodeLlama-34B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B) | CodeLlama-34b-Python | 60 万 | 80 | 4096 | +| 🔥 CodeFuse-CodeLlama-34B-4bits | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | CodeLlama-34b-Python | | | 4096 | +| 🔥 CodeFuse-StarCoder-15B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-StarCoder-15B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-StarCoder-15B) | StarCoder-15B | 60 万 | 80 | 4096 | +| 🔥 CodeFuse-QWen-14B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-QWen-14B) | Qwen-14b | 110 万 | 256 | 4096 | +| 🔥 CodeFuse-CodeGeex2-6B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeGeex2-6B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeGeex2-6B) | CodeGeex2-6B | 110 万 | 256 | 4096 | + +## Datasets + +We are also pleased to release two code-related instruction datasets, meticulously selected from a range of datasets to facilitate multitask training. Moving forward, we are committed to releasing additional instruction datasets covering various code-related tasks. 
+ +| Dataset | Description | +| ------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [⭐ Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k) | Based on open-evol-instruction-80k, filter out low-quality, repeated, and similar instructions to HumanEval, thus get high-quality code instruction dataset. | +| [⭐ CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k) | python code exercise instruction dataset | diff --git a/docs/docs/developer-docs/MFTCoder/main/quickstart.zh-CN.md b/docs/docs/developer-docs/MFTCoder/main/quickstart.zh-CN.md new file mode 100644 index 0000000..a4b2973 --- /dev/null +++ b/docs/docs/developer-docs/MFTCoder/main/quickstart.zh-CN.md @@ -0,0 +1,55 @@ +--- +store: + title: MFTCoder + version: main +group: + title: 🌱 MFTCoder + order: -1 +title: 快速使用 +order: 1 +toc: content +--- + +## 环境 + +首先, 你需要将 CUDA(>=11.4, 推荐 11.7)及其相关驱动安装成功,并确保其工作正常, 并且安装基本的 torch(>=2.0.0) +在 requirements.txt 下固定了几个主要的 python 包的版本,执行如下脚本即可: + +```bash +sh init_env.sh +``` + +我们强烈建议您安装 flash attention(>=2.1.0, 推荐 2.3.6), 安装请参考 https://github.com/Dao-AILab/flash-attention + +## 训练 + +如果你熟悉大模型训练的各种主流开源资源,例如 `transformers`, `DeepSpeed`, `FSDP`等, 为了用开源项目快速上手高性能微调,我们建议您尝试: + +🚀🚀 [MFTCoder-accelerate: Accelerate + DeepSpeed/FSDP Codebase for MFT(Multi-task Finetuning)](./accelerate.zh-CN.md) + +如果你想探索一些新兴的训练框架,可以尝试: + +🚀 [MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning)](./atorch.zh-CN.md) + +## 模型 + +使用本项目的训练代码,以及上述训练数据,我们训练并在 huggingface, modelscope 开源了以下模型。 + +| 模型 | HuggingFace 链接 | 魔搭 链接 | 基座模型 | 训练数据 | Batch Size | Seq Length | +| ----------------------------------- | ------------------------------------------------------------------------- | 
------------------------------------------------------------------------------- | -------------------- | -------- | ---------- | ---------- | +| 🔥🔥🔥 CodeFuse-DeepSeek-33B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-DeepSeek-33B) | DeepSeek-coder-33B | 60 万 | 80 | 4096 | +| 🔥🔥🔥 CodeFuse-Mixtral-8x7B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-Mixtral-8x7B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-Mixtral-8x7B) | Mixtral-8x7B | 60 万 | 80 | 4096 | +| 🔥🔥🔥 CodeFuse-CodeLlama-34B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B) | CodeLlama-34b-Python | 60 万 | 80 | 4096 | +| 🔥🔥🔥 CodeFuse-CodeLlama-34B-4bits | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B-4bits) | CodeLlama-34b-Python | | | 4096 | +| 🔥🔥🔥 CodeFuse-StarCoder-15B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-StarCoder-15B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-StarCoder-15B) | StarCoder-15B | 60 万 | 80 | 4096 | +| 🔥🔥🔥 CodeFuse-QWen-14B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-QWen-14B) | Qwen-14b | 110 万 | 256 | 4096 | +| 🔥🔥🔥 CodeFuse-CodeGeex2-6B | [h-link](https://huggingface.co/codefuse-ai/CodeFuse-CodeGeex2-6B) | [m-link](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeGeex2-6B) | CodeGeex2-6B | 110 万 | 256 | 4096 | + +## 数据集 + +目前本项目主要整理了如下指令数据集,并将其整理成统一的数据格式,这两个指令微调数据集是我们多任务训练中数十个任务中的 2 个,未来我们会陆续开源更多的代码任务指令微调数据集: + +| 数据集 | 介绍 | +| ------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | +| [⭐ 
Evol-instruction-66k](https://huggingface.co/datasets/codefuse-ai/Evol-instruction-66k) | 基于开源 open-evol-instruction-80k 过滤低质量,重复和 human eval 相似的数据后得到的高质量代码类微调数据 | +| [⭐ CodeExercise-Python-27k](https://huggingface.co/datasets/codefuse-ai/CodeExercise-Python-27k) | 高质量 python 练习题数据 | diff --git a/content/en/docs/overview/b7.TestAgent.md b/docs/docs/developer-docs/Test-Agent/main/TestAgent.en-US.md similarity index 54% rename from content/en/docs/overview/b7.TestAgent.md rename to docs/docs/developer-docs/Test-Agent/main/TestAgent.en-US.md index 1221291..d26a37a 100644 --- a/content/en/docs/overview/b7.TestAgent.md +++ b/docs/docs/developer-docs/Test-Agent/main/TestAgent.en-US.md @@ -1,58 +1,75 @@ --- -title: "Test-Agent: Your AI Test Assistant" -slug: "Test-Agent: Your AI Test Assistant" -description: 介绍主要功能 -aliases: -- "/docs/test-agent" +nav: + title: Docs + order: -1 + second: + title: Developer-Docs + order: -1 +store: + title: Test-Agent + version: main +group: + title: 🌱 Test-Agent + index: true + order: -1 +title: Test-Agent +order: -1 +toc: content --- ### Local Mac M1 Experience + ![图片](https://github.com/codefuse-ai/Test-Agent/assets/103973989/8dba860f-c1bb-49d5-b9dd-a58e541562a6) ### Moda Experience + Moda Model Access Link:[ModelScope TestGPT-7B](https://modelscope.cn/models/codefuse-ai/TestGPT-7B/summary) -![MS](https://github.com/codefuse-ai/Test-Agent/assets/103973989/0e50b258-44f9-4dc6-8e30-0a01cf62d02b) +![MS](https://github.com/codefuse-ai/Test-Agent/assets/103973989/0e50b258-44f9-4dc6-8e30-0a01cf62d02b) ## What is Test Agent? (Introduction) + **Test Agent** aims to build an "intelligent agent" in the testing domain, integrating large models with engineering technologies in the quality domain to promote the generational upgrade of quality technology. We look forward to collaborating with community members to create innovative solutions in the testing domain, establish a 24-hour online testing assistant service, and make testing as smooth as silk. 
+ ## Current Features (Features) -* **Model**: This release open-sources the TestGPT-7B model for the testing domain. The model is based on CodeLlama-7B and has been fine-tuned for related downstream tasks: - * **Multilingual Test Case Generation (Java/Python/Javascript)**: This has always been an area of great interest to both academia and industry, with new products and tools like EvoSuite, Randoop, SmartUnit, etc., constantly being incubated. However, traditional test case generation has pain points that are difficult to address. Test case generation based on large models is superior to traditional tools in terms of readability, completeness of test scenarios, and multilingual support. This update focuses on multilingual test case generation, initially including Java, Python, and Javascript, and will gradually introduce Go, C++, and other languages in future releases. - * **Test Case Assert Completion**: Analyzing the current state of test cases, we found that a certain proportion of existing test cases in the code repositories do not contain Asserts. Test cases without Asserts may pass during regression but fail to detect issues. Therefore, we expanded the scenario of automatic completion of test case Asserts. With this model capability, combined with the right engineering support, it's possible to perform batch automatic completion for the entire test case repository, intelligently raising the quality level of the project. -* **Engineering Framework**: Local model quick release and experience engineering framework - - ChatBot page - - Quick model launch - - Private deployment, localized GPT large model interactions with your data and environment, no risk of data leakage, 100% safe. - + +- **Model**: This release open-sources the TestGPT-7B model for the testing domain. 
The model is based on CodeLlama-7B and has been fine-tuned for related downstream tasks: + - **Multilingual Test Case Generation (Java/Python/Javascript)**: This has always been an area of great interest to both academia and industry, with new products and tools like EvoSuite, Randoop, SmartUnit, etc., constantly being incubated. However, traditional test case generation has pain points that are difficult to address. Test case generation based on large models is superior to traditional tools in terms of readability, completeness of test scenarios, and multilingual support. This update focuses on multilingual test case generation, initially including Java, Python, and Javascript, and will gradually introduce Go, C++, and other languages in future releases. + - **Test Case Assert Completion**: Analyzing the current state of test cases, we found that a certain proportion of existing test cases in the code repositories do not contain Asserts. Test cases without Asserts may pass during regression but fail to detect issues. Therefore, we expanded the scenario of automatic completion of test case Asserts. With this model capability, combined with the right engineering support, it's possible to perform batch automatic completion for the entire test case repository, intelligently raising the quality level of the project. +- **Engineering Framework**: Local model quick release and experience engineering framework + - ChatBot page + - Quick model launch + - Private deployment, localized GPT large model interactions with your data and environment, no risk of data leakage, 100% safe. + **We will continue to iterate on the model and engineering capabilities:** + - Continuously adding more exciting test domain application scenarios, such as domain knowledge Q&A, test scenario analysis, etc. 
- Supporting the open copilot engineering framework focused on testing scenarios, such as intelligent embedding of testing domain knowledge, a common tool API system, intelligent testing Agent, and more, so stay tuned! - Expanding from a 7B base to 13B and 34B models gradually. Stay tuned! ## The Most Powerful 7B Test Domain Large Model (Model) + Currently, within TestAgent, we default to using the TestGPT-7B model. Compared to existing open-source models, the TestGPT-7B model leads the industry in case execution pass rate (pass@1) and test scenario coverage (average number of test scenarios). The core capability evaluation results of the TestGPT-7B model are as follows: Multilingual Test Case Generation For the three supported languages of the model: Java, Python, and Javascript, the Pass@1 evaluation results are as follows: -| Model | Java pass@1 | Java Average number of test scenarios | Python pass@1 | Python Average number of test scenarios | Javascript pass@1 | Javascript Average number of test scenarios | -| --- | --- | --- | --- | --- | --- | --- | -| TestGPT-7B | 48.6% | 4.37 | 35.67% | 3.56 | 36% | 2.76 | -| CodeLlama-13B-Instruct | 40.54% | 1.08 | 30.57% | 1.65 | 31.7% | 3.13 | -| Qwen-14B-Chat | 10.81% | 2.78 | 15.9% | 1.32 | 9.15% | 4.22 | -| Baichuan2-13B-Chat | 13.5% | 2.24 | 12.7% | 2.12 | 6.1% | 3.31 | - +| Model | Java pass@1 | Java Average number of test scenarios | Python pass@1 | Python Average number of test scenarios | Javascript pass@1 | Javascript Average number of test scenarios | +| ---------------------- | ----------- | ------------------------------------- | ------------- | --------------------------------------- | ----------------- | ------------------------------------------- | +| TestGPT-7B | 48.6% | 4.37 | 35.67% | 3.56 | 36% | 2.76 | +| CodeLlama-13B-Instruct | 40.54% | 1.08 | 30.57% | 1.65 | 31.7% | 3.13 | +| Qwen-14B-Chat | 10.81% | 2.78 | 15.9% | 1.32 | 9.15% | 4.22 | +| Baichuan2-13B-Chat | 13.5% | 2.24 | 12.7% | 2.12 | 6.1% | 
3.31 | - Test Case Assert Completion -Currently, the model supports Assert completion for Java cases, and the Pass@1 evaluation - -| Model | pass@1 | Percentage of strong validation | -| --- | --- | --- | -| Codefuse-TestGPT-7B | 71.1% | 100% | + Currently, the model supports Assert completion for Java cases, and the Pass@1 evaluation +| Model | pass@1 | Percentage of strong validation | +| ------------------- | ------ | ------------------------------- | +| Codefuse-TestGPT-7B | 71.1% | 100% | ## Engineering Architecture + ![JG](https://github.com/codefuse-ai/Test-Agent/assets/103973989/1b61beff-df59-4ab3-843c-266413c8dbc4) The clarion call for large models has been sounded, and large models in the testing domain are continuously evolving. With the rich world knowledge accumulated during the pre-training process, they have demonstrated extraordinary reasoning and decision-making abilities in complex interactive environments. @@ -60,4 +77,3 @@ The clarion call for large models has been sounded, and large models in the test Despite significant achievements of the foundational models in the testing domain, there are still some limitations. Testing tasks in specific domains often require specialized tools or domain knowledge. For instance, foundational models can complete tasks such as single-instance test code generation and test text generation through pre-trained knowledge, but when dealing with complex integrated test case generation, domain-specific case creation, and interactions with test process pipelines, more specialized tools and domain knowledge are necessary. Therefore, integrating specialized tools with foundational models can fully harness their respective strengths. Specialized tools can address insufficiencies in model timeliness, enhance professional knowledge, and improve interpretability and robustness. 
On the other hand, foundational models possess human-like reasoning and planning abilities, capable of understanding complex data and scenarios, and interacting with the real world. Building upon the open model engineering deployment and ChatBot foundation in this release, we will continue to invest deeply in the open-source testing domain. Collaborating with community developers who share similar interests, we aim to create the most advanced engineering system for tools in the testing domain, an intelligent testing assistant, and open-source testing engineering! - diff --git a/docs/docs/developer-docs/Test-Agent/main/TestAgent.zh-CN.md b/docs/docs/developer-docs/Test-Agent/main/TestAgent.zh-CN.md new file mode 100644 index 0000000..96d0394 --- /dev/null +++ b/docs/docs/developer-docs/Test-Agent/main/TestAgent.zh-CN.md @@ -0,0 +1,82 @@ +--- +nav: + title: 文档 + order: -1 + second: + title: 开发者文档 + order: -1 +store: + title: Test-Agent + version: main +group: + title: 🌱 Test-Agent + index: true + order: -1 +title: Test-Agent +order: -1 +toc: content +--- + +### 本地 Mac M1 体验效果 + +![图片](https://github.com/codefuse-ai/Test-Agent/assets/103973989/8dba860f-c1bb-49d5-b9dd-a58e541562a6) + +### 魔搭体验效果 + +魔搭模型访问链接:[ModelScope TestGPT-7B](https://modelscope.cn/models/codefuse-ai/TestGPT-7B/summary) + +![MS](https://github.com/codefuse-ai/Test-Agent/assets/103973989/0e50b258-44f9-4dc6-8e30-0a01cf62d02b) + +## 什么是 Test Agent?(Introduction) + +**Test Agent** 旨在构建测试领域的“智能体”,融合大模型和质量领域工程化技术,促进质量技术代系升级。我们期望和社区成员一起合作,打造创新的测试领域解决方案,构建 24 小时在线的测试助理服务,让测试如丝般顺滑。 + +## 本期特性(Features) + +- **模型** 本期我们开源了测试领域模型 TestGPT-7B。模型以 CodeLlama-7B 为基座,进行了相关下游任务的微调: + + - **多语言测试用例生成(Java/Python/Javascript)** 一直以来都是学术界和工业界非常关注的领域,近年来不断有新产品或工具孵化出来,如 EvoSuite、Randoop、SmartUnit 等。然而传统的用例生成存在其难以解决的痛点问题,基于大模型的测试用例生成在测试用例可读性、测试场景完整度、多语言支持方面都优于传统用例生成工具。本次重点支持了多语言测试用例生成,在我们本次开源的版本中首先包含了 Java、Python、Javascript 的测试用例生成能力,下一版本中逐步开放 Go、C++等语言。 + - **测试用例 Assert 补全** 对当前测试用例现状的分析与探查时,我们发现代码仓库中存在一定比例的存量测试用例中未包含 
Assert。没有 Assert 的测试用例虽然能够在回归过程中执行通过,却无法发现问题。因此我们拓展了测试用例 Assert 自动补全这一场景。通过该模型能力,结合一定的工程化配套,可以实现对全库测试用例的批量自动补全,智能提升项目质量水位。 + +- **工程框架** 本地模型快速发布和体验工程化框架 + - ChatBot 页面 + - 模型快速启动 + - 私有化部署,本地化的 GPT 大模型与您的数据和环境进行交互,无数据泄露风险,100%安全 + +**后续我们会持续迭代模型和工程化能力:** + +- 不断加入更多令人激动的测试域应用场景,如领域知识问答、测试场景分析等 +- 支撑面向测试场景的 copilot 工程框架开放,如测试领域知识智能 embedding、测试通用工具 API 体系、智能测试 Agent 等,敬请期待! +- 以 7B 为基础,逐步扩展至 13B、34B 模型。欢迎关注! + +## 性能最强的 7B 测试领域大模型(Model) + +目前在 TestAgent 中,我们默认使用了 TestGPT-7B 模型。与当前已有开源模型相比,**TestGPT-7B 模型在用例执行通过率(pass@1)、用例场景覆盖(平均测试场景数)上都处于业界领先水平。** +TestGPT-7B 模型核心能力的评测结果如下: + +- 多语言测试用例生成 + 针对模型支持的三种语言:Java、Python、Javascript,Pass@1 评测结果如下: + +| Model | Java pass@1 | Java Average number of test scenarios | Python pass@1 | Python Average number of test scenarios | Javascript pass@1 | Javascript Average number of test scenarios | +| ---------------------- | ----------- | ------------------------------------- | ------------- | --------------------------------------- | ----------------- | ------------------------------------------- | +| TestGPT-7B | 48.6% | 4.37 | 35.67% | 3.56 | 36% | 2.76 | +| CodeLlama-13B-Instruct | 40.54% | 1.08 | 30.57% | 1.65 | 31.7% | 3.13 | +| Qwen-14B-Chat | 10.81% | 2.78 | 15.9% | 1.32 | 9.15% | 4.22 | +| Baichuan2-13B-Chat | 13.5% | 2.24 | 12.7% | 2.12 | 6.1% | 3.31 | + +- 测试用例 Assert 补全 + 目前模型支持 Java 用例的 Assert 补全,Pass@1 评测结果如下: + +| Model | pass@1 | Percentage of strong validation | +| ------------------- | ------ | ------------------------------- | +| Codefuse-TestGPT-7B | 71.1% | 100% | + +## 工程架构(Engineering Architecture) + +![JG](https://github.com/codefuse-ai/Test-Agent/assets/103973989/1b61beff-df59-4ab3-843c-266413c8dbc4) + +大模型的号角已经吹响,测试领域大模型也在不断进化中,通过预训练过程中积累的丰富世界知识,在复杂交互环境中展现出了非凡的推理与决策能力。 + +尽管在测试领域中基础模型取得了显著的成果,但仍然存在一些局限性,特定领域的测试任务通常需要专业化的工具或领域知识来解决。例如,基础模型可以通过预训练知识完成单次测试代码生成和测试文本生成等任务,但处理复杂的集成用例生成、特定领域用例生成和测试流程 pipeline 
交互等问题时,需要更专业的工具和领域知识。因此将专用工具与基础模型整合在一起,可以充分发挥它们各自的优势。专用工具可以解决模型时效性不足、增强专业知识、提高可解释性和鲁棒性的问题。而基础模型则具备类人的推理规划能力,可以理解复杂的数据和场景,并与现实世界进行交互。 + +在本期开放模型工程化部署和 ChatBot 基础上,我们将继续在测试开源领域深耕投入。协同社区志趣相投开发者们,一起打造测试领域最领先的 Tools 工程体系、智能测试助理和测试开源工程! diff --git a/content/en/docs/testagent/1_quickstart.md b/docs/docs/developer-docs/Test-Agent/main/quickstart.en-US.md similarity index 61% rename from content/en/docs/testagent/1_quickstart.md rename to docs/docs/developer-docs/Test-Agent/main/quickstart.en-US.md index 66f794b..1c4a2b4 100644 --- a/content/en/docs/testagent/1_quickstart.md +++ b/docs/docs/developer-docs/Test-Agent/main/quickstart.en-US.md @@ -1,65 +1,68 @@ ---- -title: "QuickStart" -slug: "QuickStart" -description: 介绍主要功能 -url: "/docs/test-agent-quickstart" -aliases: -- "/docs/test-agent-quickstart" ---- - - -## QuickStart -### Prerequisites - -#### Model Download -You can get detailed information about the model and download the model files from [modelscope](https://modelscope.cn/models/codefuse-ai/TestGPT-7B) or [huggingface](https://huggingface.co/codefuse-ai/TestGPT-7B). -Please note: -需要注意的是: -If you download the model through modelscope, refer to the download instructions: [Download Instructions]((https://www.modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%B8%8B%E8%BD%BD#%E4%BD%BF%E7%94%A8Git%E4%B8%8B%E8%BD%BD%E6%A8%A1%E5%9E%8B)); -If you download the model through huggingface, please make sure you have proper access to huggingface. - -#### Environment Installation -- python>=3.8 -- transformers==4.33.2 - -```plain -git clone https://github.com/codefuse-ai/Test-Agent -cd Test-Agent -pip install -r requirements.txt -``` - -Before starting to run the TestGPT-7B model, please ensure that your execution environment has about 14GB of VRAM. - - -### Starting the Service - -The project provides the ability to quickly set up a web UI for a more intuitive display of model interactions and effects. 
We can use a few simple commands to wake up the front-end page and call the model capabilities in real time. In the project directory, start the following services in order: - -1.**Start controller** -![controller](https://github.com/codefuse-ai/Test-Agent/assets/103973989/e68ce187-c9f1-4ce8-9d59-ff9d8348d0ac) -python3 -m chat.server.controller - -2.**Start model worker** -![work](https://github.com/codefuse-ai/Test-Agent/assets/103973989/073e4e79-4005-4c98-87f7-0eaa0b2b1e22) -python3 -m chat.server.model_worker --model-path models/TestGPT-7B --device mps - -(models/TestGPT-7B is the actual model file path) - -For the launch method, you can choose from several configuration options as needed: -- --device mps for enabling GPU acceleration on Mac computers (Apple Silicon or AMD GPUs); -- --device xpu for enabling acceleration on Intel XPU (Intel Data Center and Arc A-Series GPUs): - - Install [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html) - - Set the OneAPI environment variable: source /opt/intel/oneapi/setvars.sh -- --device npu for enabling acceleration on Huawei AI processors; - - Install [Ascend PyTorch Adapter](https://github.com/Ascend/pytorch) - - 设置CANN环境变量:source /usr/local/Ascend/ascend-toolkit/set_env.sh -- --device cpu for running using only CPU, no GPU needed; -- --num-gpus 2 to specify the option of running GPUs concurrently. - - -3. **Start the web service** -python3 -m chat.server.gradio_testgpt -![web](https://github.com/codefuse-ai/Test-Agent/assets/103973989/340dae35-573b-4046-a3e8-e87a91453601) -Once the service is ready, you can open the local web service address http://0.0.0.0:7860 and see the complete front-end page. At the bottom of the page, there are two examples: 【Single-test Generation】 and 【Assert Completion】. After clicking the button, a sample text will be automatically generated in the input box. Clicking the Send button will trigger the model to run. 
After waiting patiently for a while (running time depends on the performance of your machine), you can see the complete answer. -![demo](https://github.com/codefuse-ai/Test-Agent/assets/103973989/fd24274c-729b-4ce7-8763-a083b39300fb) - +--- +store: + title: Test-Agent + version: main +group: + title: 🌱 Test-Agent + order: -1 +title: QuickStart +order: 0 +toc: content +--- + +## QuickStart + +### Prerequisites + +#### Model Download + +You can get detailed information about the model and download the model files from [modelscope](https://modelscope.cn/models/codefuse-ai/TestGPT-7B) or [huggingface](https://huggingface.co/codefuse-ai/TestGPT-7B). +Please note: +需要注意的是: +If you download the model through modelscope, refer to the download instructions: [Download Instructions](https://www.modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%B8%8B%E8%BD%BD#%E4%BD%BF%E7%94%A8Git%E4%B8%8B%E8%BD%BD%E6%A8%A1%E5%9E%8B); +If you download the model through huggingface, please make sure you have proper access to huggingface. + +#### Environment Installation + +- python>=3.8 +- transformers==4.33.2 + +```plain +git clone https://github.com/codefuse-ai/Test-Agent +cd Test-Agent +pip install -r requirements.txt +``` + +Before starting to run the TestGPT-7B model, please ensure that your execution environment has about 14GB of VRAM. + +### Starting the Service + +The project provides the ability to quickly set up a web UI for a more intuitive display of model interactions and effects. We can use a few simple commands to wake up the front-end page and call the model capabilities in real time. 
In the project directory, start the following services in order: + +1.**Start controller** +![controller](https://github.com/codefuse-ai/Test-Agent/assets/103973989/e68ce187-c9f1-4ce8-9d59-ff9d8348d0ac) +python3 -m chat.server.controller + +2.**Start model worker** +![work](https://github.com/codefuse-ai/Test-Agent/assets/103973989/073e4e79-4005-4c98-87f7-0eaa0b2b1e22) +python3 -m chat.server.model_worker --model-path models/TestGPT-7B --device mps + +(models/TestGPT-7B is the actual model file path) + +For the launch method, you can choose from several configuration options as needed: + +- --device mps for enabling GPU acceleration on Mac computers (Apple Silicon or AMD GPUs); +- --device xpu for enabling acceleration on Intel XPU (Intel Data Center and Arc A-Series GPUs): + - Install [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html) + - Set the OneAPI environment variable: source /opt/intel/oneapi/setvars.sh +- --device npu for enabling acceleration on Huawei AI processors; + - Install [Ascend PyTorch Adapter](https://github.com/Ascend/pytorch) + - 设置 CANN 环境变量:source /usr/local/Ascend/ascend-toolkit/set_env.sh +- --device cpu for running using only CPU, no GPU needed; +- --num-gpus 2 to specify the option of running GPUs concurrently. + +3. **Start the web service** + python3 -m chat.server.gradio_testgpt + ![web](https://github.com/codefuse-ai/Test-Agent/assets/103973989/340dae35-573b-4046-a3e8-e87a91453601) + Once the service is ready, you can open the local web service address http://0.0.0.0:7860 and see the complete front-end page. At the bottom of the page, there are two examples: 【Single-test Generation】 and 【Assert Completion】. After clicking the button, a sample text will be automatically generated in the input box. Clicking the Send button will trigger the model to run. 
After waiting patiently for a while (running time depends on the performance of your machine), you can see the complete answer. + ![demo](https://github.com/codefuse-ai/Test-Agent/assets/103973989/fd24274c-729b-4ce7-8763-a083b39300fb) diff --git a/docs/docs/developer-docs/Test-Agent/main/quickstart.zh-CN.md b/docs/docs/developer-docs/Test-Agent/main/quickstart.zh-CN.md new file mode 100644 index 0000000..e5024d9 --- /dev/null +++ b/docs/docs/developer-docs/Test-Agent/main/quickstart.zh-CN.md @@ -0,0 +1,67 @@ +--- +store: + title: Test-Agent + version: main +group: + title: 🌱 Test-Agent + order: -1 +title: 快速开始 +order: 0 +toc: content +--- + +## 快速使用(QuickStart) + +### 前置准备 + +#### 模型下载 + +您可在[modelscope](https://modelscope.cn/models/codefuse-ai/TestGPT-7B)或[huggingface](https://huggingface.co/codefuse-ai/TestGPT-7B)上获取到模型的详细信息并下载模型文件。 +需要注意的是: +1)如果您通过 modelscope 下载模型,下载方式可参考:[下载说明](https://www.modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%B8%8B%E8%BD%BD#%E4%BD%BF%E7%94%A8Git%E4%B8%8B%E8%BD%BD%E6%A8%A1%E5%9E%8B); +2)如果您通过 huggingface 下载模型,请确保您可以正常访问 huggingface。 + +#### 环境安装 + +- python>=3.8 +- transformers==4.33.2 + +```plain +git clone https://github.com/codefuse-ai/Test-Agent +cd Test-Agent +pip install -r requirements.txt +``` + +在开始运行 TestGPT-7B 模型之前,请确保你的执行环境拥有大约 14GB 的显存。 + +### 启动服务 + +项目提供了网页端快速搭建 UI 的能力能够更直观的展示模型交互和效果,我们可以使用简单的几个命令把前端页面唤醒并实时调用模型能力。在项目目录下,依次启动以下服务: + +1.**启动 controller** +![controller](https://github.com/codefuse-ai/Test-Agent/assets/103973989/e68ce187-c9f1-4ce8-9d59-ff9d8348d0ac) +python3 -m chat.server.controller + +2.**启动模型 worker** +![work](https://github.com/codefuse-ai/Test-Agent/assets/103973989/073e4e79-4005-4c98-87f7-0eaa0b2b1e22) +python3 -m chat.server.model_worker --model-path models/TestGPT-7B --device mps + +(models/TestGPT-7B 为实际模型文件路径) + +对于启动方式,可以按需选择以下几种配置选项: + +- --device mps 用于在 Mac 电脑上开启 GPU 加速的选项(Apple Silicon 或 AMD GPUs); +- --device xpu 用于在 Intel XPU 上开启加速的选项(Intel Data Center and Arc A-Series GPUs); + - 
需安装[Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html) + - 设置 OneAPI 环境变量:source /opt/intel/oneapi/setvars.sh +- --device npu 用于在华为 AI 处理器上开启加速的选项; + - 需安装[Ascend PyTorch Adapter](https://github.com/Ascend/pytorch) + - 设置 CANN 环境变量:source /usr/local/Ascend/ascend-toolkit/set_env.sh +- --device cpu 单独使用 CPU 运行的选项,不需要 GPU; +- --num-gpus 2 指定并发 gpu 运行的选项。 + +3. **启动 web 服务** + python3 -m chat.server.gradio_testgpt + ![web](https://github.com/codefuse-ai/Test-Agent/assets/103973989/340dae35-573b-4046-a3e8-e87a91453601) + 待服务准备就绪后,我们可以打开本地启动的 web 服务地址 http://0.0.0.0:7860 ,就能看到完整的前端页面了。在页面下方包含了【单测生成】和【Assert 补全】的两个例子,点击按钮后会自动生成一段样例文本到输入框中,点击 Send 按钮就会触发模型运行,之后耐心等待一段时间后(运行时间视本机性能而定)即可看到完整的回答了。 + ![demo](https://github.com/codefuse-ai/Test-Agent/assets/103973989/fd24274c-729b-4ce7-8763-a083b39300fb) diff --git a/docs/docs/devops_eval/tool_learning_info/index.html b/docs/docs/devops_eval/tool_learning_info/index.html deleted file mode 100644 index 99d2215..0000000 --- a/docs/docs/devops_eval/tool_learning_info/index.html +++ /dev/null @@ -1,914 +0,0 @@ - - - - - - - - - · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    -
    -
    - - -

    数据样例

    -

    在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下:

    -

    Function Call的数据格式

    - - - - - - - - - - - - - - - - - - - - -
    Input KeyInput TypeInput Description
    functionsList[Swagger]工具集合
    chatroundsList[chatround]多轮对话数据
    -

    chatrounds的数据格式

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Input KeyInput TypeInput Description
    rolestring角色名称,包含三种类别,user、assistant、function
    namestring若role为function,则存在name字段,为function的名称
    contentstringrole的返回内容
    function_calldict工具调用
    -
    {
    -    "functions":
    -    [
    -        {
    -            "name": "get_fudan_university_scoreline",
    -            "description": "查询复旦大学往年分数线,例如:查询2020年复旦大学的分数线",
    -            "parameters":
    -            {
    -                "type": "object",
    -                "properties":
    -                {
    -                    "year":
    -                    {
    -                        "type": "string",
    -                        "description": "年份,例如:2020,2019,2018"
    -                    }
    -                },
    -                "required":
    -                [
    -                    "year"
    -                ]
    -            }
    -        }
    -    ],
    -    "chatrounds":
    -    [
    -        {
    -            "role": "system",
    -            "content": "CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。\n你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。"
    -        },
    -        {
    -            "role": "user",
    -            "content": "查询2020年复旦大学的分数线"
    -        },
    -        {
    -            "role": "assistant",
    -            "content": null,
    -            "function_call":
    -            {
    -                "name": "get_fudan_university_scoreline",
    -                "arguments": "{\n  \"year\": \"2020\"\n}"
    -            }
    -        },
    -        {
    -            "role": "function",
    -            "name": "get_fudan_university_scoreline",
    -            "content": "{\n    \"scoreline\":{\n        \"文科一批\": 630,    \n        \"文科二批\": 610,  \n        \"理科一批\": 650,  \n        \"理科二批\": 630  \n    }\n}"
    -        },
    -        {
    -            "role": "assistant",
    -            "content": "2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分"
    -        }
    -    ]
    -}
    -

    上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。

    -

    评测指标

    -

    由于一般通用模型无法具备工具调用的能力,因此在进行Tool Learn-Eval评测之前需要对通用模型进行微调,先让模型学会工具使用的基本范式

    -

    下面,我们定义了几种评估工具使用的指标:

    - -

    ②③④⑤的和为1,代表工具调用失败的总数,⑤工具幻觉是工具名识别失败的一种特殊情况

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/devops_eval/tutorial/index.html b/docs/docs/devops_eval/tutorial/index.html deleted file mode 100644 index eedfb2b..0000000 --- a/docs/docs/devops_eval/tutorial/index.html +++ /dev/null @@ -1,916 +0,0 @@ - - - - - - - - - · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    -
    -
    - - -

    Evaluate Tutorial

    -

    🚀 How to Evaluate

    -

    If you need to test your own huggingface-formatted model, the overall steps are as follows:

    -
      -
    1. Write the loader function for the model.
    2. -
    3. Write the context_builder function for the model.
    4. -
    5. Register the model in the configuration file.
    6. -
    7. Run the testing script. -If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing.
    8. -
    -

    1. Write the loader function

    -

    If the model requires additional processing after loading (e.g. adjusting the tokenizer), you need to inherit the ModelAndTokenizerLoader class in src.context_builder.context_builder_family.py and override the corresponding load_model and load_tokenizer functions. You can refer to the following example:

    -
    class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader):
    -    def __init__(self):
    -      super().__init__()
    -      pass
    -    
    -    def load_model(self, model_path: str):
    -        model = super().load_model(model_path)
    -        model.generation_config = GenerationConfig.from_pretrained(model_path)
    -        return model
    -
    -    def load_tokenizer(self, model_path: str):
    -        tokenizer = super().load_tokenizer(model_path)
    -    
    -        # read generation config
    -        with open(model_path + '/generation_config.json', 'r') as f:
    -        generation_config = json.load(f)
    -        tokenizer.pad_token_id = generation_config['pad_token_id']
    -        tokenizer.eos_token_id = generation_config['eos_token_id']
    -        return tokenizer
    -

    2. Write the context_builder function for the Model

    -

    If the input needs to be converted to a specific format (e.g. chatml format or other human-bot formats), you need to inherit the ContextBuilder class in src.context_builder.context_builder_family and override the make_context function. This function is used to convert the input to the corresponding required format. An example is shown below:

    -
    class QwenChatContextBuilder(ContextBuilder):
    -    def __init__(self):
    -        super().__init__()
    -    
    -    def make_context(
    -        self,
    -        model,
    -        tokenizer, 
    -        query: str,
    -        system: str = "you are a helpful assistant"
    -    ):
    -        '''
    -        model: PretrainedModel
    -        tokenizer: PretrainedTokenzier
    -        query: Input string
    -        system: System prompt if needed
    -        '''
    -        im_start, im_end = "<|im_start|>", "<|im_end|>"
    -        im_start_tokens = [tokenizer.im_start_id]
    -        im_end_tokens = [tokenizer.im_end_id]
    -        nl_tokens = tokenizer.encode("\n")
    -
    -        def _tokenize_str(role, content):
    -            return f"{role}\n{content}", tokenizer.encode(
    -                role, allowed_special=set()
    -            ) + nl_tokens + tokenizer.encode(content, allowed_special=set())
    -
    -        system_text, system_tokens_part = _tokenize_str("system", system)
    -        system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
    -
    -        raw_text = ""
    -        context_tokens = []
    -
    -        context_tokens = system_tokens + context_tokens
    -        raw_text = f"{im_start}{system_text}{im_end}" + raw_text
    -        context_tokens += (
    -            nl_tokens
    -            + im_start_tokens
    -            + _tokenize_str("user", query)[1]
    -            + im_end_tokens
    -            + nl_tokens
    -            + im_start_tokens
    -            + tokenizer.encode("assistant")
    -            + nl_tokens
    -        )
    -        raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
    -        return raw_text, context_tokens
    -

    3. Register the model in the configuration file

    -

    Go to the model_conf.json file in the conf directory and register the corresponding model name and the loader and context_builder that will be used for this model. Simply write the class names defined in the first and second steps for the loader and context_builder. Here is an example:

    -
    {
    -  "Qwen-Chat": {
    -  "loader": "QwenModelAndTokenizerLoader",
    -  "context_builder": "QwenChatContextBuilder"
    -  }
    -}
    -

    4. Execute the testing script

    -

    Run the following code to initiate the test:

    -
    # model_path: path to the model for testing
    -# model_name: the model name corresponding to the model in the configuration file, default is Default, which represents using the default loader and context_builder
    -# model_conf_path: path to the model configuration file, usually the devopseval_dataset_fp.json file in the conf directory
    -# eval_dataset_list: the names of the datasets to be tested, default is all to test all datasets, if you need to test one or more datasets, use the # symbol to connect them, for example: dataset1#dataset2
    -# eval_dataset_fp_conf_path: path to the dataset configuration file
    -# eval_dataset_type: the type of testing, only supports the default test type of test dataset
    -# data_path: path to the evaluation dataset, fill in the downloaded dataset address
    -# k_shot: supports 0-5, represents the number of example prefixes added for few-shot
    -
    -python src/run_eval.py \
    ---model_path path_to_model \
    ---model_name model_name_in_conf \
    ---model_conf_path path_to_model_conf \
    ---eval_dataset_list all \
    ---eval_dataset_fp_conf_path path_to_dataset_conf \
    ---eval_dataset_type test \
    ---data_path path_to_downloaded_devops_eval_data \
    ---k_shot 0
    -

    For example, if the evaluation dataset is downloaded to folder1, the code is placed in folder2, and the model is in folder3, and the model does not require custom loader and context_builder, and all zero-shot scores of all datasets need to be tested, you can use the following script to initiate the test:

    -
    python folder2/src/run_eval.py \
    ---model_path folder3 \
    ---model_name Default \
    ---model_conf_path folder1/conf/model_conf.json \
    ---eval_dataset_list all \
    ---eval_dataset_fp_conf_path folder1/conf/devopseval_dataset_fp.json \
    ---eval_dataset_type test \
    ---data_path folder2 \
    ---k_shot 0
    -

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/en_overview/index.html b/docs/docs/en_overview/index.html deleted file mode 100644 index c0cf4dd..0000000 --- a/docs/docs/en_overview/index.html +++ /dev/null @@ -1,805 +0,0 @@ - - - - - - - - -overview · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    overview

    -
    -
    - - -

    - -

    - -

    Hello World! This is CodeFuse! -

    -

    CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native sotware developing, covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis.

    -

    - -

    -We are passionating about creating innovative open-source solutions that empower developers throughout the software development process as shown above. We also encourage engineers and researchers within this community to join us in co-constructing/improving CodeFuse. -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/fastchat-zh/index.html b/docs/docs/fastchat-zh/index.html deleted file mode 100644 index 4f44793..0000000 --- a/docs/docs/fastchat-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E6%9C%AC%E5%9C%B0%E7%A7%81%E6%9C%89%E5%8C%96%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%8F%A3%E6%8E%A5%E5%85%A5/ - - - - - - diff --git a/docs/docs/fastchat/index.html b/docs/docs/fastchat/index.html deleted file mode 100644 index eb46019..0000000 --- a/docs/docs/fastchat/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /docs/LLM-Configuration/ - - - - - - diff --git a/docs/docs/fastertransformer4codefuse-zh/index.html b/docs/docs/fastertransformer4codefuse-zh/index.html deleted file mode 100644 index 5ea0965..0000000 --- a/docs/docs/fastertransformer4codefuse-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/fastertransformer4codefuse-zh/ - - - - - - diff --git a/docs/docs/fastertransformer4codefuse/index.html b/docs/docs/fastertransformer4codefuse/index.html deleted file mode 100644 index 5de3f80..0000000 --- a/docs/docs/fastertransformer4codefuse/index.html +++ /dev/null @@ -1,799 +0,0 @@ - - - - - - - - -FasterTransformer4CodeFuse · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    FasterTransformer4CodeFuse

    -
    -
    - - -

    FasterTransformer4CodeFuse

    -

    FasterTransformer4CodeFuse

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/index.html b/docs/docs/index.html deleted file mode 100644 index 1849c07..0000000 --- a/docs/docs/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /docs/en_overview/ - - - - - - diff --git a/docs/docs/index.xml b/docs/docs/index.xml deleted file mode 100644 index 22a3ba7..0000000 --- a/docs/docs/index.xml +++ /dev/null @@ -1,333 +0,0 @@ - - - - Docs on CodeFuse-AI - /docs/ - Recent content in Docs on CodeFuse-AI - Hugo -- gohugo.io - en-US - - - - /docs/devops_eval/tool_learning_info/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/devops_eval/tool_learning_info/ - 数据样例 在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下: Function Call的数据格式 Input Key Input Type Input Description functions List[Swagger] 工具集合 chatrounds List[chatround] 多轮对话数据 chatrounds的数据格式 Input Key Input Type Input Description role string 角色名称,包含三种类别,user、assistant、function name string 若role为function,则存在name字段,为function的名称 content string role的返回内容 function_call dict 工具调用 { &#34;functions&#34;: [ { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;description&#34;: &#34;查询复旦大学往年分数线,例如:查询2020年复旦大学的分数线&#34;, &#34;parameters&#34;: { &#34;type&#34;: &#34;object&#34;, &#34;properties&#34;: { &#34;year&#34;: { &#34;type&#34;: &#34;string&#34;, &#34;description&#34;: &#34;年份,例如:2020,2019,2018&#34; } }, &#34;required&#34;: [ &#34;year&#34; ] } } ], &#34;chatrounds&#34;: [ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。\n你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。&#34; }, { &#34;role&#34;: &#34;user&#34;, &#34;content&#34;: &#34;查询2020年复旦大学的分数线&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: null, &#34;function_call&#34;: { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;arguments&#34;: 
&#34;{\n \&#34;year\&#34;: \&#34;2020\&#34;\n}&#34; } }, { &#34;role&#34;: &#34;function&#34;, &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;content&#34;: &#34;{\n \&#34;scoreline\&#34;:{\n \&#34;文科一批\&#34;: 630, \n \&#34;文科二批\&#34;: 610, \n \&#34;理科一批\&#34;: 650, \n \&#34;理科二批\&#34;: 630 \n }\n}&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: &#34;2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分&#34; } ] } 上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。 - - - - /docs/devops_eval/tutorial/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/devops_eval/tutorial/ - Evaluate Tutorial 🚀 How to Evaluate If you need to test your own huggingface-formatted model, the overall steps are as follows: Write the loader function for the model. Write the context_builder function for the model. Register the model in the configuration file. Run the testing script. If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e. - - - Abstract - /docs/abstract/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/abstract/ - Abstract With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. 
- - - ChatBot-RoadMap - /docs/chatbot-roadmap/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/chatbot-roadmap/ - 中文&nbsp | &nbspEnglish&nbsp RoadMap Roadmap Overview Sandbox Environment ✅ Isolated sandbox environment for code execution ✅ File upload and download ✅ Support for Java execution environment ⬜ Vector Database &amp; Retrieval ✅ Task retrieval ✅ Tool retrieval ✅ Prompt Management ✅ Memory Management ✅ Multi Agent Framework ✅ PRD (Product Requirement Document), system analysis, interface design ⬜ Generate code based on requirement documents, system analysis, and interface design ⬜ Automated testing, automated debugger ⬜ Operations process integration (ToolLearning) ⬜ Fully automated end-to-end process ⬜ Integration with LLM based on fastchat ✅ Integration with Text Embedding based on sentencebert ✅ Improved vector loading speed ✅ Connector ✅ React Mode based on langchain ✅ Tool retrieval completed with langchain ✅ General Capability for Web Crawl ⬜ Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. - - - Codefuse-ChatBot Development by Private Knowledge Augmentation - /docs/codefuse-chatbot/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-chatbot/ - 中文&nbsp | &nbspEnglish&nbsp This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface. 
📜 Contents 🤝 Introduction 🧭 Technical Route 🤝 Introduction 💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. - - - Codefuse-ChatBot Development by Private Knowledge Augmentation - /docs/overview/codefuse-chatbot/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-chatbot/ - 中文&nbsp | &nbspEnglish&nbsp This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface. 📜 Contents 🤝 Introduction 🧭 Technical Route 🤝 Introduction 💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. - - - codefuse-devops-eval - /docs/codefuse-devops-eval/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-eval/ - Comming soon - - - codefuse-devops-eval - /docs/overview/codefuse-devops-eval/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-devops-eval/ - DevOps-Eval is a comprehensive evaluation suite specifically designed for foundation models in the DevOps field. We hope DevOps-Eval could help developers, especially in the DevOps field, track the progress and analyze the important strengths/shortcomings of their models. 
📚 This repo contains questions and exercises related to DevOps, including the AIOps, ToolLearning; 💥️ There are currently 7486 multiple-choice questions spanning 8 diverse general categories, as shown below. 🔥 There are a total of 2840 samples in the AIOps subcategory, covering scenarios such as log parsing, time series anomaly detection, time series classification, time series forecasting, and root cause analysis. - - - codefuse-devops-model - /docs/codefuse-devops-model/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model/ - Comming soon - - - codefuse-devops-model - /docs/overview/codefuse-devops-model/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-devops-model/ - codeFuse-devops-model DevOps-Model is a large language model for the Chinese DevOps field jointly released by Ant Group and Peking University. By collecting professional data related to the DevOps domain and conducting additional training and alignment on the model, a large model has been produced to help engineers enhance efficiency throughout the entire development and operations lifecycle. This fills the current gap in large models within the DevOps domain, with the aim to provide solutions to any problems by asking DevOps-Model! - - - CodeFuse-MFT-VLM - /docs/overview/codefuse-mft-vlm/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-mft-vlm/ - CodeFuse-VLM CodeFuse-VLM is a Multimodal LLM(MLLM) framework that provides users with multiple vision encoders, multimodal alignment adapters, and LLMs. Through CodeFuse-VLM framework, users are able to customize their own MLLM model to adapt their own tasks. As more and more models are published on Huggingface community, there will be more open-source vision encoders and LLMs. Each of these models has their own specialties, e.g. Code-LLama is good at code-related tasks but has poor performance for Chinese tasks. 
- - - CodeFuse-ModelCache - /docs/codefuse-modelcache/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache/ - CodeFuse-ModelCache CodeFuse-ModelCache - - - CodeFuse-ModelCache - /docs/overview/codefuse-modelcache/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-modelcache/ - 中文 | English Contents news Introduction Modules Acknowledgements Contributing news 🔥🔥[2023.12.10] we integrate LLM embedding frameworks such as &rsquo;llmEmb&rsquo;, &lsquo;ONNX&rsquo;, &lsquo;PaddleNLP&rsquo;, &lsquo;FastText&rsquo;, alone with the image embedding framework &rsquo;timm&rsquo;, to bolster embedding functionality. 🔥🔥[2023.11.20] codefuse-ModelCache has integrated local storage, such as sqlite and faiss, providing users with the convenience of quickly initiating tests. [2023.08.26] codefuse-ModelCache&hellip; Introduction Codefuse-ModelCache is a semantic cache for large language models (LLMs). By caching pre-generated model results, it reduces response time for similar requests and improves user experience. - - - CodeFuse-Query - /docs/codefuse-query/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query/ - CodeFuse-Query CodeFuse-Query - - - CodeFuse-Query - /docs/overview/codefuse-query/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-query/ - CodeFuse-Query With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. 
- - - CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model - /docs/codefuse-evalution/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-evalution/ - CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model 简体中文| CodeFuseEval on ModelScope| CodeFuseEval on Hugging Face CodeFuseEval is a Code Generation benchmark that combines the multi-tasking scenarios of CodeFuse Model with the benchmarks of HumanEval-x and MBPP. This benchmark is designed to evaluate the performance of models in various multi-tasking tasks, including code completion, code generation from natural language, test case generation, cross-language code translation, and code generation from Chinese commands, among others. - - - Data - /docs/data/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/data/ - ⏬ Data Download Method 1: Download the zip file (you can also simply open the following link with the browser): wget https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip then unzip it and you may load the data with pandas: import os import pandas as pd File_Dir=&#34;devopseval-exam&#34; test_df=pd.read_csv(os.path.join(File_Dir,&#34;test&#34;,&#34;UnitTesting.csv&#34;)) Method 2: Directly load the dataset using Hugging Face datasets: from datasets import load_dataset dataset=load_dataset(r&#34;DevOps-Eval/devopseval-exam&#34;,name=&#34;UnitTesting&#34;) print(dataset[&#39;val&#39;][0]) # {&#34;id&#34;: 1, &#34;question&#34;: &#34;单元测试应该覆盖以下哪些方面?&#34;, &#34;A&#34;: &#34;正常路径&#34;, &#34;B&#34;: &#34;异常路径&#34;, &#34;C&#34;: &#34;边界值条件&#34;,&#34;D&#34;: 所有以上,&#34;answer&#34;: &#34;D&#34;, &#34;explanation&#34;: &#34;&#34;} ``` 👀 Notes To facilitate usage, we have organized the category name handlers and English/Chinese names corresponding to 55 subcategories. 
- - - Evaluate - /docs/codefuse-devops-eval-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-eval-quickstart/ - 🚀 How to Evaluate If you need to test your own huggingface-formatted model, the overall steps are as follows: Write the loader function for the model. Write the context_builder function for the model. Register the model in the configuration file. Run the testing script. If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing. - - - FasterTransformer4CodeFuse - /docs/fastertransformer4codefuse/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/fastertransformer4codefuse/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - FasterTransformer4CodeFuse - /docs/overview/fastertransformer4codefuse/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/fastertransformer4codefuse/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - Feature - /docs/codefuse-modelcache-feature/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-feature/ - From a functional standpoint, to address Huggingface network issues and improve inference speed, local inference capabilities for embeddings have been added. Given some limitations in the SQLAlchemy framework, we have rewritten the relational database interaction module for more flexible database operations. In practice, large model products need to interface with multiple users and models; thus, support for multi-tenancy has been added to ModelCache, as well as preliminary compatibility with system commands and multi-turn conversations. 
- - - GodelLanguage - /docs/codefuse-query-godellanguage/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-godellanguage/ - GödelScript Query Language Index GödelScript Basic Concepts and Syntax Introduction Basic Program Structure Fundamental Types and Compiler Built-in Functions Functions Statements Schema Database Trait Import Query Ungrounded Error: Unassigned/Unbound Error Query Examples Java Python JavaScript XML Go Query Debugging and Optimization Tips Schema Arguments Causing Excessively Large Cartesian Products Multiple Layers of for Causing Excessively Large Cartesian Products Avoid Misusing @inline and Strategies for Necessary Inline Optimization Using Query Scripts on a Local Machine Basic Concepts and Syntax of GödelScript Introduction // script fn hello(greeting: string) -&gt; bool { return greeting = &#34;hello world! - - - How to better configure your cache - /docs/codefuse-modelcache-config/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-config/ - Environment Dependencies Python version: 3.8 or higher To install dependencies: pip install requirements.txt Service Startup Before starting the service, the following environment configurations should be performed: Install relational database MySQL, import SQL to create tables, SQL file: reference_doc/create_table.sql Install vector database Milvus Add database access information to the configuration files, which are: modelcache/config/milvus_config.ini modelcache/config/mysql_config.ini Download offline model bin files, refer to: https://huggingface.co/shibing624/text2vec-base-chinese/tree/main, and place the downloaded bin files into the model/text2vec-base-chinese folder Start the backend service using the flask4modelcache. - - - Introduction - /docs/codefuse-query-introduction/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-introduction/ - Introduction CodeFuse-Query is a code data platform that supports structured analysis of various programming languages. 
The core idea is to transform all code into data using various language parsers and to store this data in a structured format within a code database. Data analysis is then performed according to business needs using a custom query language, as shown in the diagram below: 2.1 Architecture of CodeFuse-Query Overall, the CodeFuse-Query code data platform is divided into three main parts: the code data model, the code query DSL (Domain-Specific Language), and platform productization services. - - - Introduction - /docs/mftcoder-introduction/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-introduction/ - Introduction High Accuracy and efficiency Multi-task Fine-tuning framework for Code LLMs. MFTCoder is an open-source project of CodeFuse for accurate and efficient Multi-task Fine-tuning(MFT) on Large Language Models(LLMs), especially on Code-LLMs(large language model for code tasks). Moreover, we open source Code LLM models and code-related datasets along with the MFTCoder framework. In MFTCoder, we released two codebases for finetuning Large Language Models: MFTCoder-accelerate is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. - - - LLM-Configuration - /docs/LLM-Configuration/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/LLM-Configuration/ - 中文&nbsp | &nbspEnglish&nbsp Local Privatization/Large Model Interface Access Leveraging open-source LLMs (Large Language Models) and Embedding models, this project enables offline private deployment based on open-source models. In addition, the project supports the invocation of OpenAI API. Local Privatization Model Access Example of model address configuration, modification of the model_config.py configuration: # Recommendation: Use Hugging Face models, preferably the chat models, and avoid using base models, which may not produce correct outputs. 
- - - MFTCoder - /docs/mftcoder/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder/ - MFTCoder MFTCoder - - - MFTCoder Training: Atorch Framework - /docs/mftcoder-atorch/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-atorch/ - [中文] [English] 1. Updates 🔥 MFTCoder supports fine-tuning of the GPTNeoX model under the Atorch framework. 🔥 MFTCoder supports both fully supervised fine-tuning. 🔥 MFTCoder supports LoRA using the Atorch Framework. 2. Data Format 2.1 Training Data Format The training data is in a uniformed JSONL format, in which each line of data has the following JSON format. The &ldquo;chat_rounds&rdquo; field is required, and other fields can be added or removed based on the specific need. - - - MFTCoder-accelerate: Training Framework with Accelerate and DeepSpeed/FSDP - /docs/mftcoder-accelerate/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-accelerate/ - [中文] [English] 1. Updates 🔥 MFTCoder-accelerate supports Full-parameters/LoRA using accelerate + FSDP Framework; 🔥 MFTCoder-accelerate supports MFT/SFT on more new mainstream open-source base models: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; 🔥 MFTCoder-accelerate supports Self-Paced Loss for Convergence Balance; 🔥 MFTCoder-accelerate supports Full-parameters/QLoRA/LoRA using accelerate + DeepSpeed Framework; 🔥 MFTCoder-accelerate supports Multitask Fine-Tuning(MFT), which is able to balance diffenrent tasks in data level. 🔥 MFTCoder-accelerate supports finetuning most of mainstream open-source base models: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen. - - - MFTCoder: High Accuracy and Efficiency Multi-task Fine-Tuning Framework - /docs/overview/mftcoder/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/mftcoder/ - 🤗 HuggingFace • 🤖 ModelScope [中文] [English] Contents News Articles Introduction Requirements Training Models Datasets Star History News 🔥🔥🔥 [2024/01/17] We released MFTCoder v0.3.0, mainly for MFTCoder-accelerate. 
It now supports new models like Mixtral(MoE), DeepSeek-coder, chatglm3. It supports FSDP as an option. It also supports Self-paced Loss as a solution for convergence balance in Multitask Fine-tuning. 🔥🔥🔥 [2024/01/17] CodeFuse-DeepSeek-33B has been released, achieving a pass@1 (greedy decoding) score of 78. - - - overview - /docs/en_overview/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/en_overview/ - HuggingFace | ModelScope Hello World! This is CodeFuse! CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native sotware developing, covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis. We are passionating about creating innovative open-source solutions that empower developers throughout the software development process as shown above. We also encourage engineers and researchers within this community to join us in co-constructing/improving CodeFuse. - - - QuickStart - /docs/codefuse-chatbot-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-chatbot-quickstart/ - 中文&nbsp | &nbspEnglish&nbsp 🚀 Quick Start To deploy private models, please install the NVIDIA driver by yourself. This project has been tested on Python 3.9.18 and CUDA 11.7 environments, as well as on Windows and macOS systems with x86 architecture. For Docker installation, private LLM access, and related startup issues, see: Start-detail&hellip; Preparation of Python environment It is recommended to use conda to manage the python environment (optional) # Prepare conda environment conda create --name Codefusegpt python=3. 
- - - QuickStart - /docs/codefuse-evalution-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-evalution-quickstart/ - Generation environment: CodeFuse-13B: Python 3.8 or above,PyTorch 1.12 or above, with a recommendation for 2.0 or above, Transformers 4.24.0 or above ,CUDA 11.4 or above (for GPU users and flash-attention users, this option should be considered). CodeFuse-CodeLlama-34B:python&gt;=3.8,pytorch&gt;=2.0.0,transformers==4.32.0,Sentencepiece,CUDA 11. Evaluation Environment The evaluation of the generated codes involves compiling and running in multiple programming languages. The versions of the programming language environments and packages we use are as follows: Dependency Version Python 3. - - - QuickStart - /docs/codefuse-mft-vlm/quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-mft-vlm/quickstart/ - Contents Install Datasets Multimodal Alignment Visual Instruction Tuning Evaluation Install Please run sh init_env.sh Datasets Here&rsquo;s the table of datasets we used to train CodeFuse-VLM-14B: Dataset Task Type Number of Samples synthdog-en OCR 800,000 synthdog-zh OCR 800,000 cc3m(downsampled) Image Caption 600,000 cc3m(downsampled) Image Caption 600,000 SBU Image Caption 850,000 Visual Genome VQA (Downsampled) Visual Question Answer(VQA) 500,000 Visual Genome Region descriptions (Downsampled) Reference Grouding 500,000 Visual Genome objects (Downsampled) Grounded Caption 500,000 OCR VQA (Downsampled) OCR and VQA 500,000 Please download these datasets on their own official websites. - - - QuickStart - /docs/codefuse-modelcache-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-quickstart/ - ModelCache is easy to use, and you can build a cache testing demo in just one step. 
Quick Start Building a Cache The default interface for Cache is shown below: class Cache: # it should be called when start the cache system def __init__(self): self.has_init = False self.cache_enable_func = None self.embedding_func = None self.post_process_messages_func = None self.config = Config() Before creating a ModelCache, consider the following questions: How will you generate embedding vectors for queries? - - - QuickStart - /docs/codefuse-query-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-quickstart/ - Installation, Configuration, and Running Hardware and Software Requirements Hardware: 4C8G Environment Requirements: Java 1.8 and Python 3.8 or above runtime environments. Please ensure Java and Python executables are available. Sparrow Installation Steps and Guidance The CodeFuse-Query download package is a zip archive that contains tools, scripts, and various files specific to CodeFuse-Query. If you do not have a CodeFuse-Query license, downloading this archive indicates your agreement with the CodeFuse-Query Terms and Conditions. - - - QuickStart - /docs/codefuse-devops-model-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-quickstart/ - Dependency Installation Please install the packages listed in the requirements.txt file from the GitHub address first. You can refer to the following code: pip install -r requirements.txt Model Download Model download information is as follows: 🤗 Huggingface Address - Base Model Aligned Model 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat 🤖 ModelScope Address - Base Model Aligned Model 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat Find the version of the Chat model you want to download; currently, 7B and 14B models are provided. 
- - - QuickStart - /docs/mftcoder-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-quickstart/ - Requirements To begin, ensure that you have successfully installed CUDA (version &gt;= 11.4, preferably 11.7) along with the necessary drivers. Additionally, make sure you have installed torch (version 2.0.1). Next, we have provided an init_env.sh script to simplify the installation of required packages. Execute the following command to run the script: sh init_env.sh We highly recommend training with flash attention(version &gt;= 2.1.0, preferably 2.3.6), please refer to the following link for installation instructions: https://github. - - - QuickStart - /docs/test-agent-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/test-agent-quickstart/ - QuickStart Prerequisites Model Download You can get detailed information about the model and download the model files from modelscope or huggingface. Please note: 需要注意的是: If you download the model through modelscope, refer to the download instructions: Download Instructions; If you download the model through huggingface, please make sure you have proper access to huggingface. Environment Installation python&gt;=3.8 transformers==4.33.2 git clone https://github.com/codefuse-ai/Test-Agent cd Test-Agent pip install -r requirements.txt Before starting to run the TestGPT-7B model, please ensure that your execution environment has about 14GB of VRAM. - - - Release Note - /docs/codefuse-modelcache-release/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-release/ - 时间 功能 版本号 20230430 Completed GPTCache research, open-source process running through OpenAI interface, single-node form 无 20230509 1. Completed technology selection and upstream/downstream interaction scheme 2. Redeveloped database module, replaced SQLAlchemy framework 3. Refactored llm_handler module, compatible with codegpt, adapted codegpt model parameters 数 V0.1.0 20230519 1. Dynamically selected codegpt service mode based on environment 2. 
Capability for local model loading and pre-loading 3. Added dynamic loading capability for local paths based on environment V0. - - - Start-Detail - /docs/chatbot/start-detail/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/chatbot/start-detail/ - 中文&nbsp | &nbspEnglish&nbsp If you need to deploy a privatized model, please install the NVIDIA driver yourself. Preparation of Python environment It is recommended to use conda to manage the python environment (optional) # Prepare conda environment conda create --name Codefusegpt python=3.9 conda activate Codefusegpt Install related dependencies cd Codefuse-ChatBot pip install -r requirements.txt Sandbox Environment Preparation Windows Docker installation: Docker Desktop for Windows supports 64-bit versions of Windows 10 Pro with Hyper-V enabled (Hyper-V is not required for versions v1903 and above), or 64-bit versions of Windows 10 Home v1903 and above. - - - Test-Agent - /docs/test-agent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/test-agent/ - Test-Agent Test-Agent - - - Test-Agent: Your AI Test Assistant - /docs/overview/test-agent-your-ai-test-assistant/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/test-agent-your-ai-test-assistant/ - Local Mac M1 Experience Moda Experience Moda Model Access Link:ModelScope TestGPT-7B What is Test Agent? (Introduction) Test Agent aims to build an &ldquo;intelligent agent&rdquo; in the testing domain, integrating large models with engineering technologies in the quality domain to promote the generational upgrade of quality technology. We look forward to collaborating with community members to create innovative solutions in the testing domain, establish a 24-hour online testing assistant service, and make testing as smooth as silk. 
- - - Toolchain - /docs/codefuse-query-toolchain/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-toolchain/ - Developing Plugins (VSCode) Installation Install from VSCode marketplace (Recommand) VSCode Extension Install from local via VSIX pack Download the plugin. Manually install from vsix: Or use the command directly from the terminal to install: code --install-extension [extension vsix file path] Environment Preparation Sparrow CLI, refer to Section 3 Installation, Configuration, and Running. Extension Features This extension provides the following feature modules: COREF AST Viewer Gödel Language Server Gödel Language Runner COREF AST Viewer The following features need to be enabled in the extension settings. - - - Train Detail - /docs/codefuse-devops-model-train/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-train/ - Training Process According to the literature review, it is known that most domain models are based on conversational models and undergo knowledge infusion through Supervised Fine-Tuning (SFT). However, the QA corpus required for SFT fine-tuning largely comes from ChatGPT generation, which may not fully cover domain knowledge. Therefore, the DevOps-Model adopts a pre-training plus training followed by SFT fine-tuning approach, as illustrated in Figure 2.1. We believe that for large domain models, additional pre-training is necessary. - - - User Case - /docs/codefuse-query-usercase/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-usercase/ - Use Cases Querying Code Features A developer wants to know which String type variables are used in Repo A, so he writes a Gödel script as follows and submits it to the CodeFuse-Query system for results. 
// script use coref::java::* fn out(var: string) -&gt; bool { for(v in Variable(JavaDB::load(&#34;coref_java_src.db&#34;))) { if (v.getType().getName() = &#34;String&#34; &amp;&amp; var = v.getName()) { return true } } } fn main() { output(out()) } Similar needs: querying for classes, functions, variables, return values, call graphs, class inheritance, etc. - - - diff --git a/docs/docs/llm-configuration/index.html b/docs/docs/llm-configuration/index.html deleted file mode 100644 index 69026aa..0000000 --- a/docs/docs/llm-configuration/index.html +++ /dev/null @@ -1,956 +0,0 @@ - - - - - - - - -LLM-Configuration · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    LLM-Configuration

    -
    -
    - - -

    - 中文  |  English  -

    -

    Local Privatization/Large Model Interface Access

    -

    Leveraging open-source LLMs (Large Language Models) and Embedding models, this project enables offline private deployment based on open-source models.

    -

    In addition, the project supports the invocation of OpenAI API.

    -

    Local Privatization Model Access

    -


    Example of model address configuration, modification of the model_config.py configuration:

    -
    # Recommendation: Use Hugging Face models, preferably the chat models, and avoid using base models, which may not produce correct outputs.
    -# Note: When both `llm_model_dict` and `VLLM_MODEL_DICT` are present, the model configuration in `VLLM_MODEL_DICT` takes precedence.
    -# Example of `llm_model_dict` configuration:
    -
    -# 1. If the model is placed under the ~/codefuse-chatbot/llm_models path
    -# Suppose the model address is as follows
    -model_dir: ~/codefuse-chatbot/llm_models/THUDM/chatglm-6b
    -
    -# The reference configuration is as follows
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "THUDM/chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "THUDM/chatglm-6b",
    -}
    -
    -# or If the model address is as follows
    -model_dir: ~/codefuse-chatbot/llm_models/chatglm-6b
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "chatglm-6b",
    -}
    -
    -# 2. If you do not wish to move the model to ~/codefuse-chatbot/llm_models
    -# Also, delete the related code below `Model Path Reset`, see model_config.py for details.
    -# Suppose the model address is as follows
    -model_dir: ~/THUDM/chatglm-6b
    -# The reference configuration is as follows
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "your personl dir/THUDM/chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "your personl dir/THUDM/chatglm-6b",
    -}
    -
    # 3. Specify the model service to be launched, keeping both consistent
    -LLM_MODEL = "chatglm-6b"
    -LLM_MODELs = ["chatglm-6b"]
    -
    # Modification of server_config.py configuration, if LLM_MODELS does not have multiple model configurations, no additional settings are needed.
    -# Modify the configuration of server_config.py#FSCHAT_MODEL_WORKERS
    -"model_name": {'host': DEFAULT_BIND_HOST, 'port': 20057}
    -


    量化模型接入

    -
    # If you need to support the codellama-34b-int4 model, you need to patch fastchat
    -cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# If you need to support the qwen-72b-int4 model, you need to patch fastchat
    -cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -
    -# Quantization requires modification of the llm_api.py configuration
    -# Uncomment `kwargs["gptq_wbits"] = 4` in examples/llm_api.py#559
    -

    Public Large Model Interface Access

    -
    # Modification of model_config.py configuration
    -# ONLINE_LLM_MODEL
    -# Other interface development comes from the langchain-chatchat project, untested due to lack of relevant accounts.
    -# Specify the model service to be launched, keeping both consistent
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -

    外部大模型接口接入示例

    -
    # 1. Implement a new model access class
    -# Refer to ~/examples/model_workers/openai.py#ExampleWorker
    -# Implementing the do_chat function will enable the use of LLM capabilities
    -
    -class XXWorker(ApiModelWorker):
    -    def __init__(
    -            self,
    -            *,
    -            controller_addr: str = None,
    -            worker_addr: str = None,
    -            model_names: List[str] = ["gpt-3.5-turbo"],
    -            version: str = "gpt-3.5",
    -            **kwargs,
    -    ):
    -        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
    -        kwargs.setdefault("context_len", 16384) #TODO 16K模型需要改成16384
    -        super().__init__(**kwargs)
    -        self.version = version
    -
    -    def do_chat(self, params: ApiChatParams) -> Dict:
    -        '''
    -        执行Chat的方法,默认使用模块里面的chat函数。
    -        :params.messages : [
    -            {"role": "user", "content": "hello"}, 
    -            {"role": "assistant", "content": "hello"}
    -            ]
    -        :params.xx: 详情见 ApiChatParams 
    -        要求返回形式:{"error_code": int, "text": str}
    -        '''
    -        return {"error_code": 500, "text": f"{self.model_names[0]}未实现chat功能"}
    -
    -
    -# Finally, complete the registration in ~/examples/model_workers/__init__.py
    -# from .xx import XXWorker
    -
    -# 2. Complete access through an existing model access class
    -# Or directly use the existing relevant large model class for use (lacking relevant account testing, community contributions after testing are welcome)
    -
    # Modification of model_config.py#ONLINE_LLM_MODEL configuration
    -# Enter exclusive model details: version, api_base_url, api_key, provider (consistent with the class name above)
    -ONLINE_LLM_MODEL = {
    -    # Online models. Please set different ports for each online API in server_config.
    -    "openai-api": {
    -        "model_name": "gpt-3.5-turbo",
    -        "api_base_url": "https://api.openai.com/v1",
    -        "api_key": "",
    -        "openai_proxy": "",
    -    },
    -    "example": {
    -        "version": "gpt-3.5",  # Using openai interface as an example
    -        "api_base_url": "https://api.openai.com/v1",
    -        "api_key": "",
    -        "provider": "ExampleWorker",
    -    },
    -}
    -

    Launching Large Model Services

    -
    # start llm-service (optional)  - Launch the large model service separately
    -python examples/llm_api.py
    -
    # Test
    -import openai
    -# openai.api_key = "EMPTY" # Not support yet
    -openai.api_base = "http://127.0.0.1:8888/v1"
    -# Select the model you launched
    -model = "example"
    -# create a chat completion
    -completion = openai.ChatCompletion.create(
    -    model=model,
    -    messages=[{"role": "user", "content": "Hello! What is your name? "}],
    -    max_tokens=100,
    -)
    -# print the completion
    -print(completion.choices[0].message.content)
    -# Once the correct output is confirmed, LLM can be accessed normally.
    -

    or

    -
    # model_config.py#USE_FASTCHAT - Determine whether to integrate local models via fastchat
    -USE_FASTCHAT = "gpt" not in LLM_MODEL
    -python start.py #221 Automatically executes python llm_api.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-accelerate-zh/index.html b/docs/docs/mftcoder-accelerate-zh/index.html deleted file mode 100644 index b284977..0000000 --- a/docs/docs/mftcoder-accelerate-zh/index.html +++ /dev/null @@ -1,900 +0,0 @@ - - - - - - - - -MFTCoder: Accelerate + DeepSpeed/FSDP 框架篇 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder: Accelerate + DeepSpeed/FSDP 框架篇

    -
    -
    - - -

    Generic badge - -GitHub -

    -

    [中文] [English]

    -

    1. 更新

    -

    🔥 MFTCoder-accelerate 新增支持accelerate + FSDP框架, 支持全量微调和LoRA;

    -

    🔥 MFTCoder-accelerate 支持最新更多主流开源模型: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3;

    -

    🔥 MFTCoder-accelerate 新增self-paced Loss, 用于收敛均衡;

    -

    🔥 MFTCoder-accelerate 支持使用accelerate + DeepSpeed框架下支持 全量参数/QLoRA/LoRA微调;

    -

    🔥 MFTCoder-accelerate 在训练中支持了多任务微调MFT, 可以同时平衡多个任务的训练,训练的模型支持多任务推理;

    -

    🔥 MFTCoder-accelerate 在训练中支持多种模型基座: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen等

    -

    2. 数据格式

    -

    2.1 训练数据格式

    -

    训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 -可以参考项目中的xxx.jsonl文件。

    -
    {
    -    "id":0,
    -    "data_name":"code-helper",
    -    "chat_rounds":[
    -        {
    -            "role": "system",
    -            "content": "你是一个智能代码助手,可以回复用户与代码相关的问题"
    -        },
    -        {
    -            "role": "human",
    -            "content": "写一个快速排序"
    -        },
    -        {
    -            "role": "bot",
    -            "content": "以下是一个快速排序算法xxxxxx"
    -        },
    -        {
    -            "role": "human",
    -            "content": "解释一下这段代码"
    -        },
    -        {
    -            "role": "bot",
    -            "content": "好的,这段代码xxx"
    -        }
    -    ]
    -}
    -

    2.2 推理数据格式

    -

    推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入prompt拼接的方式:

    -
    """
    -<s>system
    -这是System指令
    -<s>human
    -这是第1轮用户输入的问题
    -<s>bot
    -这是第1轮模型生成的内容{EOS_TOKEN}
    -<s>human
    -这是第2轮用户输入的问题
    -<s>bot
    -这是第2轮模型生成的内容{EOS_TOKEN}
    -...
    -...
    -...
    -<s>human
    -这是第n轮用户输入的问题
    -<s>bot
    -{模型现在要生成的内容}{EOS_TOKEN}
    -"""
    -

    3. 模型训练

    -

    目前支持全量参数(Full-parameters)指令微调、QLoRA指令微调,LoRA指令微调。 -一些优秀的代码预训练模型权重,理论上,HuggingFace上开源的模型,均可使用本项目进行训练:

    -

    🤗 最新代码预训练SOTA,CodeLlama :code-llama-34b, code-llama-34b-python, 新的SOTA基座。

    -

    🤗 10B级别最佳代码预训练模型Starcoder wizardCoder-15B, PanGu-coder2等前SOTA的基座模型。

    -

    🤗 多语言能手Qwen-7b :适用于多语言任务,也适用中文任务。进行指令微调时。

    -

    mftcoder_accelerate文件结构

    -
    mftcoder_accelerate
    -       |
    -       src
    -          configs
    -          |
    -          data
    -          |
    -          model
    -          |
    -          *pefts*
    -          |
    -          tokenizer
    -          |
    -          utils
    -       |
    -       evals
    -

    我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化, 详见src目录下的实现。

    -

    训练入口文件是mftcoder_accelerate/src/pefts/mft_accelerate.py

    -

    参数配置存储在mftcoder_accelerate/src/configs目录下,方便统一管理和更改。

    -

    所以,在你开启训练之前,请进入src目录

    -
    cd mftcoder_accelerate/src
    -

    3.1 数据tokenization

    -

    训练时,我们将多轮对话拼接成如下格式(也是上文中的推理数据格式),然后进行tokenize。 -其中,默认情况下:

    -

    <s>human\n作为human/user的起始符,<s>bot\n作为bot/assistant的起始符,{EOS_TOKEN} 表示eos_token。 -其中eos_token可以根据不同模型修改替换。不同角色的起始符可以配置,用来实现不同的对话/问答模版。

    -
    "<s>human\n{input1}<s>bot\n{target1}{EOS_TOKEN}<s>human\n{input2}<s>bot\n{target2}{EOS_TOKEN}\n"
    -

    在计算loss时,我们通过loss mask的方式,input部分的loss不参与参数更新,只有“target{EOS_TOKEN}”部分的loss参与参数更新。 -这种方式充分利用了模型并行计算的优势,训练更加高效,同时也充分利用了decoder-only模型从左到右attention的特性,一次性将多轮对话中的每个target部分都参与了训练,训练更充分高效。

    -

    3.2 LoRA/QLoRA微调

    -

    LoRA/QLoRA微调简介

    -

    关于LoRA的详细介绍可参考论文:LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS

    -

    关于QLoRA的详细介绍可参考论文:QLORA: Efficient Finetuning of Quantized LLMs

    -

    QLoRA通过4-bit的nf4量化,且加入更多adapter,在大幅减少显存消耗的同时,尽可能逼近全量参数微调的效果。 -QLoRA论文指出,该方法可以在一张V100上对33B的模型进行微调,并且性能逼近全量参数微调。

    -

    执行如下命令即可进行 Lora/QLora/全量 微调:

    -

    Launch via Deepspeed

    -

    DeepSpeed配置在accelerate_ds_config.yaml中。

    -
    accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "DeepSpeed" 
    -

    或者

    -

    DeepSpeed配置在脚本中通过命令行输入。

    -
    sh ds_single_launch.sh
    -

    Launch via FSDP

    -

    FSDP配置在accelerate_fsdp_config.yaml中。

    -
    accelerate launch --config_file accelerate_fsdp_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "FSDP"
    -

    或者

    -

    FSDP配置在脚本中通过命令行输入。

    -
    sh fsdp_single_launch.sh
    -

    训练参数

    -

    训练需要的参数配置在configs/*_train_config中,主要参数说明如下:

    -
      -
    • load_raw_dataset: 需要保持true,后续会支持其它模式数据,当前仅支持jsonl输入
    • -
    • data_paths: “[path1,path2,path3]” 输入数据地址,字符串,开头结尾用[],中间用,间隔不同path,每个path是一个目录,目录的最后一级名字作为任务名称,下面包含1到多个jsonl数据
    • -
    • output_dir:训练输出目录,存储checkpoint(全量训练时)、lora_adaptor(Lora或者Qlora时)等
    • -
    • tb_dir: 存储tensorboard等
    • -
    • model_type: “mixtral|mistral|deepseek|llama|starcoder|chatglm2|qwen|gpt_neox”
    • -
    • attn_implementation: “flash_attention_2” 或者 “eager”
    • -
    • peft_type: lora或者qlora或者null(全量微调)
    • -
    • lora_rank: lora rank
    • -
    • lora_alpha: lora alpha
    • -
    • lora_dropout: lora dropout
    • -
    • target_modules: List[str], lora目标模块,如果null,会使用默认,参考model_mapping.py
    • -
    • quantization: 是否量化,“4bit”, “8bit” 或者null, qlora推荐4bit量化
    • -
    • pretrained_model_path:预训练模型的本地目录,或者在huggingface上的模型名称。
    • -
    • weighted_loss_mode: 多任务loss加权模式, “case3"是当前推荐。
    • -
    • padding_mode: 数据的样本组织方式, “padding"是将每个原始样本填充到seq_length, “pack"是将尽量多的样本打包到每个seq_length的序列中。
    • -
    • num_train_epochs:训练的轮次。如果数据量足够大,一般建议只训1-2个epoch。
    • -
    • per_device_train_batch_size:每张显卡train的batch size。
    • -
    • per_device_eval_batch_size:每张显卡eval的batch size。
    • -
    • gradient_accumulation_steps:梯度累计步数。global batch=num_gpus * per_device_train_batch_size * gradient_accumulation_steps。
    • -
    • learning_rate:学习率。全量参数微调的时候,建议小一些,1e-5或5e-6。qlora中的学习率设置更大一些,一般为1e-4、2e-4。
    • -
    • min_lr: 最低学习率, 一般是learning_rate的十分之一
    • -
    • seq_length:训练时的最大长度。按照自己的设备进行设置,越长需要占用越多显存。
    • -
    • log_interval:每隔多少步统计一次train loss。
    • -
    • checkpointing_steps:每隔多少步保存一个模型。
    • -
    • evaluation_steps:每隔多少步在验证集上evaluate一次。
    • -
    • early_stopping : 是否执行early_stop
    • -
    • early_stopping_stall_num: 多少个eval point不继续收敛,则停止训练
    • -
    • lr_scheduler_type:学习率变化策略。常用"cosine”
    • -
    • warmup_steps:warm up步数。学习率经过多少步,增长到指定的数值。
    • -
    • seed:随机种子,用于复现实验结果。
    • -
    • saving_limit:整数,ckpt存储数量上限, 全量训练必须设置。默认null即不限制数量。
    • -
    • role_markers: null,即使用{“system”: “<s>system\n”, “user”: “<s>human\n”, “assistant”: “<s>bot\n”}。 你可以自定义 “system”, “user” and “assistant"的模板, 用于定制自己的问答或者对话模板,比如 {“system”: “### System:\n”, “user”: “### Instruction:\n”, “assistant”: “### Response:\n”}
    • -
    -

    4. 模型使用

    -

    4.1 权重合并

    -

    如果使用LoRA或者QLoRA进行训练,本项目仅保存adapter的权重和配置文件,需要将adapter权重与base model进行合并。 -可以使用如下merge_base_and_lora_to_hf.py脚本。

    -
    python pefts/merge_base_and_lora_to_hf.py \
    -    --base_model_or_path model_path \
    -    --adaptor_path lora_adapter_path \
    -    --model_type model_type \
    -    --merged_output_path output_path
    -

    4.2 模型推理

    -

    我们提供了单轮对话和多轮对话的如下脚本,该脚本可同时兼容大部分huggingface格式的模型。

    -
    from transformers import (
    -    AutoTokenizer, 
    -    AutoModelForCausalLM,
    -)
    -model_name_or_path = "codefuse-ai/CodeFuse-Deepseek-33B"
    -tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, padding_side="left")
    -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("<|end▁of▁sentence|>")
    -tokenizer.pad_token_id = tokenizer.eos_token_id
    -model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True)
    -
    -HUMAN_ROLE_START_TAG = "<s>human\n"
    -BOT_ROLE_START_TAG = "<s>bot\n"
    -texts = ["write a python function of quick sort."]
    -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts]
    -
    -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda")
    -outputs = model.generate(
    -        inputs=inputs["input_ids"],
    -        attention_mask=inputs["attention_mask"],
    -        max_new_tokens=512,
    -        top_p=0.95,
    -        temperature=0.1,
    -        do_sample=True,
    -        eos_token_id=tokenizer.eos_token_id,
    -        pad_token_id=tokenizer.pad_token_id
    -    )
    -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    -print(gen_text)
    -

    生成脚本中的top_p、temperature、repetition_penalty、do_sample等参数对模型的生成效果影响较大,可按照自己的使用场景进行调试修改。 -实践中,在代码生成场景中,如果采样模式,do_sample=True, top_p=0.95, temperature=0.1是pass@1指标的不错选择; -如果非采样模式, do_sample=False, beam_num=1或者3是不错的选择,其中beam_num=1即为greedy decoding。

    -

    5. FAQ

    -

    问题1:OOM如何解决?

    -

    如果发生OOM,可以缩小per_device_train_batch_size、seq_length等参数来缓解。由于面对的模型普遍较大(6b, 13b, 34b, 70b等)我们已经默认使用gradient_checkpointing技术,可以大幅降低显存占用,但训练速度会稍慢一些。

    -

    问题2:安装包错误

    -

    参考init_env.sh和requirements.txt

    -

    问题3:如何指定使用某些卡训练?

    -

    通过如下方式,即可指定使用0和1号卡进行训练:

    -
    CUDA_VISIBLE_DEVICES=0,1 accelerate launch --config_file pefts/accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "deepspeed"
    -

    问题4:关于Flash Attention, 该如何配置训练?

    -

    首先,我们强烈建议您安装Flash Attention 2(FA2),(>=2.1.0, 2.3.6功能更齐全)。

    -

    训练参数中"attn_implementation” 设置成 “eager” 可以用naive attention,也就是未经加速的attention。

    -

    训练参数中"attn_implementation” 设置成 “flash_attention_2” 可以用FA2,速度快,省显存。

    -

    如果你可以自行安装环境并使用torch>=2.1.1,可以尝试设置参数"attn_implementation"为 “sdpa”。这样会尝试使用transformers兼容的torch.nn.functional.scaled_dot_product_attention。支持的模型还不全面。

    -

    问题5:推荐的分布式框架是怎样的?

    -

    对于LoRA/QLoRA, 我们推荐使用DeepSpeed作为底层分布式框架,它具有易用性和兼容性好的特点,并且速度很快。 -FSDP 不支持QLoRA, 因为bitsandbytes暂不支持FSDP。

    -

    对于全量微调,我们推荐使用FSDP, 因为它在全量训练时可以发挥fully sharding的优势,达到更快的训练速度。

    -

    问题6:当前支持的模型中,有什么区别

    -

    国产大模型比如chatglm2, chatglm3, baichuan2, qwen, aquila2等,使用的是和模型共同发布的modeling_xxx.py. -其它被transformers官方支持的大模型,由于已经升级支持flash attention等,所以全面切换到官方的modeling支持训练,之前的自定义modeling会被deprecated

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-accelerate/index.html b/docs/docs/mftcoder-accelerate/index.html deleted file mode 100644 index d93e34b..0000000 --- a/docs/docs/mftcoder-accelerate/index.html +++ /dev/null @@ -1,993 +0,0 @@ - - - - - - - - -MFTCoder-accelerate: Training Framework with Accelerate and DeepSpeed/FSDP · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder-accelerate: Training Framework with Accelerate and DeepSpeed/FSDP

    -
    -
    - - -

    Generic badge - -GitHub -

    -

    [中文] [English]

    -

    1. Updates

    -

    🔥 MFTCoder-accelerate supports Full-parameters/LoRA using accelerate + FSDP Framework;

    -

    🔥 MFTCoder-accelerate supports MFT/SFT on more new mainstream open-source base models: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3;

    -

    🔥 MFTCoder-accelerate supports Self-Paced Loss for Convergence Balance;

    -

    🔥 MFTCoder-accelerate supports Full-parameters/QLoRA/LoRA using accelerate + DeepSpeed Framework;

    -

    🔥 MFTCoder-accelerate supports Multitask Fine-Tuning (MFT), which is able to balance different tasks at the data level.

    -

    🔥 MFTCoder-accelerate supports finetuning most of mainstream open-source base models: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen.

    -

    2. Data Format

    -

    2.1 Training Data Format

    -

    The training data is required to be a uniform JSONL format, in which each line of data has the following “chatML”-style JSON format. The “chat_rounds” field is required, and other fields can be added or removed based on specific needs. -The reason why we selected “chatML” style as our training and inference data format is that “chatML” style is compatible with both “conversation” and “instruction/response” scenarios.

    -

    For the keys of roles in “chat_rounds”, you could use “system/human/bot” tuple or “system/user/assistant” tuple.

    -
    {
    -    "id":0,
    -    "data_name":"code-helper",
    -    "chat_rounds":[
    -        {
    -            "role": "system",
    -            "content": "You are an expert in coding and help answer code questions"
    -        },
    -        {
    -            "role": "human",
    -            "content": "Write a python function of quick sort"
    -        },
    -        {
    -            "role": "bot",
    -            "content": "Below is the function of quick sort: ..."
    -        },
    -        {
    -            "role": "human",
    -            "content": "Explain the code"
    -        },
    -        {
    -            "role": "bot",
    -            "content": "OK, this code ..."
    -        }
    -    ]
    -}
    -

    2.2 Default Inference Data Format

    -

    Inference data format is the real string format consumed by tokenizers and then LLMs. It is also the string format to which the training data is converted before tokenization. -The default inference data format contains strings concatenated by conversation data(system, human and bot contents) in the training data format. -It is used as the data “seen”(before tokenization) by the model in training process. -It is used as input during the inference process as well. -Here is an example format of the inference string:

    -
    """
    -<s>system
    -System instruction
    -<s>human
    -User 1st round input
    -<s>bot
    -Assistant 1st round output{EOS_TOKEN}
    -<s>human
    -User 2nd round input
    -<s>bot
    -Assistant 2nd round output{EOS_TOKEN}
    -...
    -...
    -...
    -<s>human
    -User nth round input
    -<s>bot
    -{Assistant output to be generated}{EOS_TOKEN}
    -"""
    -

    When applying inference, always make your input string end with <s>bot\n to prompt the model to generate answers.

    -

    3. Model Training

    -

    Currently, the “MFTCoder-accelerate” codebase supports Full-parameters/LoRA/QLoRA along with Multi-Task Fine-Tuning (MFT). -In theory, this project can be used to train any publicly available model in the HuggingFace Format.

    -

    Here are some excellent pre-trained models weights available on Huggingface that can be finetuned with this codebase:

    -

    🤗 Latest code pre-trained SOTA, CodeLlama-34b-Python : code-llama-34b, code-llama-34b-python, a new SOTA base model.

    -

    🤗 Best 10B level pre-trained Code LLM, Starcoder: wizardCoder-15B, PanGu-coder2, and other previous SOTA were trained on it.

    -

    🤗 Multilingual powerhouse, Qwen-7b: Suitable for multilingual tasks, including Chinese tasks, for instruction fine-tuning.

    -

    mftcoder_accelerate directory structure

    -
    mftcoder_accelerate
    -       |
    -       src
    -          configs
    -          |
    -          data
    -          |
    -          model
    -          |
    -          *pefts*
    -          |
    -          tokenizer
    -          |
    -          utils
    -       |
    -       evals
    -

    我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化, 详见src目录下的实现。

    -

    训练入口文件是mftcoder_accelerate/src/pefts/mft_accelerate.py

    -

    参数配置存储在mftcoder_accelerate/src/configs目录下,方便统一管理和更改。

    -

    所以,在你开启训练之前,请进入src目录

    -
    cd mftcoder_accelerate/src
    -

    You can find the implementations in the mftcoder_accelerate/src directory. -The entry directory for fine-tuning training is mftcoder_accelerate/src, and the entry file for training is mftcoder_accelerate/src/pefts/mft_accelerate.py. -Configurations are stored in the mftcoder_accelerate/src/configs directory for easy management and modification.

    -

    As a result, before you start training, you should first change your dir by

    -
    cd mftcoder_accelerate/src
    -

    3.1 Tokenization

    -

    During training, we concatenate multi-turn dialogues into the following format (also known as the inference data format mentioned before) and then tokenize it.

    -

    In default format, <s>human\n starts the user’s input (i.e., prompt),<s>bot\n starts the assistant’s output (i.e., response)

    -

    {EOS_TOKEN} represents the proper eos_token. -We have different eos_tokens in src/pefts/model_mapping.py which fits different base models.

    -

    Here is a visionable example of the training data after formatting:

    -
    f"<s>human\n{input1}<s>bot\n{target1}{EOS_TOKEN}\n<s>human\n{input2}<s>bot\n{target2}{EOS_TOKEN}\n"
    -

    During the calculation of loss, we use a loss mask to ensure that the loss from the input part does not contribute to parameter updates. Only the loss from the target{EOS_TOKEN} part is used for updating parameters. -This approach takes full advantage of the benefits of model parallelism, making training more efficient. It also leverages the characteristic of decoder-only models with left-to-right attention. -By including all target parts from multiple turns in a single training iteration, the training process becomes more efficient.

    -

    3.2 LoRA/QLoRA

    -

    Intro

    -

    You can refer to the Lora paper for details about LoRA:LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS

    -

    You can refer to the Qlora paper for details about QLoRA:QLORA: Efficient Finetuning of Quantized LLMs

    -

    QLoRA (Quantized LoRA) is a method that combines 4-bit nf4 quantization and additional adapters to achieve a balance between reducing GPU memory consumption and approaching the performance of full-parameter fine-tuning.

    -

    According to the QLoRA paper, this method enables fine-tuning of a 33B model on a single V100 GPU while achieving performance close to that of full-parameter fine-tuning.

    -

    To perform LoRA/QLoRA fine-tuning, you can execute the following command:

    -

    Launch via Deepspeed

    -

    DeepSpeed config in accelerate_ds_config.yaml.

    -
    accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "DeepSpeed" 
    -

    or -DeepSpeed config in command line arguments

    -
    sh ds_single_launch.sh
    -

    Launch via FSDP

    -

    FSDP config in accelerate_fsdp_config.yaml.

    -
    accelerate launch --config_file accelerate_fsdp_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json --distributed_type "FSDP"
    -

    or -FSDP config in command line arguments

    -
    sh fsdp_single_launch.sh
    -

    Training Arguments

    -

    All arguments allowed in ***_train_config.json are defined in arguments.py.

    -

    Frequently used arguments are provided in configs/***_train_config and explained as follows. You can modify these parameters according to your needs:

    -
      -
    • -

      load_raw_dataset: Need to be true at present. Only JSONL format is supported.

      -
    • -
    • -

      data_paths: Input data paths in a String of list format, e.g., “[path1,path2,path3]”. Each path represents a task directory and each task directory contains one or more JSONL data files.

      -
    • -
    • -

      output_dir: Training output directory to store checkpoints, Lora adapter, etc.

      -
    • -
    • -

      tb_dir: TensorBoard directory to store logs, metrics, etc.

      -
    • -
    • -

      model_type: Type of the model to train, e.g., “mixtral | mistral | deepseek | llama | starcoder | chatglm2 | qwen | gpt_neox”.

      -
    • -
    • -

      attn_implementation: “flash_attention_2” or “eager” or “sdpa”, worked when model is supported by transformers officially

      -
    • -
    • -

      peft_type: null or “lora” or “qlora”. null for full-params training

      -
    • -
    • -

      lora_rank: Rank value for Lora.

      -
    • -
    • -

      lora_alpha: Alpha value for Lora.

      -
    • -
    • -

      lora_dropout: Dropout rate for Lora.

      -
    • -
    • -

      target_modules: List of target modules in lora, we have default values if None

      -
    • -
    • -

      quantization: “4bit” for QLoRA/ null for LoRA and Full-params training.

      -
    • -
    • -

      pretrained_model_path: Local/Shared disk path or model name on HuggingFace for the pre-trained model.

      -
    • -
    • -

      weighted_loss_mode: Loss weighting method for multitask training. “case3” is recommended at present, “self-paced” is supported but need tuning of hyperparameters.

      -
    • -
    • -

      padding_mode: The way tokenized data is set. “padding” means padding for each sample to seq_length, “pack” means putting samples into seq_length as many as possible.

      -
    • -
    • -

      num_train_epochs: Number of training epochs.

      -
    • -
    • -

      per_device_train_batch_size: Batch size per GPU for training.

      -
    • -
    • -

      per_device_eval_batch_size: Batch size per GPU for evaluation.

      -
    • -
    • -

      gradient_accumulation_steps: Number of gradient accumulation steps. Global batch size is calculated as num_gpus * per_device_train_batch_size * gradient_accumulation_steps.

      -
    • -
    • -

      learning_rate: Initial Learning rate. For full-parameter fine-tuning, it is recommended to use a smaller value such as 1e-5 or 5e-6. For QLoRA, a larger learning rate is generally used, such as 1e-4 or 2e-4.

      -
    • -
    • -

      min_lr: Minimum learning rate. Usually set to one-tenth of the learning rate.

      -
    • -
    • -

      seq_length: Maximum input sequence length during training.

      -
    • -
    • -

      log_interval: Log training loss every log_interval steps.

      -
    • -
    • -

      checkpointing_steps: Save a checkpoint every checkpointing_steps steps.

      -
    • -
    • -

      evaluation_steps: Evaluate on the validation set every evaluation_steps steps.

      -
    • -
    • -

      early_stopping: Enable early stopping or not.

      -
    • -
    • -

      early_stopping_stall_num: Number of evaluation points without improvement which triggers early stopping.

      -
    • -
    • -

      lr_scheduler_type: Type of learning rate scheduler. “cosine” is a good choice already.

      -
    • -
    • -

      num_warmup_steps: Number of warm-up steps to gradually increase the learning rate.

      -
    • -
    • -

      seed: Random seed for reproducibility.

      -
    • -
    • -

      saving_limit: ckpt saving limit num, must be set in Full-parameter training.

      -
    • -
    • -

      role_markers: {“system”: “<s>system\n”, “user”: “<s>human\n”, “assistant”: “<s>bot\n”} as default (null). You could set your preferred role_markers as the templates starting “system”, “user” and “assistant”. e.g. {“system”: “### System:\n”, “user”: “### Instruction:\n”, “assistant”: “### Response:\n”}

      -
    • -
    -

    4. Model Usage

    -

    4.1 Merge Adaptor weights

    -

    Using LoRA or QLoRA for training, this project only saves the weights and configuration files of the adapters. -To merge the adapter weights with the base model:

    -
    python pefts/merge_base_and_lora_to_hf.py \
    -    --base_model_or_path model_path \
    -    --adaptor_path lora_adapter_path \
    -    --model_type model_type \
    -    --merged_output_path output_path
    -

    4.2 Inference demo

    -

    Here is the script for inference on models trained by MFTCoder since v0.3.0, which is compatible with most HuggingFace models:

    -
    from transformers import (
    -    AutoTokenizer, 
    -    AutoModelForCausalLM,
    -)
    -model_name_or_path = "codefuse-ai/CodeFuse-Deepseek-33B"
    -tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, padding_side="left")
    -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("<|end▁of▁sentence|>")
    -tokenizer.pad_token_id = tokenizer.eos_token_id
    -model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True)
    -
    -HUMAN_ROLE_START_TAG = "<s>human\n"
    -BOT_ROLE_START_TAG = "<s>bot\n"
    -texts = ["write a python function of quick sort."]
    -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts]
    -
    -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda")
    -outputs = model.generate(
    -        inputs=inputs["input_ids"],
    -        attention_mask=inputs["attention_mask"],
    -        max_new_tokens=512,
    -        top_p=0.95,
    -        temperature=0.1,
    -        do_sample=True,
    -        eos_token_id=tokenizer.eos_token_id,
    -        pad_token_id=tokenizer.pad_token_id
    -    )
    -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    -print(gen_text)
    -

    Indeed, the parameters top_p, temperature, repetition_penalty, do_sample, etc., have a significant impact on the model’s generation output. -You can modify these parameters based on your specific use case.

    -

    In code generation scenarios, if you are using the sampling mode (do_sample=True), the following parameter settings can yield good results for the Pass@1 metric:

    -

    top_p: Set a higher value, such as 0.95, to retain highly probable generated words. This helps ensure more accurate and fluent generation results.

    -

    temperature: Set a lower value, such as 0.1, to reduce randomness. Lower temperature values make the generation output more deterministic.

    -

    These parameter combinations can control the diversity of the generated outputs while maintaining naturalness. Additionally, you can adjust other related parameters, such as repetition_penalty, to reduce repetition in the generated results.

    -

    If you choose the non-sampling mode (do_sample=False), you can consider the following parameter settings:

    -

    beam_num: Set a smaller value such as 1 or 3. beam_num=1 represents greedy decoding, which selects the most probable single generated word. beam_num=3 represents beam search mode, which considers multiple potential generation paths and chooses the best path among them.

    -

    5. FAQ

    -

    Q1:What should I do when cuda OOM happens?

    -

    If OOM happens, you can reduce parameters such as per_device_train_batch_size and seq_length. Since you are dealing with large models (6B, 13B, 34B, 70B, etc.), you are already using gradient checkpointing technology by default, which significantly reduces GPU memory consumption. -However, this may slightly slow down the training speed.

    -

    Q2:install packages

    -

    Please refer to init_env.sh and requirements.txt -We highly recommend you install Flash Attention 2 (flash_attn>=2.1.0, 2.3.6 used by us) first to get memory-efficient and fast training.

    -

    Q3:How should I specify the GPUs for training?

    -

    You can specify the visible GPUs as below:

    -
    CUDA_VISIBLE_DEVICES=0,1 accelerate launch --config_file accelerate_ds_config.yaml pefts/mft_accelerate.py --train_config configs/xxx_train_config.json
    -
    -

    For LoRA/QLoRA, we recommend DeepSpeed (ZeRO2) as the underlying framework, because it is easy and stable to use; moreover, it is more compatible with different settings. -And FSDP does not support Quantization (integer types in training).

    -

    For Full-parameter finetuning, FSDP is usually faster, and may help you with very large models by sharding parameters and gradients.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-atorch-zh/index.html b/docs/docs/mftcoder-atorch-zh/index.html deleted file mode 100644 index 3cb48d4..0000000 --- a/docs/docs/mftcoder-atorch-zh/index.html +++ /dev/null @@ -1,909 +0,0 @@ - - - - - - - - -MFTCoder训练: Atorch框架篇 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder训练: Atorch框架篇

    -
    -
    - - -

    Generic badge - -GitHub -

    -

    [中文] [English]

    -

    1. 更新

    -

    🔥 MFTCoder在Atorch框架下支持GPTNeoX模型的微调;

    -

    🔥 MFTCoder支持全量的有监督微调;

    -

    🔥 MFTCoder支持LoRA微调;

    -

    2. 数据格式

    -

    2.1 训练数据格式

    -

    训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 -可以参考项目中的xxx.jsonl文件。

    -
    {
    -    "id":0,
    -    "data_name":"code-helper",
    -    "chat_rounds":[
    -        {
    -            "role": "system",
    -            "content": "你是一个智能代码助手,可以回复用户与代码相关的问题",
    -            "chat_round_id": 0
    -        },
    -        {
    -            "role": "human",
    -            "content": "写一个快速排序", 
    -            "chat_round_id": 1
    -        },
    -        {
    -            "role": "bot",
    -            "content": "以下是一个快速排序算法xxxxxx", 
    -            "chat_round_id": 1
    -        },
    -        {
    -            "role": "human",
    -            "content": "解释一下这段代码", 
    -            "chat_round_id": 2
    -        },
    -        {
    -            "role": "bot",
    -            "content": "好的,这段代码xxx", 
    -            "chat_round_id": 2
    -        }
    -    ]
    -}
    -

    2.2 推理数据格式

    -

    推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入prompt拼接的方式:

    -
    """
    -<|role_start|>system<|role_end|>这是System指令
    -<|role_start|>human<|role_end|>这是第1轮用户输入的问题
    -<|role_start|>bot<|role_end|>这是第1轮模型生成的内容</s>
    -<|role_start|>human<|role_end|>这是第2轮用户输入的问题
    -<|role_start|>bot<|role_end|>这是第2轮模型生成的内容</s>
    -...
    -...
    -...
    -<|role_start|>human<|role_end|>这是第n轮用户输入的问题
    -<|role_start|>bot<|role_end|>{模型现在要生成的内容}</s>
    -"""
    -

    3. 模型训练

    -

    目前 “MFTCoder/mft_atorch” 代码库支持全量参数指令微调和LoRA指令微调。 -目前仅支持GPTNeoX模型的训练,理论上,HuggingFace上开源的GPTNeoX模型权重,均可使用本项目进行训练。

    -

    我们将训练中使用的各种组件抽取出来,以便后续的扩展和优化,详见主目录下的实现。微调训练的入口目录是train/, 训练入口文件是train/run_train.py, 参数配置存储在启动脚本train/run_gpt_*.sh等文件中,方便统一管理和更改。

    -

    3.1 数据格式

    -

    训练时,我们将多轮对话拼接成如下格式,然后进行tokenize。其中<|role_start|>human<|role_end|>表示human输入提示符,<|role_start|>bot<|role_end|>表示bot输出提示符,</s> 表示eos_token。

    -
    "<|role_start|>human<|role_end|>input1</s>target1</s>input2</s>target2</s>...
    -

    在计算loss时,我们通过mask的方式,input部分的loss不参与参数更新,只有“target”部分的loss参与参数更新。 -这种方式充分利用了模型并行计算的优势,训练更加高效,且多轮对话中的每个target部分都参与了训练,训练更充分。 -否则,就需要把一个n轮对话,拆分成n条数据,且只计算最后一个target的loss,大大降低了训练效率。

    -

    3.2 全量SFT

    -

    执行如下命令即可进行全量SFT:

    -
    sh run_gpt_mft.sh 10 1 8 5
    -

    需注意,启动脚本后的四个参数,分别是:

    -
      -
    • 第一个参数是总的per gpu batch size
    • -
    • 第二个参数是tensor parallel数(暂时只支持1)
    • -
    • 第三个参数是data parallel数,与所用GPU数保持一致
    • -
    • 第四个参数是训练epoch数
    • -
    -

    后面其他的训练方式启动脚本,也同样需要配置这四个参数

    -

    3.3 LoRA微调

    -

    执行如下命令即可进行Lora微调:

    -
    sh run_gpt_mft_peft.sh 10 1 8 5
    -

    3.4 启动脚本中主要参数说明

    -

    train/run_gpt_*.sh中的主要参数说明如下,以下参数可以根据需求进行修改,其他参数建议不做修改:

    -
      -
    • -

      tokenize_mode: 目前仅支持"sft"。

      -
    • -
    • -

      train_mode: 目前仅支持"sft"。

      -
    • -
    • -

      load_raw_dataset: 需要保持"True",后续会支持其它模式数据,当前仅支持jsonl输入

      -
    • -
    • -

      data_paths: “[path1,path2,path3]” 输入数据地址,字符串,开头结尾用[],中间用,间隔不同path,每个path是一个目录,目录的最后一级名字作为任务名称,下面包含1到多个jsonl数据。

      -
    • -
    • -

      output_dir: 训练输出目录,存储checkpoint、lora_adaptor checkpoint等。

      -
    • -
    • -

      tensorboard_dir: 可以暂时忽略,实际tensorboard存储在output_dir的runs目录下。

      -
    • -
    • -

      model_type: 目前仅支持 gpt_neox。

      -
    • -
    • -

      peft_type: 目前仅支持 lora。

      -
    • -
    • -

      pretrained_model_path: 预训练模型的本地目录。

      -
    • -
    • -

      total_train_batch_size: 所有显卡train的batch size的总和,会根据启动脚本时输入的per gpu batch size自动计算。

      -
    • -
    • -

      per_device_valid_batch_size: 每张显卡eval的batch size,会根据启动脚本时输入的per gpu batch size自动计算。

      -
    • -
    • -

      gradient_accumulation_steps: 梯度累计步数。global batch=num_gpus * per_device_train_batch_size * gradient_accumulation_steps。

      -
    • -
    • -

      checkpoint_activations: 如果显存捉襟见肘,可以开启。以时间换空间,模型不缓存激活状态,会进行两次forward计算,以节省显存。

      -
    • -
    • -

      learning_rate: 学习率。全量参数微调的时候,建议小一些,1e-5或5e-6。qlora中的学习率设置更大一些,一般为1e-4、2e-4。

      -
    • -
    • -

      min_lr: 最低学习率, 一般是learning_rate的十分之一。

      -
    • -
    • -

      seq_length: 训练时的最大长度。按照自己的设备进行设置,越长需要占用越多显存。

      -
    • -
    • -

      log_interval: 每隔多少步统计一次train loss。

      -
    • -
    • -

      checkpointing_steps: 每隔多少步保存一个模型。

      -
    • -
    • -

      evalation_steps: 每隔多少步在验证集上evaluate一次。

      -
    • -
    • -

      early_stopping_patience: 多少个eval point不继续收敛,则停止训练。

      -
    • -
    • -

      lr_scheduler_type: 学习率变化策略。

      -
    • -
    • -

      num_warmup_steps: warm up步数,学习率经过多少步,增长到指定的数值。

      -
    • -
    • -

      seed: 随机种子,用于复现实验结果。

      -
    • -
    • -

      train_iters: 可以暂时设为比较小的数,如10,实际上不会影响训练步数,留作后面拓展读取其他形式数据集的功能。

      -
    • -
    • -

      valid_iters: 可以暂时设为比较小的数,如10,实际上不会影响训练步数,留作后面拓展读取其他形式数据集的功能。

      -
    • -
    • -

      evaluation_strategy: 训练期间evaluate的策略,“steps"表示每隔"valid_interval"步做一次evaluate,“epoch"表示每隔一个epoch做一次evaluate,支持同时开启。

      -
    • -
    • -

      save_strategy: 训练期间保存模型权重的策略,“steps"表示每隔"checkpointing_steps"步保存一次。

      -
    • -
    • -

      extra_save_by_epoch: 每过一个epoch是否要保存一个epoch级别的checkpoint。

      -
    • -
    • -

      save_total_limit: 最多保留的模型checkpoint个数,一般设置为2,会保留valid loss最低,以及最新的checkpoint,注意epoch级别的checkpoint会一直保留,且不受限制。

      -
    • -
    • -

      weighted_loss_mode: 多任务训练的loss加权方式。

      -
    • -
    -

    4. 模型使用

    -

    4.1 权重合并

    -

    如果使用LoRA进行训练,本项目仅保存adapter的权重和配置文件,需要将adapter权重与base model进行合并。脚本见utils/merge_base_and_lora_to_hf.py

    -

    4.2 模型推理

    -

    我们提供了单轮对话和多轮对话的如下脚本,该脚本可同时兼容大部分huggingface格式的模型。

    -
    from transformers import (
    -    AutoTokenizer, 
    -    AutoModelForCausalLM,
    -)
    -tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True, use_fast=False, legacy=False)
    -tokenizer.padding_side = "left"
    -tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("<unk>")
    -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("</s>")
    -model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, trust_remote_code=True)
    -
    -HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>"
    -BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>"
    -texts = ["write a python function of quick sort."]
    -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts]
    -
    -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda")
    -outputs = model.generate(
    -        inputs=inputs["input_ids"],
    -        attention_mask=inputs["attention_mask"],
    -        max_new_tokens=512,
    -        top_p=0.95,
    -        temperature=0.1,
    -        do_sample=True,
    -        eos_token_id=tokenizer.eos_token_id,
    -        pad_token_id=tokenizer.pad_token_id
    -    )
    -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    -print(gen_text)
    -

    生成脚本中的top_p、temperature、repetition_penalty、do_sample等参数对模型的生成效果影响较大,可按照自己的使用场景进行调试修改。 -实践中,在代码生成场景中,如果采样模式,do_sample=True, top_p=0.95, temperature=0.1是pass@1指标的不错选择; -如果非采样模式, do_sample=False, beam_num=1或者3是不错的选择,其中beam_num=1即为greedy decoding。

    -

    5. FAQ

    -

    问题1:OOM如何解决?

    -

    如果发生OOM,可以缩小per GPU batch size (启动训练脚本时的第一个参数)、seq_length等参数来缓解。也可以设gradient_checkpointing=true,可以大幅降低显存占用,但训练速度会变慢一些。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-atorch/index.html b/docs/docs/mftcoder-atorch/index.html deleted file mode 100644 index 58c32ed..0000000 --- a/docs/docs/mftcoder-atorch/index.html +++ /dev/null @@ -1,934 +0,0 @@ - - - - - - - - -MFTCoder Training: Atorch Framework · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder Training: Atorch Framework

    -
    -
    - - -

    Generic badge - -GitHub -

    -

    [中文] [English]

    -

    1. Updates

    -

    🔥 MFTCoder supports fine-tuning of the GPTNeoX model under the Atorch framework.

    -

    🔥 MFTCoder supports fully supervised fine-tuning.

    -

    🔥 MFTCoder supports LoRA using the Atorch Framework.

    -

    2. Data Format

    -

    2.1 Training Data Format

    -

    The training data is in a uniformed JSONL format, in which each line of data has the following JSON format. The “chat_rounds” field is required, and other fields can be added or removed based on the specific need.

    -
    {
    -    "id":0,
    -    "data_name":"code-helper",
    -    "chat_rounds":[
    -        {
    -            "role": "system",
    -            "content": "You are a expert in coding and help answer code questions",
    -            "chat_round_id": 0
    -        },
    -        {
    -            "role": "human",
    -            "content": "Write a python function of quick sort", 
    -            "chat_round_id": 1
    -        },
    -        {
    -            "role": "bot",
    -            "content": "Below is the function of quick sort: ...", 
    -            "chat_round_id": 1
    -        },
    -        {
    -            "role": "human",
    -            "content": "Explain the code", 
    -            "chat_round_id": 2
    -        },
    -        {
    -            "role": "bot",
    -            "content": "OK, this code ...", 
    -            "chat_round_id": 2
    -        }
    -    ]
    -}
    -

    2.2 Inference Data Format

    -

    The inference data contains strings concatenated by conversation data(system, human and bot contents) in the training data format. -It is used as the data “seen”(before tokenization) by the model in training process. -It is used as input during the inference process as well. -Here is an example format of the concatenated string:

    -
    """
    -<|role_start|>system<|role_end|>System instruction
    -<|role_start|>human<|role_end|>Human 1st round input
    -<|role_start|>bot<|role_end|>Bot 1st round output</s>
    -<|role_start|>human<|role_end|>Human 2nd round input
    -<|role_start|>bot<|role_end|>Bot 2nd round output</s>
    -...
    -...
    -...
    -<|role_start|>human<|role_end|>Human nth round input
    -<|role_start|>bot<|role_end|>{Bot output to be generated}</s>
    -"""
    -

    When applying inference, you always make your input string end with “<|role_start|>bot<|role_end|>” to request the model to generate answers.

    -

    3. Model Training

    -

    Currently, the “MFTCoder/mft_atorch” code repository supports full-parameter instruction fine-tuning and LoRA instruction fine-tuning. Only the training of the GPTNeoX model is supported. In theory, the pretrained weights of the GPTNeoX model available on HuggingFace can be used for training within this project.

    -

    We have extracted various components used in training to facilitate future extension and optimization. Please refer to the implementation in the main directory for more details. The entry directory for fine-tuning training is train/, and the entry file for training is train/run_train.py. The parameter configurations are stored in the launch scripts such as train/run_gpt_*.sh, making it easier to manage and modify them uniformly.

    -

    3.1 Tokenization

    -

    During training, we concatenate multi-turn dialogues into the following format (also known as the inference data format mentioned earlier) and then tokenize it. In this format, <|role_start|>human<|role_end|> represents the human input (i.e., prompt), <|role_start|>bot<|role_end|> represents the bot output, and </s> represents the eos_token. You can modify and replace the eos_token based on different models’ requirements.

    -

    Here is an example of the concatenated format with prompts:

    -
    "<|role_start|>human<|role_end|>input1</s>target1</s>input2</s>target2</s>...
    -

    During the calculation of loss, we use a loss mask to ensure that the loss from the input part does not contribute to the parameter updates. Only the loss from the target</s> part is used for updating parameters. -This approach takes full advantage of the benefits of model parallelism, making training more efficient. It also leverages the characteristic of decoder-only models with left-to-right attention. -By including all target parts from multiple turns in a single training iteration, the training process becomes more efficient.

    -

    3.2 Fully Supervised Fine-Tuning (SFT)

    -

    To perform fully SFT, you can execute the following command:

    -
    sh run_gpt_mft.sh 10 1 8 5
    -

    Please note that the four parameters after the launch script have the following meanings:

    -
      -
    • The first parameter is the per GPU batch size.
    • -
    • The second parameter is the number of tensor parallelism (currently only supports 1).
    • -
    • The third parameter is the number of data parallelism, which should match the number of GPUs used.
    • -
    • The fourth parameter is the number of training epochs.
    • -
    -

    For other training modes, the same four parameters need to be configured in the launch script.

    -

    3.3 LoRA Supervised Fine-Tuning

    -

    To perform LoRA SFT, you can execute the following command:

    -
    sh run_gpt_mft_peft.sh 10 1 8 5
    -

    3.4 Parameter Explanations

    -

    The main parameter explanations for the train/run_gpt_*.sh are as follows. You can modify these parameters according to your needs:

    -
      -
    • -

      tokenize_mode: Need to be ‘sft’ at present.

      -
    • -
    • -

      train_mode: Need to be ‘sft’ at present.

      -
    • -
    • -

      load_raw_dataset: Need to be ‘True’ at present. Only JSONL format is supported.

      -
    • -
    • -

      data_paths: “[path1,path2,path3]” Input data addresses, a string enclosed in [], with different paths separated by commas (,). Each path is a directory where the last level of the directory name is considered as the task name. Each task directory contains 1 to multiple jsonl data files.

      -
    • -
    • -

      output_dir: Training output directory to store checkpoints, lora_adaptor checkpoints, etc.

      -
    • -
    • -

      tensorboard_dir: Can be temporarily ignored, as the actual tensorboard is stored in the runs directory under output_dir.

      -
    • -
    • -

      model_type: Currently only supports gpt_neox.

      -
    • -
    • -

      peft_type: Currently only supports lora.

      -
    • -
    • -

      pretrained_model_path: Local directory of the pre-trained model.

      -
    • -
    • -

      total_train_batch_size: The total batch size for training across all GPUs, calculated automatically based on per gpu batch size entered in the script.

      -
    • -
    • -

      per_device_valid_batch_size: The batch size for evaluation on each GPU, calculated automatically based on per gpu batch size entered in the script.

      -
    • -
    • -

      gradient_accumulation_steps: Number of gradient accumulation steps. Global batch size = num_gpus * per_device_train_batch_size * gradient_accumulation_steps.

      -
    • -
    • -

      checkpoint_activations: Enable if running out of GPU memory. Trades time for space by not caching activation states, resulting in two forward passes to save memory.

      -
    • -
    • -

      learning_rate: Learning rate. When fine-tuning the entire model, it is recommended to use a smaller value, such as 1e-5 or 5e-6. For lora, a larger learning rate is generally used, such as 1e-4 or 2e-4.

      -
    • -
    • -

      min_lr: Minimum learning rate, usually one-tenth of the learning_rate.

      -
    • -
    • -

      seq_length: Maximum length during training. Set according to your device, longer lengths require more memory.

      -
    • -
    • -

      log_interval: Frequency of logging training loss.

      -
    • -
    • -

      checkpointing_steps: Frequency of saving a model checkpoint.

      -
    • -
    • -

      evalation_steps: Frequency of evaluating on the validation set.

      -
    • -
    • -

      early_stopping_patience: Number of consecutive eval points without further convergence to stop training.

      -
    • -
    • -

      lr_scheduler_type: Learning rate changing strategy.

      -
    • -
    • -

      num_warmup_steps: Number of warm-up steps for the learning rate to increase to the specified value.

      -
    • -
    • -

      seed: Random seed used for reproducibility of experimental results.

      -
    • -
    • -

      train_iters: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats.

      -
    • -
    • -

      valid_iters: Can be temporarily set to a small value, such as 10, which does not affect the actual number of training steps, kept for future expansion to support reading datasets in other formats.

      -
    • -
    • -

      evaluation_strategy: Evaluation strategy during training. “steps” means to evaluate every “valid_interval” steps, “epoch” means to evaluate every epoch. Both can be enabled simultaneously.

      -
    • -
    • -

      save_strategy: Strategy for saving model weights during training. “steps” means to save every “checkpointing_steps” steps.

      -
    • -
    • -

      extra_save_by_epoch: Whether to save an epoch-level checkpoint every epoch.

      -
    • -
    • -

      save_total_limit: Maximum number of model checkpoints to keep. Generally set to 2, retaining the checkpoint with the lowest valid loss and the latest checkpoint. Note that epoch-level checkpoints will always be retained and are not subject to this limit.

      -
    • -
    • -

      weighted_loss_mode: Loss weighting method for multi-task training.

      -
    • -
    -

    4. Model Usage

    -

    4.1 Merge Adaptor weights

    -

    Using LoRA or QLoRA for training, this project only saves the weights and configuration files of the adapters. To merge the adapter weights with the base model, see utils/merge_base_and_lora_to_hf.py

    -

    4.2 Inference demo

    -

    Here is the script for inference on our trained models, which is compatible with most Hugging Face models:

    -
    from transformers import (
    -    AutoTokenizer, 
    -    AutoModelForCausalLM,
    -)
    -tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True, use_fast=False, legacy=False)
    -tokenizer.padding_side = "left"
    -tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("<unk>")
    -tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("</s>")
    -model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, trust_remote_code=True)
    -
    -HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>"
    -BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>"
    -texts = ["write a python function of quick sort."]
    -texts = [f"{HUMAN_ROLE_START_TAG}{text}{BOT_ROLE_START_TAG}" for text in texts]
    -
    -inputs = tokenizer(texts, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda")
    -outputs = model.generate(
    -        inputs=inputs["input_ids"],
    -        attention_mask=inputs["attention_mask"],
    -        max_new_tokens=512,
    -        top_p=0.95,
    -        temperature=0.1,
    -        do_sample=True,
    -        eos_token_id=tokenizer.eos_token_id,
    -        pad_token_id=tokenizer.pad_token_id
    -    )
    -gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    -print(gen_text)
    -

    Indeed, the parameters top_p, temperature, repetition_penalty, do_sample, etc., have a significant impact on the model’s generation output. -You can modify these parameters based on your specific use case.

    -

    In code generation scenarios, if you are using the sampling mode (do_sample=True), the following parameter settings can yield good results for the Pass@1 metric:

    -

    top_p: Set a higher value, such as 0.95, to retain highly probable generated words. This helps ensure more accurate and fluent generation results.

    -

    temperature: Set a lower value, such as 0.1, to reduce randomness. Lower temperature values make the generation output more deterministic.

    -

    These parameter combinations can control the diversity of the generated outputs while maintaining naturalness. Additionally, you can adjust other related parameters, such as repetition_penalty, to reduce repetition in the generated results.

    -

    If you choose the non-sampling mode (do_sample=False), you can consider the following parameter settings:

    -

    beam_num: Set a smaller value such as 1 or 3. beam_num=1 represents greedy decoding, which selects the most probable single generated word. beam_num=3 represents beam search mode, which considers multiple potential generation paths and chooses the best path among them.

    -

    5. FAQ

    -

    Q1:What should I do when cuda OOM happens?

    -

    If OOM (Out of Memory) occurs, you can mitigate it by reducing parameters such as per GPU batch size (the first argument when starting the training script) and seq_length. You can also set gradient_checkpointing=true, which significantly reduces memory usage but may slow down the training speed.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-introduction-zh/index.html b/docs/docs/mftcoder-introduction-zh/index.html deleted file mode 100644 index e1ca610..0000000 --- a/docs/docs/mftcoder-introduction-zh/index.html +++ /dev/null @@ -1,669 +0,0 @@ - - - - - - - - -MFTCoder 介绍 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder 介绍

    -
    -
    - - -

    项目简介

    -

    国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架;

    -

    Codefuse-MFTCoder 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。

    -

    项目框架

    -

    img_1.jpg

    -

    项目优势

    -

    :white_check_mark: 多任务:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去;

    -

    :white_check_mark: 多模型:支持最新的多个开源模型,包括gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2等;

    -

    :white_check_mark: 多框架:既支持主流开源的Accelerate+DeepSpeed/FSDP,也支持新开源的ATorch 框架

    -

    :white_check_mark: 高效微调:支持LoRA和QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景;

    -

    本项目主要内容如下:

    -
      -
    • 同时支持单任务SFT(Supervised FineTuning)和MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等
    • -
    • 支持QLoRA低成本高效指令微调、LoRA高效指令微调、全量参数高精度微调。
    • -
    • 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如DeepSeek-coder, Mistral, Mistral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA等。
    • -
    • 支持lora与base model进行权重合并,推理更便捷。
    • -
    • 整理并开源2个指令微调数据集:Evol-instruction-66kCodeExercise-Python-27k
    • -
    • 开源多个[Codefuse系列指令微调模型权重],具体参见我们的huggingface组织和modelscope组织下的模型:codefuse-ai huggingface or codefuse-ai 魔搭
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-introduction/index.html b/docs/docs/mftcoder-introduction/index.html deleted file mode 100644 index d6db24a..0000000 --- a/docs/docs/mftcoder-introduction/index.html +++ /dev/null @@ -1,691 +0,0 @@ - - - - - - - - -Introduction · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Introduction

    -
    -
    - - -

    Introduction

    -

    High Accuracy and efficiency Multi-task Fine-tuning framework for Code LLMs.

    -

    MFTCoder is an open-source project of CodeFuse for accurate and efficient Multi-task Fine-tuning(MFT) on Large Language Models(LLMs), especially on Code-LLMs(large language model for code tasks). -Moreover, we open source Code LLM models and code-related datasets along with the MFTCoder framework.

    -

    In MFTCoder, we released two codebases for finetuning Large Language Models:

    -
      -
    • MFTCoder-accelerate is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. We highly recommend you try this framework and make your finetuning accurate and efficient.
    • -
    • MFTCoder-atorch is based on the ATorch frameworks, which is a fast distributed training framework of LLM.
    • -
    -

    The aim of this project is to foster collaboration and share advancements in large language models, particularly within the domain of code development.

    -

    Frameworks

    -

    img.jpg

    -

    Highlights

    -

    :white_check_mark: Multi-task: Train models on multiple tasks while maintaining a balance between them. The models can even generalize to new, previously unseen tasks.

    -

    :white_check_mark: Multi-model: It integrates state-of-the-art open-source models such as gpt-neox, llama, llama-2, baichuan, Qwen, chatglm2, and more. (These finetuned models will be released in the near future.)

    -

    :white_check_mark: Multi-framework: It provides support for both Accelerate (with Deepspeed and FSDP) and ATorch

    -

    :white_check_mark: Efficient fine-tuning: It supports LoRA, QLoRA as well as Full-parameters training, enabling fine-tuning of large models with minimal resources. The training speed meets the demands of almost all fine-tuning scenarios.

    -

    The main components of this project include:

    -
      -
    • Support for both SFT (Supervised FineTuning) and MFT (Multi-task FineTuning). The current MFTCoder achieves data balance among multiple tasks, and future releases will achieve a balance between task difficulty and convergence speed during training.
    • -
    • Support for QLoRA instruction fine-tuning, LoRA fine-tuning as well as Full-parameters fine-tuning.
    • -
    • Support for most mainstream open-source large models, particularly those relevant to Code-LLMs, such as DeepSeek-coder, Mistral, Mixtral, Chatglm3, Code-LLaMA, Starcoder, Codegeex2, Qwen, GPT-Neox, and more.
    • -
    • Support for weight merging between the LoRA adaptor and base models, simplifying the inference process.
    • -
    • Release of 2 high-quality code-related instruction fine-tuning datasets: Evol-instruction-66k and CodeExercise-Python-27k.
    • -
    • Release of many Code LLMs, please refer to organizations: codefuse-ai on huggingface or codefuse-ai on modelscope.
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-quickstart-zh/index.html b/docs/docs/mftcoder-quickstart-zh/index.html deleted file mode 100644 index a6783b9..0000000 --- a/docs/docs/mftcoder-quickstart-zh/index.html +++ /dev/null @@ -1,766 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    环境

    -

    首先, 你需要将CUDA(>=11.4, 推荐11.7)及其相关驱动安装成功,并确保其工作正常, 并且安装基本的torch(>=2.0.0) -在requirements.txt下固定了几个主要的python包的版本,执行如下脚本即可:

    -
    sh init_env.sh
    -

    我们强烈建议您安装flash attention(>=2.1.0, 推荐2.3.6), 安装请参考 https://github.com/Dao-AILab/flash-attention

    -

    训练

    -

    如果你熟悉大模型训练的各种主流开源资源,例如 transformers, DeepSpeed, FSDP等, 为了用开源项目快速上手高性能微调,我们建议您尝试:

    -

    🚀🚀 MFTCoder-accelerate: Accelerate + DeepSpeed/FSDP Codebase for MFT(Multi-task Finetuning)

    -

    如果你想探索一些新兴的训练框架,可以尝试:

    -

    🚀 MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning)

    -

    模型

    -

    使用本项目的训练代码,以及上述训练数据,我们训练并在huggingface, modelscope开源了以下模型。

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模型HuggingFace链接魔搭 链接基座模型训练数据Batch SizeSeq Length
    🔥🔥🔥 CodeFuse-DeepSeek-33Bh-linkm-linkDeepSeek-coder-33B60万804096
    🔥🔥🔥 CodeFuse-Mixtral-8x7Bh-linkm-linkMixtral-8x7B60万804096
    🔥🔥🔥 CodeFuse-CodeLlama-34Bh-linkm-linkCodeLlama-34b-Python60万804096
    🔥🔥🔥 CodeFuse-CodeLlama-34B-4bitsh-linkm-linkCodeLlama-34b-Python4096
    🔥🔥🔥 CodeFuse-StarCoder-15Bh-linkm-linkStarCoder-15B60万804096
    🔥🔥🔥 CodeFuse-QWen-14Bh-linkm-linkQwen-14b110万2564096
    🔥🔥🔥 CodeFuse-CodeGeex2-6Bh-linkm-linkCodeGeex2-6B110万2564096
    -

    数据集

    -

    目前本项目主要整理了如下指令数据集,并将其整理成统一的数据格式,这两个指令微调数据集是我们多任务训练中数十个任务中的2个,未来我们会陆续开源更多的代码任务指令微调数据集:

    - - - - - - - - - - - - - - - - - -
    数据集介绍
    ⭐ Evol-instruction-66k基于开源open-evol-instruction-80k过滤低质量,重复和human eval相似的数据后得到的高质量代码类微调数据
    ⭐ CodeExercise-Python-27k高质量python练习题数据
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-quickstart/index.html b/docs/docs/mftcoder-quickstart/index.html deleted file mode 100644 index d48a788..0000000 --- a/docs/docs/mftcoder-quickstart/index.html +++ /dev/null @@ -1,782 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    Requirements

    -

    To begin, ensure that you have successfully installed CUDA (version >= 11.4, preferably 11.7) along with the necessary drivers. Additionally, make sure you have installed torch (version 2.0.1).

    -

    Next, we have provided an init_env.sh script to simplify the installation of required packages. Execute the following command to run the script:

    -
    sh init_env.sh
    -

    We highly recommend training with flash attention(version >= 2.1.0, preferably 2.3.6), please refer to the following link for installation instructions: https://github.com/Dao-AILab/flash-attention

    -

    Training

    -

    As mentioned above, we open source two training frameworks. You could refer to their own READMEs for more details as followed.

    -

    If you are familiar with open source transformers, DeepSpeed or FSDP, we highly recommend you try:

    -

    🚀🚀 MFTCoder-accelerate: Accelerate + Deepspeed/FSDP Codebase for MFT(Multi-task Finetuning)

    -

    If you want to explore some new framework like atorch, you could check:

    -

    🚀 MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning)

    -

    Models

    -

    We are excited to release the following two CodeLLMs trained by MFTCoder, now available on both HuggingFace and ModelScope:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelHuggingFace LinksModelScope LinksBase ModelNum of examples trainedBatch SizeSeq Length
    🔥 CodeFuse-DeepSeek-33Bh-linkm-linkDeepSeek-coder-33B60万804096
    🔥 CodeFuse-Mixtral-8x7Bh-linkm-linkMixtral-8x7B60万804096
    🔥 CodeFuse-CodeLlama-34Bh-linkm-linkCodeLlama-34b-Python60万804096
    🔥 CodeFuse-CodeLlama-34B-4bitsh-linkm-linkCodeLlama-34b-Python4096
    🔥 CodeFuse-StarCoder-15Bh-linkm-linkStarCoder-15B60万804096
    🔥 CodeFuse-QWen-14Bh-linkm-linkQwen-14b110万2564096
    🔥 CodeFuse-CodeGeex2-6Bh-linkm-linkCodeGeex2-6B110万2564096
    -

    Datasets

    -

    We are also pleased to release two code-related instruction datasets, meticulously selected from a range of datasets to facilitate multitask training. Moving forward, we are committed to releasing additional instruction datasets covering various code-related tasks.

    - - - - - - - - - - - - - - - - - -
    DatasetDescription
    ⭐ Evol-instruction-66kBased on open-evol-instruction-80k, filter out low-quality, repeated, and similar instructions to HumanEval, thus get high-quality code instruction dataset.
    ⭐ CodeExercise-Python-27kpython code exercise instruction dataset
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/mftcoder-zh/index.html b/docs/docs/mftcoder-zh/index.html deleted file mode 100644 index 0d56575..0000000 --- a/docs/docs/mftcoder-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/mftcoder-zh/ - - - - - - diff --git a/docs/docs/mftcoder/index.html b/docs/docs/mftcoder/index.html deleted file mode 100644 index af5fc59..0000000 --- a/docs/docs/mftcoder/index.html +++ /dev/null @@ -1,558 +0,0 @@ - - - - - - - - -MFTCoder · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder

    -
    -
    - - -

    MFTCoder

    -

    MFTCoder

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/multi-agent-zh/index.html b/docs/docs/multi-agent-zh/index.html deleted file mode 100644 index 6603761..0000000 --- a/docs/docs/multi-agent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E5%A4%9A%E6%99%BA%E8%83%BD%E4%BD%93/ - - - - - - diff --git a/docs/docs/multi-agent/index.html b/docs/docs/multi-agent/index.html deleted file mode 100644 index f5a6ace..0000000 --- a/docs/docs/multi-agent/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /multi-agent/ - - - - - - diff --git a/docs/docs/overview/codefuse-chatbot/index.html b/docs/docs/overview/codefuse-chatbot/index.html deleted file mode 100644 index 7312bfa..0000000 --- a/docs/docs/overview/codefuse-chatbot/index.html +++ /dev/null @@ -1,631 +0,0 @@ - - - - - - - - -Codefuse-ChatBot Development by Private Knowledge Augmentation · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Codefuse-ChatBot Development by Private Knowledge Augmentation

    -
    -
    - - -

    - 中文  |  English  -

    -

    This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface.

    -

    📜 Contents

    - -

    🤝 Introduction

    -

    💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. It transitions gradually from the traditional development and operations mode of querying information from various sources and operating on standalone, disparate platforms to an intelligent development and operations mode based on large-model Q&A, changing people’s development and operations habits.

    -
      -
    • 🧠 Intelligent Scheduling Core: Constructed a well-integrated scheduling core system that supports multi-mode one-click configuration, simplifying the operational process. Use Introduction
    • -
    • 💻 Comprehensive Code Repository Analysis: Achieved in-depth understanding at the repository level and coding and generation at the project file level, enhancing development efficiency.
    • -
    • 📄 Enhanced Document Analysis: Integrated document knowledge bases with knowledge graphs, providing deeper support for document analysis through enhanced retrieval and reasoning.
    • -
    • 🔧 Industry-Specific Knowledge: Tailored a specialized knowledge base for the DevOps domain, supporting the self-service one-click construction of industry-specific knowledge bases for convenience and practicality.
    • -
    • 🤖 Compatible Models for Specific Verticals: Designed small models specifically for the DevOps field, ensuring compatibility with related DevOps platforms and promoting the integration of the technological ecosystem.
    • -
    -

    🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API. Access Demo

    -

    👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the CodefuseGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of “Making Development Seamless for Everyone.”

    -
    - Image -
    -

    🌍 Relying on open-source LLM and Embedding models, this project can achieve offline private deployments based on open-source models. Additionally, this project also supports the use of the OpenAI API.

    -

    👥 The core development team has been long-term focused on research in the AIOps + NLP domain. We initiated the DevOpsGPT project, hoping that everyone could contribute high-quality development and operations documents widely, jointly perfecting this solution to achieve the goal of “Making Development Seamless for Everyone.”

    -

    🧭 Technical Route

    -
    - Image -
    -
      -
    • 🧠 Multi-Agent Schedule Core: Easily configurable to create interactive intelligent agents.
    • -
    • 🕷️ Multi Source Web Crawl: Offers the capability to crawl specified URLs for collecting the required information.
    • -
    • 🗂️ Data Processor: Effortlessly handles document loading, data cleansing, and text segmentation, integrating data from different sources.
    • -
    • 🔤 Text Embedding & Index: Users can easily upload files for document retrieval, optimizing the document analysis process.
    • -
    • 🗄️ Vector Database & Graph Database: Provides flexible and powerful data management solutions.
    • -
    • 📝 Prompt Control & Management: Precisely defines the contextual environment for intelligent agents.
    • -
    • 🚧 SandBox: Safely executes code compilation and actions.
    • -
    • 💬 LLM::Supports various open-source models and LLM interfaces.
    • -
    • 🛠️ API Management: Enables rapid integration of open-source components and operational platforms.
    • -
    -

    For implementation details, see: Technical Route Details

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/codefuse-devops-eval/index.html b/docs/docs/overview/codefuse-devops-eval/index.html deleted file mode 100644 index 5deb088..0000000 --- a/docs/docs/overview/codefuse-devops-eval/index.html +++ /dev/null @@ -1,1407 +0,0 @@ - - - - - - - - -codefuse-devops-eval · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    codefuse-devops-eval

    -
    -
    - - -

    - -

    DevOps-Eval is a comprehensive evaluation suite specifically designed for foundation models in the DevOps field. We hope DevOps-Eval could help developers, especially in the DevOps field, track the progress and analyze the important strengths/shortcomings of their models.

    -

    📚 This repo contains questions and exercises related to DevOps, including AIOps and ToolLearning;

    -

    💥️ There are currently 7486 multiple-choice questions spanning 8 diverse general categories, as shown below.

    -

    🔥 There are a total of 2840 samples in the AIOps subcategory, covering scenarios such as log parsing, time series anomaly detection, time series classification, time series forecasting, and root cause analysis.

    -

    🔧 There are a total of 1509 samples in the ToolLearning subcategory, covering 239 tool scenes across 59 fields.

    -

    -

    🏆 Leaderboard

    -

    Below are zero-shot and five-shot accuracies from the models that we evaluate in the initial release. We note that five-shot performance is better than zero-shot for many instruction-tuned models.

    -

    👀 DevOps

    -

    Zero Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelNameplancodebuildtestreleasedeployoperatemonitorAVG
    DevOpsPal-14B-Chat60.6178.3584.8684.6587.2682.7569.8979.1778.23
    DevOpsPal-14B-Base54.5577.8283.4985.9686.3281.9671.1882.4178.23
    Qwen-14B-Chat60.6175.485.3284.2189.6282.7569.5780.5677.18
    Qwen-14B-Base57.5873.8184.485.5386.3281.1870.0580.0976.19
    Baichuan2-13B-Base60.6169.4279.8279.8282.5581.1870.3783.873.73
    Baichuan2-13B-Chat60.6168.4377.9880.781.683.5367.6384.7272.9
    DevOpsPal-7B-Chat54.5569.1183.9482.0276.898064.7377.7871.92
    DevOpsPal-7B-Base54.5568.9682.1178.9580.6676.4765.5478.771.69
    Qwen-7B-Base53.0368.1378.975.4480.198065.0680.0971.09
    Qwen-7B-Chat57.5866.0180.2879.8276.8977.6562.6479.1769.75
    Baichuan2-7B-Chat54.5563.6677.9876.3271.773.3359.4279.6366.97
    Internlm-7B-Chat60.6162.1577.0676.3266.9874.5160.3978.2466.27
    Baichuan2-7B-Base56.0662.4575.6970.6174.0669.861.6775.9366.21
    Internlm-7B-Base54.5558.2979.3678.9577.8370.5965.8675.9365.99
    -

    Five Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelNameplancodebuildtestreleasedeployoperatemonitorAVG
    DevOpsPal-14B-Chat63.6479.4981.6585.9686.7986.6772.9581.4879.69
    DevOpsPal-14B-Base62.1280.5582.5785.5385.8584.7171.9880.0979.63
    Qwen-14B-Chat65.157682.5785.5384.9184.3170.8581.4877.81
    Qwen-14B-Base66.6776.1584.485.5386.3280.3972.4680.5677.56
    Baichuan2-13B-Base63.6471.3980.7382.4681.1384.3173.7585.1975.8
    Qwen-7B-Base75.7672.5278.981.1483.9681.1870.3781.9475.36
    Baichuan2-13B-Chat62.1269.9576.6184.2183.4979.6171.9880.5674.12
    DevOpsPal-7B-Chat66.6769.9583.9481.1480.1982.7568.676.8573.61
    DevOpsPal-7B-Base69.769.4982.1181.1482.5582.3567.1579.1773.35
    Qwen-7B-Chat65.1566.5482.5781.5881.681.1865.3881.0271.69
    Baichuan2-7B-Base60.6167.2276.617577.8378.4367.3179.6370.8
    Internlm-7B-Chat60.6163.0679.8280.2667.9275.6960.0677.3169.21
    Baichuan2-7B-Chat60.6164.9581.1975.8871.2375.6964.979.1769.05
    Internlm-7B-Base62.1265.2577.5280.774.0678.8263.4575.4667.17
    -

    🔥 AIOps

    -
    -

    Zero Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelNameLogParsingRootCauseAnalysisTimeSeriesAnomalyDetectionTimeSeriesClassificationTimeSeriesForecastingAVG
    Qwen-14B-Base66.2958.825.3343.562.552.25
    DevOpsPal-14B—Base63.1453.623.3343.564.0650.49
    Qwen-14B-Chat64.5751.622.673662.548.94
    DevOpsPal-14B—Chat6056244357.8148.8
    Qwen-7B-Base5039.222.675443.7541.48
    DevOpsPal-7B—Chat56.5730.425.334544.0640.92
    Baichuan2-13B-Chat641821.3337.546.8839.3
    Qwen-7B-Chat57.4338.822.3339.525.3136.97
    Internlm-7B—Chat58.868.822.3328.551.2536.34
    Baichuan2-7B-Chat60.86102834.539.0636.34
    Baichuan2-7B-Base53.4312.827.6736.540.3135.49
    Baichuan2-13B-Base5412.42334.542.8134.86
    DevOpsPal-7B—Base46.5720.8253438.7533.94
    Internlm-7B—Base48.5718.823.3337.533.7533.1
    -

    One Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelNameLogParsingRootCauseAnalysisTimeSeriesAnomalyDetectionTimeSeriesClassificationTimeSeriesForecastingAVG
    DevOpsPal-14B—Chat66.2980.823.3344.556.2554.44
    DevOpsPal-14B—Base607425.3343.552.551.13
    Qwen-14B-Base64.2974.42848.540.3150.77
    Qwen-7B-Base5660.827.674457.1949.44
    Qwen-14B-Chat49.7165.628.674842.1946.13
    Baichuan2-13B-Base5643.224.334146.8842.89
    Baichuan2-7B-Chat58.5731.62731.551.8841.83
    DevOpsPal-7B—Base52.8644.42844.536.2541.2
    Baichuan2-7B-Base48.2940.4274240.9439.86
    Qwen-7B-Chat54.575229.6726.527.1938.73
    Baichuan2-13B-Chat57.4344.42525.530.6337.75
    DevOpsPal-7B—Chat56.5727.225.3341.533.4437.46
    Internlm-7B—Chat62.5712.822.332150.3136.69
    Internlm-7B—Base4833.2293531.5635.85
    -
    -

    🔧 ToolLearning

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FuncCall-Fillerdataset_namefccr1-fcffr1-fcfnr1-fcfpr1-fcfniraar
    Qwen-14b-chatluban6110097.6863.3210069.46
    Qwen-7b-chatluban50.5810098.0752.5110063.59
    Baichuan-7b-chatluban60.2310097.362.9399.6161.12
    Internlm-chat-7bluban47.8810096.1451.7499.6161.85
    Qwen-14b-chatfc_data98.3799.7399.8698.7810081.58
    Qwen-7b-chatfc_data99.4699.8610099.5910079.25
    Baichuan-7b-chatfc_data97.9699.3210098.6410089.53
    Internlm-chat-7bfc_data94.2995.7810098.510088.19
    CodeLLaMa-7bfc_data98.7899.7310099.0510094.7
    CodeLLaMa-7b-16fc_data98.199.8799.7398.510093.14
    CodeFuse-7b-4kfc_data98.9199.8799.8799.1810089.5
    -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/codefuse-devops-model/index.html b/docs/docs/overview/codefuse-devops-model/index.html deleted file mode 100644 index 57f8611..0000000 --- a/docs/docs/overview/codefuse-devops-model/index.html +++ /dev/null @@ -1,784 +0,0 @@ - - - - - - - - -codefuse-devops-model · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    codefuse-devops-model

    -
    -
    - - -

    codeFuse-devops-model

    -

    DevOps-Model is a large language model for the Chinese DevOps field jointly released by Ant Group and Peking University. By collecting professional data related to the DevOps domain and conducting additional training and alignment on the model, a large model has been produced to help engineers enhance efficiency throughout the entire development and operations lifecycle. This fills the current gap in large models within the DevOps domain, with the aim to provide solutions to any problems by asking DevOps-Model! -We have now open-sourced two versions of the model, the Base model with additional training and the Chat model after alignment, in both 7B and 14B specifications, as well as the corresponding training code. We welcome everyone to collaborate and contribute!

    -

    Project Address

    -

    GitHub Address: https://github.com/codefuse-ai/CodeFuse-DevOps-Model/tree/main -ModelScope Address:

    - -

    Evaluation Questions

    -

    For model evaluation, there was initially no benchmark for testing in the DevOps domain, so we first selected some domain-related multiple-choice questions from general open-source tests for evaluation. The specific test data is as follows:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    DatasetSubjectTotal Questions
    CMMLUComputer science 204
    Computersecurity171
    Machinelearning122
    CEvalcollege programming37
    CEvalcomputer_architecture21
    CEvalcomputer_network19
    TotalTotal Questions574
    -

    Evaluation Methods

    -

    Since all are multiple-choice questions, we adopted the method of selecting the highest-scoring Token among the four option Tokens in the first Token produced by the model as the model’s answer to the question. We also tested Zero-shot and Five-shot results.

    -

    Evaluation Results

    -

    -

    The specific scores are shown in the table below:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Scale of ParametersModelModel SizeZero-shot ScoreFive-shot Score
    10+ BDevOps-Model-14B-Base14B70.7373.00
    10+ BQwen-14B-Base14B69.1671.25
    10+ BBaichuan2-13B-Base13B55.7561.15
    10+ BDevOps-Model-14B-Chat14B74.0475.96
    10+ BQwen-14B-Chat14B69.1670.03
    10+ BBaichuan2-13B-Chat13B52.7955.23
    7BDevOps-Model-7B-Base7B62.7262.02
    7BQwen-7B-Base7B55.7556.0
    7BBaichuan2-7B-Base7B49.3055.4
    7BInternlm-7B-Base7B47.5652.6
    7BDevOps-Model-7B-Chat7B62.2064.11
    7BQwen-7B-Chat7B46.0052.44
    7BBaichuan2-7B-Chat7B52.2654.46
    7BInternlm-7B-Chat7B52.6155.75
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/codefuse-mft-vlm/index.html b/docs/docs/overview/codefuse-mft-vlm/index.html deleted file mode 100644 index e589418..0000000 --- a/docs/docs/overview/codefuse-mft-vlm/index.html +++ /dev/null @@ -1,616 +0,0 @@ - - - - - - - - -CodeFuse-MFT-VLM · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-MFT-VLM

    -
    -
    - - -

    CodeFuse-VLM

    -

    CodeFuse-VLM is a Multimodal LLM(MLLM) framework that provides users with multiple vision encoders, multimodal alignment adapters, and LLMs. Through CodeFuse-VLM framework, users are able to customize their own MLLM model to adapt their own tasks. -As more and more models are published on Huggingface community, there will be more open-source vision encoders and LLMs. Each of these models has their own specialties, e.g. Code Llama is good at code-related tasks but has poor performance for Chinese tasks. Therefore, we built CodeFuse-VLM framework to support multiple vision encoders, multimodal alignment adapters, and LLMs to adapt different types of tasks. -img.jpg

    -

    Under CodeFuse-VLM framework, we use cross attention multimodal adapter, Qwen-14B LLM, and Qwen-VL’s vision encoder to train CodeFuse-VLM-14B model. On multiple benchmarks, our CodeFuse-VLM-14B shows superior performances over Qwen-VL and LLAVA-1.5. -img.jpg

    -

    Here is the table for different MLLM model’s performance on benchmarks

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelMMBenchMMBench-CNVqaV2GQATextVQAVizwiz
    LLAVA-1.567.763.680.063.361.353.6
    Qwen-VL60.656.778.257.563.838.9
    CodeFuse-VLM-14B75.769.879.359.463.945.3
    -

    Our model achieved high ranking on MMBenchmark: https://mmbench.opencompass.org.cn/leaderboard

    -

    Here’s our model’s demo video

    -

    https://private-user-images.githubusercontent.com/22836551/300386230-8e64f615-ac0e-447e-9695-c96b254d484f.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjExODksIm5iZiI6MTcwNjUyMDg4OSwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzODYyMzAtOGU2NGY2MTUtYWMwZS00NDdlLTk2OTUtYzk2YjI1NGQ0ODRmLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5MzQ0OVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQ5NzNjM2U1ZWU4NDU0Yzc5NmE4ZTM1NzY2ZjU4YjRjY2ZhNjMzODk0ZDgzMDg4N2FjYjZhYTllM2E3NTAyMWQmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.pr-ad7rKYBgk26DTItj2q2q9I5dRWnBNHbV9M7GSVCo

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/codefuse-modelcache/index.html b/docs/docs/overview/codefuse-modelcache/index.html deleted file mode 100644 index 71fe8bc..0000000 --- a/docs/docs/overview/codefuse-modelcache/index.html +++ /dev/null @@ -1,616 +0,0 @@ - - - - - - - - -CodeFuse-ModelCache · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-ModelCache

    -
    -
    - - -

    -

    -

    -

    - 中文 | - English -

    -

    -
    -

    Contents

    - -

    news

    -
      -
    • 🔥🔥[2023.12.10] we integrate LLM embedding frameworks such as ’llmEmb’, ‘ONNX’, ‘PaddleNLP’, ‘FastText’, alone with the image embedding framework ’timm’, to bolster embedding functionality.
    • -
    • 🔥🔥[2023.11.20] codefuse-ModelCache has integrated local storage, such as sqlite and faiss, providing users with the convenience of quickly initiating tests.
    • -
    • [2023.08.26] codefuse-ModelCache…
    • -
    -

    Introduction

    -

    Codefuse-ModelCache is a semantic cache for large language models (LLMs). By caching pre-generated model results, it reduces response time for similar requests and improves user experience.
    This project aims to optimize services by introducing a caching mechanism. It helps businesses and research institutions reduce the cost of inference deployment, improve model performance and efficiency, and provide scalable services for large models. Through open-source, we aim to share and exchange technologies related to large model semantic cache.

    -

    modules

    -

    modelcache modules

    -

    Acknowledgements

    -

    This project has referenced the following open-source projects. We would like to express our gratitude to the projects and their developers for their contributions and research.
    GPTCache

    -

    Contributing

    -

    ModelCache is a captivating and invaluable project, whether you are an experienced developer or a novice just starting out, your contributions to this project are warmly welcomed. Your involvement in this project, be it through raising issues, providing suggestions, writing code, or documenting and creating examples, will enhance the project’s quality and make a significant contribution to the open-source community.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/codefuse-query/index.html b/docs/docs/overview/codefuse-query/index.html deleted file mode 100644 index 88b0d3f..0000000 --- a/docs/docs/overview/codefuse-query/index.html +++ /dev/null @@ -1,562 +0,0 @@ - - - - - - - - -CodeFuse-Query · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-Query

    -
    -
    - - -

    CodeFuse-Query

    -

    With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. For example, a security team might need sophisticated algorithms like context-sensitive taint analysis to review smaller codebases, while project managers might need a lighter algorithm, such as one that calculates cyclomatic complexity, to measure developer productivity on larger codebases.

    -

    These diversified needs, coupled with the common computational resource constraints in large organizations, pose a significant challenge. Traditional tools, with their problem-specific computation methods, often fail to scale in such environments. This is why we introduced CodeQuery, a centralized data platform specifically designed for large-scale static analysis.
    -In implementing CodeQuery, we treat source code and analysis results as data, and the execution process as big data processing, a significant departure from traditional tool-centric approaches. We leverage common systems in large organizations, such as data warehouses, data computation facilities like MaxCompute and Hive, OSS object storage, and flexible computing resources like Kubernetes, allowing CodeQuery to integrate seamlessly into these systems. This approach makes CodeQuery highly maintainable and scalable, capable of supporting diverse needs and effectively addressing changing demands. Furthermore, CodeQuery’s open architecture encourages interoperability between various internal systems, facilitating seamless interaction and data exchange. This level of integration and interaction not only increases the degree of automation within the organization but also improves efficiency and reduces the likelihood of manual errors. By breaking down information silos and fostering a more interconnected, automated environment, CodeQuery significantly enhances the overall productivity and efficiency of the software development process.
    -Moreover, CodeQuery’s data-centric approach offers unique advantages when addressing domain-specific challenges in static source code analysis. For instance, source code is typically a highly structured and interconnected dataset, with strong informational and relational ties to other code and configuration files. By treating code as data, CodeQuery can adeptly handle these issues, making it especially suitable for use in large organizations where codebases evolve continuously but incrementally, with most code undergoing minor changes daily while remaining stable. CodeQuery also supports use cases like code-data based Business Intelligence (BI), generating reports and dashboards to aid in monitoring and decision-making processes. Additionally, CodeQuery plays an important role in analyzing training data for large language models (LLMs), providing deep insights to enhance the overall effectiveness of these models.

    -

    In the current field of static analysis, CodeQuery introduces a new paradigm. It not only meets the needs of analyzing large, complex codebases but is also adaptable to the ever-changing and diversified scenarios of static analysis. CodeQuery’s data-centric approach gives it a unique advantage in dealing with code analysis issues in big data environments. Designed to address static analysis problems in large-scale software development settings, it views both source code and analysis results as data, allowing it to integrate flexibly into various systems within large organizations. This approach not only enables efficient handling of large codebases but can also accommodate various complex analysis needs, thereby making static analysis work more effective and accurate.

    -

    The characteristics and advantages of CodeQuery can be summarized as follows:

    -
      -
    • Highly Scalable: CodeQuery can handle large codebases and adapt to different analysis needs. This high level of scalability makes CodeQuery particularly valuable in large organizations.
    • -
    • Data-Centric: By treating source code and analysis results as data, CodeQuery’s data-centric approach gives it a distinct edge in addressing code analysis problems in big data environments.
    • -
    • Highly Integrated: CodeQuery can integrate seamlessly into various systems within large organizations, including data warehouses, data computation facilities, object storage, and flexible computing resources. This high level of integration makes the use of CodeQuery in large organizations more convenient and efficient.
    • -
    • Supports Diverse Needs: CodeQuery can process large codebases and accommodate various complex analysis needs, including QoS analysis, cross-language analysis, algorithmic needs, and performance requirements.
    • -
    -

    CodeQuery is a powerful static code analysis platform, suitable for large-scale, complex codebase analysis scenarios. Its data-centric approach and high scalability give it a unique advantage in the modern software development environment. As static code analysis technology continues to evolve, CodeQuery is expected to play an increasingly important role in this field.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/fastertransformer4codefuse/index.html b/docs/docs/overview/fastertransformer4codefuse/index.html deleted file mode 100644 index ccb9ec8..0000000 --- a/docs/docs/overview/fastertransformer4codefuse/index.html +++ /dev/null @@ -1,799 +0,0 @@ - - - - - - - - -FasterTransformer4CodeFuse · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    FasterTransformer4CodeFuse

    -
    -
    - - -

    FasterTransformer4CodeFuse

    -

    FasterTransformer4CodeFuse

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/index.html b/docs/docs/overview/index.html deleted file mode 100644 index 1849c07..0000000 --- a/docs/docs/overview/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /docs/en_overview/ - - - - - - diff --git a/docs/docs/overview/mftcoder/index.html b/docs/docs/overview/mftcoder/index.html deleted file mode 100644 index 6a80d88..0000000 --- a/docs/docs/overview/mftcoder/index.html +++ /dev/null @@ -1,726 +0,0 @@ - - - - - - - - -MFTCoder: High Accuracy and Efficiency Multi-task Fine-Tuning Framework · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder: High Accuracy and Efficiency Multi-task Fine-Tuning Framework

    -
    -
    - - -
    -

    - 🤗 HuggingFace - • 🤖 ModelScope - -

    -

    [中文] [English]

    -
    -

    Contents

    - -

    News

    -

    🔥🔥🔥 [2024/01/17] We released MFTCoder v0.3.0, mainly for MFTCoder-accelerate. It now supports new models like Mixtral(MoE), DeepSeek-coder, chatglm3. It supports FSDP as an option. It also supports Self-paced Loss as a solution for convergence balance in Multitask Fine-tuning.

    -

    🔥🔥🔥 [2024/01/17] CodeFuse-DeepSeek-33B has been released, achieving a pass@1 (greedy decoding) score of 78.7% on HumanEval. It ranks as the top-1 LLM on the Bigcode Leaderboard in terms of win-rate; the official result will be published later.

    -

    🔥🔥🔥 [2024/01/17] CodeFuse-Mixtral-8x7B has been released, achieving a pass@1 (greedy decoding) score of 56.1% on HumanEval.

    -

    🔥🔥 [2023/11/07] MFTCoder Paper has been released on Arxiv, which discloses technique details of multi-task-fine-tuning.

    -

    🔥🔥 [2023/10/20] CodeFuse-QWen-14B has been released, achieving a pass@1 (greedy decoding) score of 48.8% on HumanEval, which gains a 16% absolute improvement over the base model QWen-14B.

    -

    🔥🔥 [2023/09/27] CodeFuse-StarCoder-15B has been released, achieving a pass@1 (greedy decoding) score of 54.9% on HumanEval.

    -

    🔥🔥 [2023/09/26] We are pleased to announce the release of the 4-bit quantized version of CodeFuse-CodeLlama-34B. Despite the quantization process, the model still achieves a remarkable 73.8% accuracy (greedy decoding) on the HumanEval pass@1 metric.

    -

    🔥🔥 [2023/09/07] We released CodeFuse-CodeLlama-34B, which achieves the 74.4% Python Pass@1 (greedy decoding) and surpasses GPT4 (2023/03/15) and ChatGPT-3.5 on the HumanEval Benchmarks.

    -

    🔥🔥 [2023/08/26] We released MFTCoder-v0.1.0 which supports finetuning Code Llama, Llama, Llama2, StarCoder, ChatGLM2, CodeGeeX2, Qwen, and GPT-NeoX models with LoRA/QLoRA.

    -

    HumanEval Performance

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelHumanEval(Pass@1)Date
    CodeFuse-DeepSeek-33B78.7%2024/01
    CodeFuse-CodeLlama-34B74.4%2023/09
    CodeFuse-CodeLlama-34B-4bits73.8%2023/09
    WizardCoder-Python-34B-V1.073.2%2023/08
    GPT-4(zero-shot)67.0%2023/03
    PanGu-Coder2 15B61.6%2023/08
    CodeFuse-Mixtral-8x7B56.1%2024/01
    CodeFuse-StarCoder-15B54.9%2023/08
    CodeLlama-34b-Python53.7%2023/08
    CodeFuse-QWen-14B48.8%2023/10
    CodeLlama-34b48.8%2023/08
    GPT-3.5(zero-shot)48.1%2022/11
    OctoCoder46.2%2023/08
    StarCoder-15B33.6%2023/05
    QWen-14B32.3%2023/10
    -

    Articles

    -

    MFT Arxiv paper

    -

    Introduction

    -

    High Accuracy and efficiency Multi-task Fine-tuning framework for Code LLMs.

    -

    MFTCoder is an open-source project of CodeFuse for accurate and efficient Multi-task Fine-tuning (MFT) on Large Language Models (LLMs), especially on Code-LLMs (large language models for code tasks). -Moreover, we open source Code LLM models and code-related datasets along with the MFTCoder framework.

    -

    In MFTCoder, we released two codebases for finetuning Large Language Models:

    -
      -
    • MFTCoder-accelerate is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. We highly recommend you try this framework and make your fine-tuning accurate and efficient.
    • -
    • MFTCoder-atorch is based on the ATorch frameworks, which is a fast distributed training framework of LLM.
    • -
    -

    The aim of this project is to foster collaboration and share advancements in large language models, particularly within the domain of code development.

    -

    Frameworks

    -

    img.jpg

    -

    Highlights

    -

    :white_check_mark: Multi-task: Train models on multiple tasks while maintaining a balance between them. The models can even generalize to new, previously unseen tasks.

    -

    :white_check_mark: Multi-model: It integrates state-of-the-art open-source models such as gpt-neox, llama, llama-2, baichuan, Qwen, chatglm2, and more. (These finetuned models will be released in the near future.)

    -

    :white_check_mark: Multi-framework: It provides support for both Accelerate (with Deepspeed and FSDP) and ATorch

    -

    :white_check_mark: Efficient fine-tuning: It supports LoRA, QLoRA as well as Full-parameters training, enabling fine-tuning of large models with minimal resources. The training speed meets the demands of almost all fine-tuning scenarios.

    -

    The main components of this project include:

    -
      -
    • Support for both SFT (Supervised FineTuning) and MFT (Multi-task FineTuning). The current MFTCoder achieves data balance among multiple tasks, and future releases will achieve a balance between task difficulty and convergence speed during training.
    • -
    • Support for QLoRA instruction fine-tuning, LoRA fine-tuning as well as Full-parameters fine-tuning.
    • -
    • Support for most mainstream open-source large models, particularly those relevant to Code-LLMs, such as DeepSeek-coder, Mistral, Mixtral, Chatglm3, Code-LLaMA, Starcoder, Codegeex2, Qwen, GPT-Neox, and more.
    • -
    • Support for weight merging between the LoRA adaptor and base models, simplifying the inference process.
    • -
    • Release of 2 high-quality code-related instruction fine-tuning datasets: Evol-instruction-66k and CodeExercise-Python-27k.
    • -
    • Release of many Code LLMs, please refer to organizations: codefuse-ai on huggingface or codefuse-ai on modelscope.
    • -
    -

    Contributing

    -

    Contributions are welcome! If you have any suggestions, ideas, bug reports, or new model/feature supported, please open an issue or submit a pull request.

    -

    Citation

    -

    If you find our work useful or helpful for your R&D works, please feel free to cite our paper as below.

    -
    @article{mftcoder2023,
    -      title={MFTCoder: Boosting Code LLMs with Multitask Fine-Tuning}, 
    -      author={Bingchang Liu and Chaoyu Chen and Cong Liao and Zi Gong and Huan Wang and Zhichao Lei and Ming Liang and Dajun Chen and Min Shen and Hailian Zhou and Hang Yu and Jianguo Li},
    -      year={2023},
    -      journal={arXiv preprint arXiv},
    -      archivePrefix={arXiv},
    -      eprint={2311.02303}
    -}
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/overview/test-agent-your-ai-test-assistant/index.html b/docs/docs/overview/test-agent-your-ai-test-assistant/index.html deleted file mode 100644 index 9b3be59..0000000 --- a/docs/docs/overview/test-agent-your-ai-test-assistant/index.html +++ /dev/null @@ -1,915 +0,0 @@ - - - - - - - - -Test-Agent: Your AI Test Assistant · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Test-Agent: Your AI Test Assistant

    -
    -
    - - -

    Local Mac M1 Experience

    -

    图片

    -

    ModelScope Experience

    -

    ModelScope Model Access Link: ModelScope TestGPT-7B -MS

    -

    What is Test Agent? (Introduction)

    -

    Test Agent aims to build an “intelligent agent” in the testing domain, integrating large models with engineering technologies in the quality domain to promote the generational upgrade of quality technology. We look forward to collaborating with community members to create innovative solutions in the testing domain, establish a 24-hour online testing assistant service, and make testing as smooth as silk.

    -

    Current Features (Features)

    -
      -
    • Model: This release open-sources the TestGPT-7B model for the testing domain. The model is based on CodeLlama-7B and has been fine-tuned for related downstream tasks: -
        -
      • Multilingual Test Case Generation (Java/Python/Javascript): This has always been an area of great interest to both academia and industry, with new products and tools like EvoSuite, Randoop, SmartUnit, etc., constantly being incubated. However, traditional test case generation has pain points that are difficult to address. Test case generation based on large models is superior to traditional tools in terms of readability, completeness of test scenarios, and multilingual support. This update focuses on multilingual test case generation, initially including Java, Python, and Javascript, and will gradually introduce Go, C++, and other languages in future releases.
      • -
      • Test Case Assert Completion: Analyzing the current state of test cases, we found that a certain proportion of existing test cases in the code repositories do not contain Asserts. Test cases without Asserts may pass during regression but fail to detect issues. Therefore, we expanded the scenario of automatic completion of test case Asserts. With this model capability, combined with the right engineering support, it’s possible to perform batch automatic completion for the entire test case repository, intelligently raising the quality level of the project.
      • -
      -
    • -
    • Engineering Framework: Local model quick release and experience engineering framework -
        -
      • ChatBot page
      • -
      • Quick model launch
      • -
      • Private deployment, localized GPT large model interactions with your data and environment, no risk of data leakage, 100% safe.
      • -
      -
    • -
    -

    We will continue to iterate on the model and engineering capabilities:

    -
      -
    • Continuously adding more exciting test domain application scenarios, such as domain knowledge Q&A, test scenario analysis, etc.
    • -
    • Supporting the open copilot engineering framework focused on testing scenarios, such as intelligent embedding of testing domain knowledge, a common tool API system, intelligent testing Agent, and more, so stay tuned!
    • -
    • Expanding from a 7B base to 13B and 34B models gradually. Stay tuned!
    • -
    -

    The Most Powerful 7B Test Domain Large Model (Model)

    -

    Currently, within TestAgent, we default to using the TestGPT-7B model. Compared to existing open-source models, the TestGPT-7B model leads the industry in case execution pass rate (pass@1) and test scenario coverage (average number of test scenarios). -The core capability evaluation results of the TestGPT-7B model are as follows:

    -

    Multilingual Test Case Generation For the three supported languages of the model: Java, Python, and Javascript, the Pass@1 evaluation results are as follows:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelJava pass@1Java Average number of test scenariosPython pass@1Python Average number of test scenariosJavascript pass@1Javascript Average number of test scenarios
    TestGPT-7B48.6%4.3735.67%3.5636%2.76
    CodeLlama-13B-Instruct40.54%1.0830.57%1.6531.7%3.13
    Qwen-14B-Chat10.81%2.7815.9%1.329.15%4.22
    Baichuan2-13B-Chat13.5%2.2412.7%2.126.1%3.31
    -
      -
    • Test Case Assert Completion -Currently, the model supports Assert completion for Java cases, and the Pass@1 evaluation
    • -
    - - - - - - - - - - - - - - - -
    Modelpass@1Percentage of strong validation
    Codefuse-TestGPT-7B71.1%100%
    -

    Engineering Architecture

    -

    JG

    -

    The clarion call for large models has been sounded, and large models in the testing domain are continuously evolving. With the rich world knowledge accumulated during the pre-training process, they have demonstrated extraordinary reasoning and decision-making abilities in complex interactive environments.

    -

    Despite significant achievements of the foundational models in the testing domain, there are still some limitations. Testing tasks in specific domains often require specialized tools or domain knowledge. For instance, foundational models can complete tasks such as single-instance test code generation and test text generation through pre-trained knowledge, but when dealing with complex integrated test case generation, domain-specific case creation, and interactions with test process pipelines, more specialized tools and domain knowledge are necessary. Therefore, integrating specialized tools with foundational models can fully harness their respective strengths. Specialized tools can address insufficiencies in model timeliness, enhance professional knowledge, and improve interpretability and robustness. On the other hand, foundational models possess human-like reasoning and planning abilities, capable of understanding complex data and scenarios, and interacting with the real world.

    -

    Building upon the open model engineering deployment and ChatBot foundation in this release, we will continue to invest deeply in the open-source testing domain. Collaborating with community developers who share similar interests, we aim to create the most advanced engineering system for tools in the testing domain, an intelligent testing assistant, and open-source testing engineering!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/quickstart-zh/index.html b/docs/docs/quickstart-zh/index.html deleted file mode 100644 index 54ea7ea..0000000 --- a/docs/docs/quickstart-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - - - - - - diff --git a/docs/docs/quickstart/index.html b/docs/docs/quickstart/index.html deleted file mode 100644 index 2e7ddf6..0000000 --- a/docs/docs/quickstart/index.html +++ /dev/null @@ -1,493 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    - 中文  |  English  -

    -

    🚀 Quick Start

    -

    To deploy private models, please install the NVIDIA driver by yourself. -This project has been tested on Python 3.9.18 and CUDA 11.7 environments, as well as on Windows and macOS systems with x86 architecture. -For Docker installation, private LLM access, and related startup issues, see: Start-detail…

    -

    Preparation of Python environment

    -
      -
    • It is recommended to use conda to manage the python environment (optional)
    • -
    -
    # Prepare conda environment
    -conda create --name Codefusegpt python=3.9
    -conda activate Codefusegpt
    -
      -
    • Install related dependencies
    • -
    -
    cd Codefuse-ChatBot
    -pip install -r requirements.txt
    -

    Basic Configuration

    -
    # Modify the basic configuration for service startup
    -cd configs
    -cp model_config.py.example model_config.py
    -cp server_config.py.example server_config.py
    -
    -# model_config#11~12 If you need to use the OpenAI interface, the OpenAI interface key
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -# Replace with the api_base_url you need
    -os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
    -
    -# vi model_config#LLM_MODEL The language model you need to choose
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -
    -# vi model_config#EMBEDDING_MODEL The private vector model you need to choose
    -EMBEDDING_ENGINE = 'model'
    -EMBEDDING_MODEL = "text2vec-base"
    -
    -# Example of vector model access, modify model_config#embedding_model_dict
    -# If the model directory is:
    -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese
    -# Configure as follows
    -"text2vec-base": "shibing624/text2vec-base-chinese"
    -
    -
    -# vi server_config#8~14, It's recommended to use a container to start the service to prevent environment conflicts when installing other dependencies using the codeInterpreter feature
    -DOCKER_SERVICE = True
    -# Whether to use a container sandbox
    -SANDBOX_DO_REMOTE = True
    -

    Start the Service

    -

    By default, only webui related services are started, and fastchat is not started (optional).

    -
    # If you need to support the codellama-34b-int4 model, you need to patch fastchat
    -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# Modify examples/llm_api.py#258 to kwargs={"gptq_wbits": 4},
    -
    -# Start llm-service (optional)
    -python examples/llm_api.py
    -

    For more LLM access methods, see Details…

    -
    # After completing the server_config.py configuration, you can start with one click
    -cd examples
    -python start.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/start-detail-zh/index.html b/docs/docs/start-detail-zh/index.html deleted file mode 100644 index 6b889c6..0000000 --- a/docs/docs/start-detail-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E5%90%AF%E5%8A%A8%E6%98%8E%E7%BB%86/ - - - - - - diff --git a/docs/docs/start-detail/index.html b/docs/docs/start-detail/index.html deleted file mode 100644 index 1176259..0000000 --- a/docs/docs/start-detail/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /docs/chatbot/start-detail/ - - - - - - diff --git a/docs/docs/test-agent-quickstart-zh/index.html b/docs/docs/test-agent-quickstart-zh/index.html deleted file mode 100644 index 7a95e1f..0000000 --- a/docs/docs/test-agent-quickstart-zh/index.html +++ /dev/null @@ -1,740 +0,0 @@ - - - - - - - - -快速使用 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速使用

    -
    -
    - - -

    快速使用(QuickStart)

    -

    前置准备

    -

    模型下载

    -

    您可在modelscopehuggingface上获取到模型的详细信息并下载模型文件。 -需要注意的是: -1)如果您通过modelscope下载模型,下载方式可参考:下载说明; -2)如果您通过huggingface下载模型,请确保您可以正常访问huggingface。

    -

    环境安装

    -
      -
    • python>=3.8
    • -
    • transformers==4.33.2
    • -
    -
    git clone https://github.com/codefuse-ai/Test-Agent
    -cd Test-Agent
    -pip install -r requirements.txt
    -

    在开始运行TestGPT-7B模型之前,请确保你的执行环境拥有大约14GB的显存。

    -

    启动服务

    -

    项目提供了网页端快速搭建UI的能力能够更直观的展示模型交互和效果,我们可以使用简单的几个命令把前端页面唤醒并实时调用模型能力。在项目目录下,依次启动以下服务:

    -

    1.启动controller -controller -python3 -m chat.server.controller

    -

    2.启动模型worker -work -python3 -m chat.server.model_worker --model-path models/TestGPT-7B --device mps

    -

    (models/TestGPT-7B 为实际模型文件路径)

    -

    对于启动方式,可以按需选择以下几种配置选项:

    -
      -
    • --device mps 用于在Mac电脑上开启GPU加速的选项(Apple Silicon或AMD GPUs);
    • -
    • --device xpu 用于在Intel XPU上开启加速的选项(Intel Data Center and Arc A-Series GPUs); - -
    • -
    • --device npu 用于在华为AI处理器上开启加速的选项; -
        -
      • 需安装Ascend PyTorch Adapter
      • -
      • 设置CANN环境变量:source /usr/local/Ascend/ascend-toolkit/set_env.sh
      • -
      -
    • -
    • --device cpu 单独使用CPU运行的选项,不需要GPU;
    • -
    • --num-gpus 2 指定并发gpu运行的选项。
    • -
    -
      -
    1. 启动web服务 -python3 -m chat.server.gradio_testgpt -web -待服务准备就绪后,我们可以打开本地启动的web服务地址 http://0.0.0.0:7860 ,就能看到完整的前端页面了。在页面下方包含了【单测生成】和【Assert补全】的两个例子,点击按钮后会自动生成一段样例文本到输入框中,点击Send按钮就会触发模型运行,之后耐心等待一段时间后(运行时间视本机性能而定)即可看到完整的回答了。 -demo
    2. -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/test-agent-quickstart/index.html b/docs/docs/test-agent-quickstart/index.html deleted file mode 100644 index 71a3230..0000000 --- a/docs/docs/test-agent-quickstart/index.html +++ /dev/null @@ -1,756 +0,0 @@ - - - - - - - - -QuickStart · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    QuickStart

    -
    -
    - - -

    QuickStart

    -

    Prerequisites

    -

    Model Download

    -

    You can get detailed information about the model and download the model files from modelscope or huggingface. -Please note: -If you download the model through modelscope, refer to the download instructions: Download Instructions; -If you download the model through huggingface, please make sure you have proper access to huggingface.

    -

    Environment Installation

    -
      -
    • python>=3.8
    • -
    • transformers==4.33.2
    • -
    -
    git clone https://github.com/codefuse-ai/Test-Agent
    -cd Test-Agent
    -pip install -r requirements.txt
    -

    Before starting to run the TestGPT-7B model, please ensure that your execution environment has about 14GB of VRAM.

    -

    Starting the Service

    -

    The project provides the ability to quickly set up a web UI for a more intuitive display of model interactions and effects. We can use a few simple commands to wake up the front-end page and call the model capabilities in real time. In the project directory, start the following services in order:

    -

    1. Start the controller -controller -python3 -m chat.server.controller

    -

    2. Start the model worker -work -python3 -m chat.server.model_worker --model-path models/TestGPT-7B --device mps

    -

    (models/TestGPT-7B is the actual model file path)

    -

    For the launch method, you can choose from several configuration options as needed:

    -
      -
    • --device mps for enabling GPU acceleration on Mac computers (Apple Silicon or AMD GPUs);
    • -
    • --device xpu for enabling acceleration on Intel XPU (Intel Data Center and Arc A-Series GPUs): - -
    • -
    • --device npu for enabling acceleration on Huawei AI processors; -
        -
      • Install Ascend PyTorch Adapter
      • -
      • Set the CANN environment variable: source /usr/local/Ascend/ascend-toolkit/set_env.sh
      • -
      -
    • -
    • --device cpu for running using only CPU, no GPU needed;
    • -
    • --num-gpus 2 to specify the option of running GPUs concurrently.
    • -
    -
      -
    1. Start the web service -python3 -m chat.server.gradio_testgpt -web -Once the service is ready, you can open the local web service address http://0.0.0.0:7860 and see the complete front-end page. At the bottom of the page, there are two examples: 【Single-test Generation】 and 【Assert Completion】. After clicking the button, a sample text will be automatically generated in the input box. Clicking the Send button will trigger the model to run. After waiting patiently for a while (running time depends on the performance of your machine), you can see the complete answer. -demo
    2. -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/test-agent-zh/index.html b/docs/docs/test-agent-zh/index.html deleted file mode 100644 index 17ad491..0000000 --- a/docs/docs/test-agent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/overview/test-agent-zh/ - - - - - - diff --git a/docs/docs/test-agent/index.html b/docs/docs/test-agent/index.html deleted file mode 100644 index 1f3e7ca..0000000 --- a/docs/docs/test-agent/index.html +++ /dev/null @@ -1,574 +0,0 @@ - - - - - - - - -Test-Agent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Test-Agent

    -
    -
    - - -

    Test-Agent

    -

    Test-Agent

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/docs/zh-acknowledgements/index.html b/docs/docs/zh-acknowledgements/index.html deleted file mode 100644 index 5071cd8..0000000 --- a/docs/docs/zh-acknowledgements/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E8%87%B4%E8%B0%A2/ - - - - - - diff --git "a/docs/docs/\345\220\257\345\212\250\346\230\216\347\273\206/index.html" "b/docs/docs/\345\220\257\345\212\250\346\230\216\347\273\206/index.html" deleted file mode 100644 index 6b889c6..0000000 --- "a/docs/docs/\345\220\257\345\212\250\346\230\216\347\273\206/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E5%90%AF%E5%8A%A8%E6%98%8E%E7%BB%86/ - - - - - - diff --git "a/docs/docs/\345\244\232\346\231\272\350\203\275\344\275\223/index.html" "b/docs/docs/\345\244\232\346\231\272\350\203\275\344\275\223/index.html" deleted file mode 100644 index 6603761..0000000 --- "a/docs/docs/\345\244\232\346\231\272\350\203\275\344\275\223/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E5%A4%9A%E6%99%BA%E8%83%BD%E4%BD%93/ - - - - - - diff --git "a/docs/docs/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" "b/docs/docs/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" deleted file mode 100644 index 143cda2..0000000 --- "a/docs/docs/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/codefuse-chatbot-quickstart-zh/ - - - - - - diff --git "a/docs/docs/\346\225\260\346\215\256\344\273\213\347\273\215/index.html" "b/docs/docs/\346\225\260\346\215\256\344\273\213\347\273\215/index.html" deleted file mode 100644 index 7c5232b..0000000 --- "a/docs/docs/\346\225\260\346\215\256\344\273\213\347\273\215/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E6%95%B0%E6%8D%AE%E4%BB%8B%E7%BB%8D/ - - - - - - diff --git 
"a/docs/docs/\346\234\254\345\234\260\347\247\201\346\234\211\345\214\226\345\244\247\346\250\241\345\236\213\346\216\245\345\217\243\346\216\245\345\205\245/index.html" "b/docs/docs/\346\234\254\345\234\260\347\247\201\346\234\211\345\214\226\345\244\247\346\250\241\345\236\213\346\216\245\345\217\243\346\216\245\345\205\245/index.html" deleted file mode 100644 index 4f44793..0000000 --- "a/docs/docs/\346\234\254\345\234\260\347\247\201\346\234\211\345\214\226\345\244\247\346\250\241\345\236\213\346\216\245\345\217\243\346\216\245\345\205\245/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E6%9C%AC%E5%9C%B0%E7%A7%81%E6%9C%89%E5%8C%96%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%8F%A3%E6%8E%A5%E5%85%A5/ - - - - - - diff --git "a/docs/docs/\346\246\202\350\247\210/index.html" "b/docs/docs/\346\246\202\350\247\210/index.html" deleted file mode 100644 index 6b5d5d8..0000000 --- "a/docs/docs/\346\246\202\350\247\210/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/zh_overview/ - - - - - - diff --git "a/docs/docs/\350\207\264\350\260\242/index.html" "b/docs/docs/\350\207\264\350\260\242/index.html" deleted file mode 100644 index 5071cd8..0000000 --- "a/docs/docs/\350\207\264\350\260\242/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/docs/%E8%87%B4%E8%B0%A2/ - - - - - - diff --git a/docs/en/index.html b/docs/en/index.html deleted file mode 100644 index 6c449ee..0000000 --- a/docs/en/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/docs/en/sitemap.xml b/docs/en/sitemap.xml deleted file mode 100644 index 5852d18..0000000 --- a/docs/en/sitemap.xml +++ /dev/null @@ -1,1016 +0,0 @@ - - - - /docs/devops_eval/tool_learning_info/ - 2024-04-09T13:54:32+08:00 - - /docs/devops_eval/tutorial/ - 2024-04-09T13:54:32+08:00 - - /docs/abstract/ - 2024-04-09T13:54:32+08:00 - - - - /contribution/acknowledgements/ - 2024-01-23T20:52:15+08:00 - - - - /coagent/agent-flow/ - 2024-01-25T20:54:37+08:00 - - - - /muagent/agent-flow/ - 
2024-04-09T13:54:32+08:00 - - - - /categories/ - - - - /docs/chatbot-roadmap/ - 2024-01-23T14:58:31+08:00 - - - - /coagent/coagent/ - 2024-01-24T19:45:27+08:00 - - - - /coagent/ - 2024-01-25T21:08:32+08:00 - - - - / - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-chatbot/ - 2024-01-09T14:29:00+08:00 - - - - /docs/overview/codefuse-chatbot/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-devops-eval/ - 2024-01-09T14:29:00+08:00 - - - - /docs/overview/codefuse-devops-eval/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-devops-model/ - 2024-01-09T14:29:00+08:00 - - - - /docs/overview/codefuse-devops-model/ - 2024-04-09T13:54:32+08:00 - - - - /docs/overview/codefuse-mft-vlm/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-modelcache/ - 2024-01-09T14:29:00+08:00 - - - - /docs/overview/codefuse-modelcache/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query/ - 2024-01-09T14:29:00+08:00 - - - - /docs/overview/codefuse-query/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-evalution/ - 2024-04-09T13:54:32+08:00 - - - - /coagent/connector-agent/ - 2024-01-25T20:54:37+08:00 - - - - /muagent/connector-agent/ - 2024-04-09T13:54:32+08:00 - - - - /coagent/connector-chain/ - 2024-01-25T20:54:37+08:00 - - - - /muagent/connector-chain/ - 2024-04-09T13:54:32+08:00 - - - - /coagent/connector-memory/ - 2024-01-25T20:54:37+08:00 - - - - /muagent/connector-memory/ - 2024-04-09T13:54:32+08:00 - - - - /coagent/connector-phase/ - 2024-01-25T20:54:37+08:00 - - - - /muagent/connector-phase/ - 2024-04-09T13:54:32+08:00 - - - - /coagent/connector-prompt/ - 2024-01-25T20:54:37+08:00 - - - - /muagent/connector-prompt/ - 2024-04-09T13:54:32+08:00 - - - - /contribution/contribution-guide/ - 2024-01-23T20:52:15+08:00 - - - - /contribution/ - 2024-04-09T13:54:32+08:00 - - - - /muagent/custom-retrieval/ - 2024-04-09T13:54:32+08:00 - - - - /muagent/custom-tool/ - 2024-04-09T13:54:32+08:00 - - - - /coagent/customed-examples/ - 2024-01-25T20:54:37+08:00 - - - - 
/muagent/custom-examples/ - 2024-04-09T13:54:32+08:00 - - - - /docs/data/ - 2024-04-09T13:54:32+08:00 - - - - /docs/ - 2024-04-09T13:54:32+08:00 - - - - /muagent/embedding-model-config/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-devops-eval-quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/fastertransformer4codefuse/ - 2024-01-09T14:29:00+08:00 - - - - /docs/overview/fastertransformer4codefuse/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-modelcache-feature/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-godellanguage/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-modelcache-config/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-introduction/ - 2024-04-09T13:54:32+08:00 - - - - /docs/mftcoder-introduction/ - 2024-04-09T13:54:32+08:00 - - - - /contribution/issue-report/ - 2024-01-23T20:52:15+08:00 - - - - /muagent/llm-model-config/ - 2024-04-09T13:54:32+08:00 - - - - /docs/LLM-Configuration/ - 2024-01-23T14:58:31+08:00 - - - - /docs/mftcoder/ - 2024-01-09T14:29:00+08:00 - - - - /docs/mftcoder-atorch/ - 2024-04-09T13:54:32+08:00 - - - - /docs/mftcoder-accelerate/ - 2024-04-09T13:54:32+08:00 - - - - /docs/overview/mftcoder/ - 2024-04-09T13:54:32+08:00 - - - - /muagent/muagent/ - 2024-04-09T13:54:32+08:00 - - - - /muagent/ - 2024-04-09T13:54:32+08:00 - - - - /docs/en_overview/ - 2024-01-09T14:29:00+08:00 - - - - /coagent/prompt-manager/ - 2024-01-25T21:08:32+08:00 - - - - /coagent/prompt-manager/ - 2024-04-09T13:54:32+08:00 - - /contribution/pull-request/ - 2024-04-09T13:54:32+08:00 - - - - /coagent/quick-start/ - 2024-01-24T19:45:27+08:00 - - - - /muagent/quick-start/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-chatbot-quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-evalution-quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-mft-vlm/quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-modelcache-quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-quickstart/ - 
2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-devops-model-quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/mftcoder-quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/test-agent-quickstart/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-modelcache-release/ - 2024-04-09T13:54:32+08:00 - - /docs/chatbot/start-detail/ - 2024-01-25T20:54:37+08:00 - - - - /tags/ - - - - /docs/test-agent/ - 2024-01-09T14:29:00+08:00 - - - - /docs/overview/test-agent-your-ai-test-assistant/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-toolchain/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-devops-model-train/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-usercase/ - 2024-04-09T13:54:32+08:00 - - - - diff --git a/docs/font/Inter-Italic.woff b/docs/font/Inter-Italic.woff deleted file mode 100644 index a806b38..0000000 Binary files a/docs/font/Inter-Italic.woff and /dev/null differ diff --git a/docs/font/Inter-Italic.woff2 b/docs/font/Inter-Italic.woff2 deleted file mode 100644 index a619fc5..0000000 Binary files a/docs/font/Inter-Italic.woff2 and /dev/null differ diff --git a/docs/font/Inter-Regular.woff b/docs/font/Inter-Regular.woff deleted file mode 100644 index 62d3a61..0000000 Binary files a/docs/font/Inter-Regular.woff and /dev/null differ diff --git a/docs/font/Inter-Regular.woff2 b/docs/font/Inter-Regular.woff2 deleted file mode 100644 index 6c2b689..0000000 Binary files a/docs/font/Inter-Regular.woff2 and /dev/null differ diff --git a/docs/font/Inter-SemiBold.woff b/docs/font/Inter-SemiBold.woff deleted file mode 100644 index a815f43..0000000 Binary files a/docs/font/Inter-SemiBold.woff and /dev/null differ diff --git a/docs/font/Inter-SemiBold.woff2 b/docs/font/Inter-SemiBold.woff2 deleted file mode 100644 index 611e90c..0000000 Binary files a/docs/font/Inter-SemiBold.woff2 and /dev/null differ diff --git a/docs/images/LOGO.png b/docs/images/LOGO.png deleted file mode 100644 index 08a2d34..0000000 Binary files a/docs/images/LOGO.png and 
/dev/null differ diff --git a/docs/images/chatbot/BaseAgent.png b/docs/images/chatbot/BaseAgent.png deleted file mode 100644 index 1f022d3..0000000 Binary files a/docs/images/chatbot/BaseAgent.png and /dev/null differ diff --git a/docs/images/chatbot/devops-chatbot-module-v2.png b/docs/images/chatbot/devops-chatbot-module-v2.png deleted file mode 100644 index b905e86..0000000 Binary files a/docs/images/chatbot/devops-chatbot-module-v2.png and /dev/null differ diff --git a/docs/images/chatbot/luban.png b/docs/images/chatbot/luban.png deleted file mode 100644 index 9cf8576..0000000 Binary files a/docs/images/chatbot/luban.png and /dev/null differ diff --git a/docs/images/chatbot/objective_v4.png b/docs/images/chatbot/objective_v4.png deleted file mode 100644 index 82cacac..0000000 Binary files a/docs/images/chatbot/objective_v4.png and /dev/null differ diff --git a/docs/images/codefuse-evalution/EnglishIntroduction.png b/docs/images/codefuse-evalution/EnglishIntroduction.png deleted file mode 100644 index 5e37155..0000000 Binary files a/docs/images/codefuse-evalution/EnglishIntroduction.png and /dev/null differ diff --git "a/docs/images/codefuse-evalution/\344\270\255\346\226\207\344\273\213\347\273\215.png" "b/docs/images/codefuse-evalution/\344\270\255\346\226\207\344\273\213\347\273\215.png" deleted file mode 100644 index 14c1ee7..0000000 Binary files "a/docs/images/codefuse-evalution/\344\270\255\346\226\207\344\273\213\347\273\215.png" and /dev/null differ diff --git a/docs/images/codefuse-modelcache/modelcache_modules_20231114.png b/docs/images/codefuse-modelcache/modelcache_modules_20231114.png deleted file mode 100644 index 596c1ac..0000000 Binary files a/docs/images/codefuse-modelcache/modelcache_modules_20231114.png and /dev/null differ diff --git a/docs/images/codefuse-query/introduction01.png b/docs/images/codefuse-query/introduction01.png deleted file mode 100644 index b86708b..0000000 Binary files a/docs/images/codefuse-query/introduction01.png and 
/dev/null differ diff --git a/docs/images/codefuse-query/introduction02.png b/docs/images/codefuse-query/introduction02.png deleted file mode 100644 index 5cd1949..0000000 Binary files a/docs/images/codefuse-query/introduction02.png and /dev/null differ diff --git a/docs/images/codefuse-query/introduction03.png b/docs/images/codefuse-query/introduction03.png deleted file mode 100644 index de6f7f3..0000000 Binary files a/docs/images/codefuse-query/introduction03.png and /dev/null differ diff --git a/docs/images/codefuse-query/macos_cannot_open_godel.png b/docs/images/codefuse-query/macos_cannot_open_godel.png deleted file mode 100644 index d417a04..0000000 Binary files a/docs/images/codefuse-query/macos_cannot_open_godel.png and /dev/null differ diff --git a/docs/images/codefuse-query/panel.jpg b/docs/images/codefuse-query/panel.jpg deleted file mode 100644 index 2c05e75..0000000 Binary files a/docs/images/codefuse-query/panel.jpg and /dev/null differ diff --git a/docs/images/codefuse-query/security_allow_godel_run.png b/docs/images/codefuse-query/security_allow_godel_run.png deleted file mode 100644 index d01d2d5..0000000 Binary files a/docs/images/codefuse-query/security_allow_godel_run.png and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain01.png b/docs/images/codefuse-query/toolchain01.png deleted file mode 100644 index b271660..0000000 Binary files a/docs/images/codefuse-query/toolchain01.png and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain02.gif b/docs/images/codefuse-query/toolchain02.gif deleted file mode 100644 index 6c2b75e..0000000 Binary files a/docs/images/codefuse-query/toolchain02.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain03.gif b/docs/images/codefuse-query/toolchain03.gif deleted file mode 100644 index 8471c8d..0000000 Binary files a/docs/images/codefuse-query/toolchain03.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain04.gif 
b/docs/images/codefuse-query/toolchain04.gif deleted file mode 100644 index 611c286..0000000 Binary files a/docs/images/codefuse-query/toolchain04.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain05.gif b/docs/images/codefuse-query/toolchain05.gif deleted file mode 100644 index e7b2905..0000000 Binary files a/docs/images/codefuse-query/toolchain05.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain06.gif b/docs/images/codefuse-query/toolchain06.gif deleted file mode 100644 index 49ac0fe..0000000 Binary files a/docs/images/codefuse-query/toolchain06.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain07.gif b/docs/images/codefuse-query/toolchain07.gif deleted file mode 100644 index f6d276e..0000000 Binary files a/docs/images/codefuse-query/toolchain07.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain08.gif b/docs/images/codefuse-query/toolchain08.gif deleted file mode 100644 index 440d7a6..0000000 Binary files a/docs/images/codefuse-query/toolchain08.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain09.gif b/docs/images/codefuse-query/toolchain09.gif deleted file mode 100644 index 02f6696..0000000 Binary files a/docs/images/codefuse-query/toolchain09.gif and /dev/null differ diff --git a/docs/images/codefuse-query/toolchain10.gif b/docs/images/codefuse-query/toolchain10.gif deleted file mode 100644 index eadb3c0..0000000 Binary files a/docs/images/codefuse-query/toolchain10.gif and /dev/null differ diff --git a/docs/images/codefuse-query/wechat_qrcode.JPG b/docs/images/codefuse-query/wechat_qrcode.JPG deleted file mode 100644 index 7c49013..0000000 Binary files a/docs/images/codefuse-query/wechat_qrcode.JPG and /dev/null differ diff --git a/docs/images/devops_eval/categroy_mapping.json b/docs/images/devops_eval/categroy_mapping.json deleted file mode 100644 index 4ed77ff..0000000 --- a/docs/images/devops_eval/categroy_mapping.json +++ /dev/null @@ 
-1,479 +0,0 @@ -{ - "Visualization.csv":[ - "visualization", - "可视化", - { - "dev":5, - "test":44 - }, - "Visualization.csv" - ], - "Logging.csv":[ - "logging", - "日志", - { - "dev":5, - "test":100 - }, - "Logging.csv" - ], - "Storage.csv":[ - "storage", - "存储", - { - "dev":5, - "test":36 - }, - "Storage.csv" - ], - "DataAcquisition.csv":[ - "data acquisition", - "数据采集", - { - "dev":5, - "test":36 - }, - "DataAcquisition.csv" - ], - "IntegrationTesting.csv":[ - "integration testing", - "集成测试", - { - "dev":5, - "test":31 - }, - "IntegrationTesting.csv" - ], - "UserAcceptanceTesting.csv":[ - "user acceptance testing", - "用户验收测试", - { - "dev":5, - "test":39 - }, - "UserAcceptanceTesting.csv" - ], - "SecurityTesting.csv":[ - "security testing", - "安全测试", - { - "dev":5, - "test":38 - }, - "SecurityTesting.csv" - ], - "UnitTesting.csv":[ - "unit testing", - "单元测试", - { - "dev":5, - "test":32 - }, - "UnitTesting.csv" - ], - "PerformanceTesting.csv":[ - "performance testing", - "性能测试", - { - "dev":5, - "test":36 - }, - "PerformanceTesting.csv" - ], - "SystemTesting.csv":[ - "system testing", - "系统测试", - { - "dev":5, - "test":52 - }, - "SystemTesting.csv" - ], - "ProgM.csv":[ - "programme management", - "进度管理", - { - "dev":5, - "test":21 - }, - "ProgM.csv" - ], - "REQM.csv":[ - "requirements management", - "需求管理", - { - "dev":5, - "test":24 - }, - "REQM.csv" - ], - "RiskMgmt.csv":[ - "risk management", - "风险管理", - { - "dev":5, - "test":21 - }, - "RiskMgmt.csv" - ], - "InfrastructureAsCode.csv":[ - "infrastructure as code", - "基础设施即代码", - { - "dev":5, - "test":34 - }, - "InfrastructureAsCode.csv" - ], - "Provisioning.csv":[ - "provisioning", - "置备", - { - "dev":5, - "test":19 - }, - "Provisioning.csv" - ], - "ConfigMgmt.csv":[ - "config management", - "配置管理", - { - "dev":5, - "test":100 - }, - "ConfigMgmt.csv" - ], - "Azure.csv":[ - "microsoft azure", - "微软云服务", - { - "dev":5, - "test":27 - }, - "Azure.csv" - ], - "GoogleCloud.csv":[ - "google cloud", - "谷歌云服务", - { - "dev":5, 
- "test":31 - }, - "GoogleCloud.csv" - ], - "AWS.csv":[ - "amazon web services", - "亚马逊云服务", - { - "dev":5, - "test":44 - }, - "AWS.csv" - ], - "LogDesign.csv":[ - "log design", - "日志设计", - { - "dev":5, - "test":33 - }, - "LogDesign.csv" - ], - "ServiceDesign.csv":[ - "service design", - "服务设计", - { - "dev":5, - "test":44 - }, - "ServiceDesign.csv" - ], - "CapabilityDesign.csv":[ - "capability design", - "容量设计", - { - "dev":5, - "test":33 - }, - "CapabilityDesign.csv" - ], - "CloudNativeDesign.csv":[ - "cloud native design", - "云原生设计", - { - "dev":5, - "test":44 - }, - "CloudNativeDesign.csv" - ], - "CacheDesign.csv":[ - "cache design", - "缓存设计", - { - "dev":5, - "test":28 - }, - "CacheDesign.csv" - ], - "DBDesign.csv":[ - "database design", - "数据库设计", - { - "dev":5, - "test":38 - }, - "DBDesign.csv" - ], - "ArtificialIntelligence.csv":[ - "artificial intelligence", - "人工智能", - { - "dev":5, - "test":45 - }, - "ArtificialIntelligence.csv" - ], - "ComputerBasics.csv":[ - "computer basics", - "计算机基础", - { - "dev":5, - "test":100 - }, - "ComputerBasics.csv" - ], - "DataBase.csv":[ - "database", - "数据库", - { - "dev":5, - "test":75 - }, - "DataBase.csv" - ], - "ComputerNetwork.csv":[ - "computer network", - "计算机网络", - { - "dev":5, - "test":88 - }, - "ComputerNetwork.csv" - ], - "OperatingSystem.csv":[ - "operating system", - "操作系统", - { - "dev":5, - "test":36 - }, - "OperatingSystem.csv" - ], - "Go.csv":[ - "go", - "go语言", - { - "dev":5, - "test":100 - }, - "Go.csv" - ], - "Java.csv":[ - "java", - "java语言", - { - "dev":5, - "test":100 - }, - "Java.csv" - ], - "C:C++.csv":[ - "c/c++", - "c/c++语言", - { - "dev":5, - "test":100 - }, - "C:C++.csv" - ], - "Python.csv":[ - "python", - "python语言", - { - "dev":5, - "test":73 - }, - "Python.csv" - ], - "BigData.csv":[ - "big data", - "大数据", - { - "dev":5, - "test":15 - }, - "BigData.csv" - ], - "Front-end.csv":[ - "front-end", - "前端", - { - "dev":5, - "test":100 - }, - "Front-end.csv" - ], - "MobileApp.csv":[ - "mobile app", - 
"移动应用", - { - "dev":5, - "test":100 - }, - "MobileApp.csv" - ], - "MachineLearning.csv":[ - "machine learning", - "机器学习", - { - "dev":5, - "test":69 - }, - "MachineLearning.csv" - ], - "Back-end.csv":[ - "back-end", - "后端", - { - "dev":5, - "test":100 - }, - "Back-end.csv" - ], - "ArtifactMgmt.csv":[ - "artifact management", - "产出物管理", - { - "dev":5, - "test":12 - }, - "ArtifactMgmt.csv" - ], - "CI:CD.csv":[ - "cd/cd", - "持续集成/持续部署", - { - "dev":5, - "test":100 - }, - "CI:CD.csv" - ], - "Linux.csv":[ - "linux", - "linux操作系统", - { - "dev":5, - "test":100 - }, - "Linux.csv" - ], - "ContainerOrchestration.csv":[ - "container orchestration", - "容器编排", - { - "dev":5, - "test":100 - }, - "ContainerOrchestration.csv" - ], - "Virtualization.csv":[ - "virtualization", - "虚拟化技术", - { - "dev":5, - "test":34 - }, - "Virtualization.csv" - ], - "TimeSeriesAnomalyDetection.csv":[ - "time series anomaly detection", - "时序异常检测", - { - "dev":5, - "test":300 - }, - "TimeSeriesAnomalyDetection.csv" - ], - "TimeSeriesClassification.csv":[ - "time series classification", - "时序分类", - { - "dev":5, - "test":200 - }, - "TimeSeriesClassification.csv" - ], - "RootCauseAnalysis.csv":[ - "root cause analysis", - "根因分析", - { - "dev":5, - "test":250 - }, - "RootCauseAnalysis.csv" - ], - "LogParser.csv":[ - "log parser", - "日志解析", - { - "dev":5, - "test":350 - }, - "LogParser.csv" - ], - "VersionControl.csv":[ - "version control", - "版本控制", - { - "dev":5, - "test":100 - }, - "VersionControl.csv" - ], - "DBMgnt.csv":[ - "database management", - "数据库管理", - { - "dev":5, - "test":19 - }, - "DBMgnt.csv" - ], - "Dependency.csv":[ - "dependency", - "依赖管理", - { - "dev":5, - "test":44 - }, - "Dependency.csv" - ], - "Compile.csv":[ - "compile", - "编译", - { - "dev":5, - "test":31 - }, - "Compile.csv" - ], - "Package.csv":[ - "package", - "包管理", - { - "dev":5, - "test":24 - }, - "Package.csv" - ] -} \ No newline at end of file diff --git a/docs/images/devops_eval/data_info.png 
b/docs/images/devops_eval/data_info.png deleted file mode 100644 index 5b42add..0000000 Binary files a/docs/images/devops_eval/data_info.png and /dev/null differ diff --git a/docs/images/devops_eval/devops_diagram_zh.jpg b/docs/images/devops_eval/devops_diagram_zh.jpg deleted file mode 100644 index f75a1a4..0000000 Binary files a/docs/images/devops_eval/devops_diagram_zh.jpg and /dev/null differ diff --git a/docs/images/devops_eval/devops_eval_logo.png b/docs/images/devops_eval/devops_eval_logo.png deleted file mode 100644 index 8ed3852..0000000 Binary files a/docs/images/devops_eval/devops_eval_logo.png and /dev/null differ diff --git a/docs/images/devops_eval/toolLearning_performance_metrics copy.png b/docs/images/devops_eval/toolLearning_performance_metrics copy.png deleted file mode 100644 index 8ade560..0000000 Binary files a/docs/images/devops_eval/toolLearning_performance_metrics copy.png and /dev/null differ diff --git a/docs/images/devops_eval/toolLearning_performance_metrics.png b/docs/images/devops_eval/toolLearning_performance_metrics.png deleted file mode 100644 index 8ade560..0000000 Binary files a/docs/images/devops_eval/toolLearning_performance_metrics.png and /dev/null differ diff --git a/docs/images/devops_model/devops-data-filter.webp b/docs/images/devops_model/devops-data-filter.webp deleted file mode 100644 index 7df027c..0000000 Binary files a/docs/images/devops_model/devops-data-filter.webp and /dev/null differ diff --git a/docs/images/devops_model/devops-train-framework.webp b/docs/images/devops_model/devops-train-framework.webp deleted file mode 100644 index e34391f..0000000 Binary files a/docs/images/devops_model/devops-train-framework.webp and /dev/null differ diff --git a/docs/images/devops_model/devops_data_filter.png b/docs/images/devops_model/devops_data_filter.png deleted file mode 100644 index 1f89571..0000000 Binary files a/docs/images/devops_model/devops_data_filter.png and /dev/null differ diff --git 
a/docs/images/devops_model/devops_eval.webp b/docs/images/devops_model/devops_eval.webp deleted file mode 100644 index 696c382..0000000 Binary files a/docs/images/devops_model/devops_eval.webp and /dev/null differ diff --git a/docs/images/devops_model/devops_train_framework.png b/docs/images/devops_model/devops_train_framework.png deleted file mode 100644 index 9301c46..0000000 Binary files a/docs/images/devops_model/devops_train_framework.png and /dev/null differ diff --git a/docs/images/mft-vlm/CodeFuse-VLM-14B-performance.png b/docs/images/mft-vlm/CodeFuse-VLM-14B-performance.png deleted file mode 100644 index e3b8f7a..0000000 Binary files a/docs/images/mft-vlm/CodeFuse-VLM-14B-performance.png and /dev/null differ diff --git a/docs/images/mft-vlm/CodeFuse-VLM-arch.png b/docs/images/mft-vlm/CodeFuse-VLM-arch.png deleted file mode 100644 index 58e7f33..0000000 Binary files a/docs/images/mft-vlm/CodeFuse-VLM-arch.png and /dev/null differ diff --git a/docs/images/mft-vlm/MFT-VLM-arch.png b/docs/images/mft-vlm/MFT-VLM-arch.png deleted file mode 100644 index a9487fe..0000000 Binary files a/docs/images/mft-vlm/MFT-VLM-arch.png and /dev/null differ diff --git a/docs/images/mftcoder/github-codefuse-logo-update.jpg b/docs/images/mftcoder/github-codefuse-logo-update.jpg deleted file mode 100644 index 0cfa493..0000000 Binary files a/docs/images/mftcoder/github-codefuse-logo-update.jpg and /dev/null differ diff --git a/docs/images/mftcoder/img.jpg b/docs/images/mftcoder/img.jpg deleted file mode 100644 index 199cc8e..0000000 Binary files a/docs/images/mftcoder/img.jpg and /dev/null differ diff --git a/docs/images/mftcoder/img_1.jpg b/docs/images/mftcoder/img_1.jpg deleted file mode 100644 index bde7dac..0000000 Binary files a/docs/images/mftcoder/img_1.jpg and /dev/null differ diff --git a/docs/images/muagent/agent-flow.png b/docs/images/muagent/agent-flow.png deleted file mode 100644 index 2358cc8..0000000 Binary files a/docs/images/muagent/agent-flow.png and /dev/null 
differ diff --git a/docs/images/muagent/memory manager.webp b/docs/images/muagent/memory manager.webp deleted file mode 100644 index 0bed6d2..0000000 Binary files a/docs/images/muagent/memory manager.webp and /dev/null differ diff --git a/docs/images/muagent/muagent framework.png b/docs/images/muagent/muagent framework.png deleted file mode 100644 index 1f97176..0000000 Binary files a/docs/images/muagent/muagent framework.png and /dev/null differ diff --git a/docs/img/icon/favicon.ico b/docs/img/icon/favicon.ico deleted file mode 100644 index 4f47bdc..0000000 Binary files a/docs/img/icon/favicon.ico and /dev/null differ diff --git a/docs/img/icon/icon-16.png b/docs/img/icon/icon-16.png deleted file mode 100644 index c55011a..0000000 Binary files a/docs/img/icon/icon-16.png and /dev/null differ diff --git a/docs/img/icon/icon-180.png b/docs/img/icon/icon-180.png deleted file mode 100644 index 694fd85..0000000 Binary files a/docs/img/icon/icon-180.png and /dev/null differ diff --git a/docs/img/icon/icon-192.png b/docs/img/icon/icon-192.png deleted file mode 100644 index a47b1d8..0000000 Binary files a/docs/img/icon/icon-192.png and /dev/null differ diff --git a/docs/img/icon/icon-32.png b/docs/img/icon/icon-32.png deleted file mode 100644 index afaee33..0000000 Binary files a/docs/img/icon/icon-32.png and /dev/null differ diff --git a/docs/img/icon/icon-512.png b/docs/img/icon/icon-512.png deleted file mode 100644 index d69620b..0000000 Binary files a/docs/img/icon/icon-512.png and /dev/null differ diff --git a/docs/img/icon/icon-vector.svg b/docs/img/icon/icon-vector.svg deleted file mode 100644 index fc8a34c..0000000 --- a/docs/img/icon/icon-vector.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/docs/img/icon/maskable-icon-192.png b/docs/img/icon/maskable-icon-192.png deleted file mode 100644 index c9f099c..0000000 Binary files a/docs/img/icon/maskable-icon-192.png and /dev/null differ diff --git a/docs/img/icon/maskable-icon-512.png 
b/docs/img/icon/maskable-icon-512.png deleted file mode 100644 index 281dae9..0000000 Binary files a/docs/img/icon/maskable-icon-512.png and /dev/null differ diff --git a/docs/index.en-US.md b/docs/index.en-US.md new file mode 100644 index 0000000..b994793 --- /dev/null +++ b/docs/index.en-US.md @@ -0,0 +1,47 @@ +--- +title: CodeFuse - 让研发变得更简单 +hero: + title: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*29lySZry1NwAAAAAAAAAAAAADlHYAQ/original' + description: Make R&D Simpler +CodeGenerationTitle: + title: 'Code Generation' + buttomText: Warehouse +CodeGeneration: + - title: CodeFuse-MFTcoder + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*jyTURIgXb4EAAAAAAAAAAAAADlHYAQ/original' + description: CodeFuse-MFTcoder is a multi-task fine-tuning framework designed to enhance the programming capabilities of large code models. Unlike traditional single-task fine-tuning, it can handle multiple programming tasks simultaneously, balancing the differences in data volume, difficulty, and convergence speed among various tasks by combining diverse loss functions. This approach increases fine-tuning efficiency and performance. Additionally, the framework incorporates efficient training optimization techniques, is compatible with several well-known open-source large models, and ranks first on the Opencompass Leaderboard for its MFT performance based on the Deepseek model. + - title: CodeFuse-MFT-VLM + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*SqoGS7hUQowAAAAAAAAAAAAADlHYAQ/original' + description: CodeFuse-MFT-VLM is a framework designed for multimodal large language models, aimed at compatibility and adaptation across various visual and linguistic models to support different types of tasks. It integrates a multitude of visual encoders such as the CLIP series, and language models like the Vicuna and LLAMA series, offering flexible configuration options. 
This allows users to freely combine different models using VL-MFTCoder, thereby simplifying the development and application process for multimodal tasks. + - title: Awesome-Code-LLM + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*JZ2hTZwpRhIAAAAAAAAAAAAADlHYAQ/original' + description: Ant Group, in collaboration with Shanghai Jiao Tong University, has released a 55-page comprehensive review of large code models, covering more than 50 models, 30 downstream tasks, and 500 reference papers. This review provides a holistic summary of the latest progress and challenges in the application of large language models to code-related tasks. +DevOpsTitle: + title: DevOps +DevOps: + - cardTitle: CodeFuse-ChatBot + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*l4LUSpeo7GMAAAAAAAAAAAAADlHYAQ/original' + description: The DevOps-ChatBot is an open-source AI assistant developed by the Ant CodeFuse team, dedicated to simplifying and optimizing various aspects of the software development lifecycle. + - cardTitle: DevOps-Eval + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*DVkmS5rN2iEAAAAAAAAAAAAADlHYAQ/original' + description: DevOps-Eval is a comprehensive evaluation suite specifically designed for foundation models in the DevOps field. We hope DevOps-Eval could help developers, especially in the DevOps field, track the progress and analyze the important strengths/shortcomings of their models. + - cardTitle: DevOps-Model + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*HCNGRblECa4AAAAAAAAAAAAADlHYAQ/original' + description: DevOps-Model is a series of industrial-fist Chinese DevOps large language models, mainly dedicated to exerting practical value in the field of DevOps. Currently, DevOps-Model can help engineers answer questions encountered in the all DevOps life cycle. 
+CodeAnalysis: + title: Code Analysis + description: CodeFuse-Query is a powerful static code analysis platform suitable for large-scale, complex codebase analysis scenarios. Its data-centric approach and high scalability give it a unique advantage in the modern software development environment. + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*4yyUS7SkkS8AAAAAAAAAAAAADlHYAQ/original' +IntelligentInference: + title: Intelligent Inference + description: ModelCache is a semantic cache for large language models (LLMs). By caching pre-generated model results, it reduces response time for similar requests and improves user experience.This project aims to optimize services by introducing a caching mechanism. It helps businesses and research institutions reduce the cost of inference deployment, improve model performance and efficiency, and provide scalable services for large models. Through open-source, we aim to share and exchange technologies related to large model semantic cache. + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*TArYQoIpeNkAAAAAAAAAAAAADlHYAQ/original' +AutomatedTesting: + title: Automated Testing + description: TestAgent is the first open-source large model in the domestic testing industry, which includes the most powerful 7B large model for testing domains, as well as an accompanying framework for rapid local model deployment and an engineered experience. TestAgent is designed to build an "intelligence agent" within the testing field, integrating large models with engineering technologies in the quality domain to promote generational upgrades in quality technology. We look forward to collaborating with community members to create innovative solutions in the testing field, to construct a 24-hour online testing assistant service, making testing as smooth as silk. 
+ image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*hVVqQI7U5noAAAAAAAAAAAAADlHYAQ/original' +PerformanceEvaluation: + title: Performance Evaluation + description: CodeFuseEval is an enterprise-level, multi-type programming task evaluation benchmark developed on top of the open-source HumanEval-x, MBPP, and DS1000 benchmarks, integrated with the multi-task scenarios of the CodeFuse large model. It is designed for assessing the capabilities of large models in various tasks such as code completion, natural language code generation, test case generation, cross-language code translation, Chinese instruction-based code generation, code annotation explanation, bug detection/fixing, and code optimization. CodeFuseEval is built to closely reflect real-world business scenarios, and aims to create a multidimensional, diverse, and trustworthy evaluation benchmark for measuring large models' code generation capabilities. + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*cpmqR5bj9wYAAAAAAAAAAAAADlHYAQ/original' +--- diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index eec65d7..0000000 --- a/docs/index.html +++ /dev/null @@ -1,407 +0,0 @@ - - - - - - - - - -CodeFuse-AI · CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native sotware developing, covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    -
    -
    -

    CodeFuse-AI

    -

    CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native sotware developing, covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis.

    - - - - - -
    - -
    -
    -

    CodeFuse-Query

    -

    Query-Based Code Analysis Engine

    - -
    - - - - -
    - - CodeFuse-Query - Star - Fork -
    -
    - -
    - -
    -
    -

    MFTCoder

    -

    High Accuracy and efficiency multi-task fine-tuning framework for Code LLMs

    - -
    - - - - -
    - - MFTCoder - Star - Fork -
    -
    - -
    - - - - -
    - - CodeFuse-MFT-VLM - Star - Fork -
    -
    - -
    - -
    -
    -

    Test-Agent

    -

    Agent that empowers software testing with LLMs; industrial-first in China

    - -
    - - - - -
    - - Test-Agent - Star - Fork -
    -
    - -
    - -
    -
    -

    CodeFuse-ModelCache

    -

    A LLM semantic caching system aiming to reducing response time via cached query-result pairs.

    - -
    - - - - -
    - - CodeFuse-ModelCache - Star - Fork -
    -
    - -
    - -
    -
    -

    DevOps-Series

    -

    An intelligent assistant serving the entire software development lifecycle.

    - -
    - - - - -
    - - codefuse-chatbot - Star - Fork -
    -
    - -
    - - - - -
    - - codefuse-devops-eval - Star - Fork -
    -
    - -
    - - - - -
    - - CodeFuse-DevOps-Model - Star - Fork -
    -
    - -
    - -
    -
    -

    Codefuse-evaluation

    -

    Industrial-level evaluation benchmarks for Coding LLMs in the full life-cycle of AI native software developing

    - -
    - - - - -
    - - codefuse-evaluation - Star - Fork -
    -
    - -
    - -
    - - - - - - - -
    -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/index.xml b/docs/index.xml deleted file mode 100644 index 7c4d6ce..0000000 --- a/docs/index.xml +++ /dev/null @@ -1,529 +0,0 @@ - - - - CodeFuse-AI - / - Recent content on CodeFuse-AI - Hugo -- gohugo.io - en-US - - - - /docs/devops_eval/tool_learning_info/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/devops_eval/tool_learning_info/ - 数据样例 在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下: Function Call的数据格式 Input Key Input Type Input Description functions List[Swagger] 工具集合 chatrounds List[chatround] 多轮对话数据 chatrounds的数据格式 Input Key Input Type Input Description role string 角色名称,包含三种类别,user、assistant、function name string 若role为function,则存在name字段,为function的名称 content string role的返回内容 function_call dict 工具调用 { &#34;functions&#34;: [ { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;description&#34;: &#34;查询复旦大学往年分数线,例如:查询2020年复旦大学的分数线&#34;, &#34;parameters&#34;: { &#34;type&#34;: &#34;object&#34;, &#34;properties&#34;: { &#34;year&#34;: { &#34;type&#34;: &#34;string&#34;, &#34;description&#34;: &#34;年份,例如:2020,2019,2018&#34; } }, &#34;required&#34;: [ &#34;year&#34; ] } } ], &#34;chatrounds&#34;: [ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。\n你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。&#34; }, { &#34;role&#34;: &#34;user&#34;, &#34;content&#34;: &#34;查询2020年复旦大学的分数线&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: null, &#34;function_call&#34;: { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;arguments&#34;: &#34;{\n \&#34;year\&#34;: \&#34;2020\&#34;\n}&#34; } }, { &#34;role&#34;: &#34;function&#34;, &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;content&#34;: &#34;{\n \&#34;scoreline\&#34;:{\n \&#34;文科一批\&#34;: 630, \n 
\&#34;文科二批\&#34;: 610, \n \&#34;理科一批\&#34;: 650, \n \&#34;理科二批\&#34;: 630 \n }\n}&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: &#34;2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分&#34; } ] } 上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。 - - - - /docs/devops_eval/tutorial/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/devops_eval/tutorial/ - Evaluate Tutorial 🚀 How to Evaluate If you need to test your own huggingface-formatted model, the overall steps are as follows: Write the loader function for the model. Write the context_builder function for the model. Register the model in the configuration file. Run the testing script. If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e. - - - Abstract - /docs/abstract/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/abstract/ - Abstract With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. - - - Acknowledgements - /contribution/acknowledgements/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/acknowledgements/ - The documentation homepage of CodeFuse-ai is built on docura The ChatBot project is based on langchain-chatchat and codebox-api. &hellip;&hellip; Deep gratitude is extended for their open-source contributions! 
- - - Agent Flow - /coagent/agent-flow/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/agent-flow/ - Introduction to Core Connectors To facilitate everyone&rsquo;s understanding of the entire CoAgent link, we use a Flow format to detail how to build through configuration settings. Below, we will first introduce the related core components Agent At the design level of the Agent, we provide four basic types of Agents, which allows for the basic role settings of these Agents to meet the interaction and usage of a variety of common scenarios. - - - Agent Flow - /muagent/agent-flow/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/agent-flow/ - Introduction to Core Connectors To facilitate everyone&rsquo;s understanding of the entire muagent link, we adopt the Flow format to introduce in detail how to build through configuration Below, we first introduce the related core components Agent On the design level of the Agent, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios: BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input =&gt; output according to the Prompt format. 
- - - ChatBot-RoadMap - /docs/chatbot-roadmap/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/chatbot-roadmap/ - 中文&nbsp | &nbspEnglish&nbsp RoadMap Roadmap Overview Sandbox Environment ✅ Isolated sandbox environment for code execution ✅ File upload and download ✅ Support for Java execution environment ⬜ Vector Database &amp; Retrieval ✅ Task retrieval ✅ Tool retrieval ✅ Prompt Management ✅ Memory Management ✅ Multi Agent Framework ✅ PRD (Product Requirement Document), system analysis, interface design ⬜ Generate code based on requirement documents, system analysis, and interface design ⬜ Automated testing, automated debugger ⬜ Operations process integration (ToolLearning) ⬜ Fully automated end-to-end process ⬜ Integration with LLM based on fastchat ✅ Integration with Text Embedding based on sentencebert ✅ Improved vector loading speed ✅ Connector ✅ React Mode based on langchain ✅ Tool retrieval completed with langchain ✅ General Capability for Web Crawl ⬜ Technical documentation: Zhihu, CSDN, Alibaba Cloud Developer Forum, Tencent Cloud Developer Forum, etc. - - - CoAgent - /coagent/coagent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/coagent/ - 简介 To enhance the performance of large language models (LLMs) in terms of inference accuracy, the industry has seen various innovative approaches to utilizing LLMs. From the earliest Chain of Thought (CoT), Text of Thought (ToT), to Graph of Thought (GoT), these methods have continually expanded the capability boundaries of LLMs. In dealing with complex problems, we can use the ReAct process to select, invoke, and execute tool feedback, achieving multi-round tool usage and multi-step execution. 
- - - Codefuse-ChatBot Development by Private Knowledge Augmentation - /docs/codefuse-chatbot/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-chatbot/ - 中文&nbsp | &nbspEnglish&nbsp This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface. 📜 Contents 🤝 Introduction 🧭 Technical Route 🤝 Introduction 💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. - - - Codefuse-ChatBot Development by Private Knowledge Augmentation - /docs/overview/codefuse-chatbot/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-chatbot/ - 中文&nbsp | &nbspEnglish&nbsp This project is an open-source AI intelligent assistant, specifically designed for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations. Through knowledge retrieval, tool utilization, and sandbox execution, Codefuse-ChatBot can not only answer professional questions you encounter during the development process but also coordinate multiple independent, dispersed platforms through a conversational interface. 📜 Contents 🤝 Introduction 🧭 Technical Route 🤝 Introduction 💡 The aim of this project is to construct an AI intelligent assistant for the entire lifecycle of software development, covering design, coding, testing, deployment, and operations, through Retrieval Augmented Generation (RAG), Tool Learning, and sandbox environments. 
- - - codefuse-devops-eval - /docs/codefuse-devops-eval/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-eval/ - Comming soon - - - codefuse-devops-eval - /docs/overview/codefuse-devops-eval/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-devops-eval/ - DevOps-Eval is a comprehensive evaluation suite specifically designed for foundation models in the DevOps field. We hope DevOps-Eval could help developers, especially in the DevOps field, track the progress and analyze the important strengths/shortcomings of their models. 📚 This repo contains questions and exercises related to DevOps, including the AIOps, ToolLearning; 💥️ There are currently 7486 multiple-choice questions spanning 8 diverse general categories, as shown below. 🔥 There are a total of 2840 samples in the AIOps subcategory, covering scenarios such as log parsing, time series anomaly detection, time series classification, time series forecasting, and root cause analysis. - - - codefuse-devops-model - /docs/codefuse-devops-model/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model/ - Comming soon - - - codefuse-devops-model - /docs/overview/codefuse-devops-model/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-devops-model/ - codeFuse-devops-model DevOps-Model is a large language model for the Chinese DevOps field jointly released by Ant Group and Peking University. By collecting professional data related to the DevOps domain and conducting additional training and alignment on the model, a large model has been produced to help engineers enhance efficiency throughout the entire development and operations lifecycle. This fills the current gap in large models within the DevOps domain, with the aim to provide solutions to any problems by asking DevOps-Model! 
- - - CodeFuse-MFT-VLM - /docs/overview/codefuse-mft-vlm/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-mft-vlm/ - CodeFuse-VLM CodeFuse-VLM is a Multimodal LLM(MLLM) framework that provides users with multiple vision encoders, multimodal alignment adapters, and LLMs. Through CodeFuse-VLM framework, users are able to customize their own MLLM model to adapt their own tasks. As more and more models are published on Huggingface community, there will be more open-source vision encoders and LLMs. Each of these models has their own specialties, e.g. Code-LLama is good at code-related tasks but has poor performance for Chinese tasks. - - - CodeFuse-ModelCache - /docs/codefuse-modelcache/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache/ - CodeFuse-ModelCache CodeFuse-ModelCache - - - CodeFuse-ModelCache - /docs/overview/codefuse-modelcache/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-modelcache/ - 中文 | English Contents news Introduction Modules Acknowledgements Contributing news 🔥🔥[2023.12.10] we integrate LLM embedding frameworks such as &rsquo;llmEmb&rsquo;, &lsquo;ONNX&rsquo;, &lsquo;PaddleNLP&rsquo;, &lsquo;FastText&rsquo;, alone with the image embedding framework &rsquo;timm&rsquo;, to bolster embedding functionality. 🔥🔥[2023.11.20] codefuse-ModelCache has integrated local storage, such as sqlite and faiss, providing users with the convenience of quickly initiating tests. [2023.08.26] codefuse-ModelCache&hellip; Introduction Codefuse-ModelCache is a semantic cache for large language models (LLMs). By caching pre-generated model results, it reduces response time for similar requests and improves user experience. 
- - - CodeFuse-Query - /docs/codefuse-query/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query/ - CodeFuse-Query CodeFuse-Query - - - CodeFuse-Query - /docs/overview/codefuse-query/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/codefuse-query/ - CodeFuse-Query With the increasing popularity of large-scale software development, the demand for scalable and adaptable static code analysis techniques is growing. Traditional static analysis tools such as Clang Static Analyzer (CSA) or PMD have shown good results in checking programming rules or style issues. However, these tools are often designed for specific objectives and are unable to meet the diverse and changing needs of modern software development environments. These needs may relate to Quality of Service (QoS), various programming languages, different algorithmic requirements, and various performance needs. - - - CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model - /docs/codefuse-evalution/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-evalution/ - CodeFuseEval: Multi-tasking Evaluation Benchmark for Code Large Language Model 简体中文| CodeFuseEval on ModelScope| CodeFuseEval on Hugging Face CodeFuseEval is a Code Generation benchmark that combines the multi-tasking scenarios of CodeFuse Model with the benchmarks of HumanEval-x and MBPP. This benchmark is designed to evaluate the performance of models in various multi-tasking tasks, including code completion, code generation from natural language, test case generation, cross-language code translation, and code generation from Chinese commands, among others. 
- - - Connector Agent - /coagent/connector-agent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-agent/ - 快速构建一个Agent 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.agents import BaseAgent from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选一个role来做示例 # 从已有的配置中选择一个config,具体参数细节见下面 role_configs = load_role_configs(AGETN_CONFIGS) agent_config = role_configs[&#34;general_planner&#34;] # 生成agent实例 base_agent = BaseAgent( role=agent_config. 
- - - Connector Agent - /muagent/connector-agent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-agent/ - Quickly Build an Agent First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then Set LLM Configuration and Vector Model Configuration Configure related LLM and Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent. - - - Connector Chain - /coagent/connector-chain/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-chain/ - 快速构建一个 agent chain 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) # 设置openai的api-key import os, sys import openai import importlib os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxxx&#34; openai.api_key = &#34;sk-xxxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选多个role组合成 agent chain from 
coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent. - - - Connector Chain - /muagent/connector-chain/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-chain/ - Quickly Build an Agent First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then Set LLM Configuration and Vector Model Configuration Configure related LLM and Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent. - - - Connector Memory - /coagent/connector-memory/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-memory/ - Memory Manager 主要用于 chat history 的管理,暂未完成 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 使用示例 创建 memory manager 实例 import os import openai from coagent.base_configs.env_config import KB_ROOT_PATH from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.schema import Message os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os. 
- - - Connector Memory - /muagent/connector-memory/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-memory/ - Memory Manager Primarily used for managing chat history, not yet completed Read and write chat history in the database, including user input, llm output, doc retrieval, code retrieval, search retrieval. Summarize key information from the chat history into a summary context, serving as a prompt context. Provide a search function to retrieve information related to the question from chat history or summary context, aiding in Q&amp;A. Usage Example Create memory manager instance import os import openai from coagent. - - - Connector Phase - /coagent/connector-phase/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-phase/ - 快速构建一个 agent phase 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.phase import BasePhase from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的 phase 配置中选一个 phase 来做示例 # log-level,print 
prompt和llm predict os. - - - Connector Phase - /muagent/connector-phase/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-phase/ - Quickly Build an Agent Phase First, add OpenAI configuration, which can be models with similar interfaces to OpenAI (triggered via fastchat). import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then Set LLM Configuration and Vector Model Configuration Configure related LLM and Embedding Model. from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent. - - - Connector Prompt - /coagent/connector-prompt/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/connector-prompt/ - Prompt 的标准结构 在整个Prompt的整个结构中,我们需要去定义三个部分 Agent Profil Input Format Response Output Format #### Agent Profile Agent Description ... #### Input Format **Origin Query:** the initial question or objective that the user wanted to achieve **Context:** the current status and history of the tasks to determine if Origin Query has been achieved. #### Response Output Format **Action Status:** finished or continued If it&#39;s &#39;finished&#39;, the context can answer the origin query. If it&#39;s &#39;continued&#39;, the context cant answer the origin query. - - - Connector Prompt - /muagent/connector-prompt/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-prompt/ - Prompt Manager Managing prompt creation in multi-agent linkages Quick Configuration: Utilizing preset processing functions, users can easily configure by simply defining the inputs and outputs of the agents, enabling fast assembly and configuration of multi-agent prompts. 
Customization Support: Allows users to customize the internal processing logic of each module within the prompt to achieve personalized implementation of the agent prompt. Preset Template Structure for Prompts Agent Profile: This section involves the basic description of the agent, including but not limited to the type of agent, its functions, and command set. - - - Contribution Guide - /contribution/contribution-guide/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/contribution-guide/ - 中文&nbsp | &nbspEnglish&nbsp Thank you for your interest in the Codefuse project. We warmly welcome any suggestions, opinions (including criticisms), comments, and contributions to the Codefuse project. Your suggestions, opinions, and comments on Codefuse can be directly submitted through GitHub Issues. There are many ways to participate in the Codefuse project and contribute to it: code implementation, test writing, process tool improvement, documentation enhancement, and more. We welcome any contributions and will add you to our list of contributors. - - - Custom Retrieval - /muagent/custom-retrieval/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/custom-retrieval/ - Basic Introduction Doc Retrieval is the document vector database, which is the most mainstream method for knowledge base construction nowadays. It uses Text Embedding models to vectorize documents and stores them in a vector database. In the future, we will also support querying based on knowledge graph and automatically extracting entities and relationships through large models to explore various complex relationships in data. Code Retrieval LLM faces challenges in tasks such as code generation, repair, and component understanding, including lagging code training data and the inability to perceive the dependency structure of code context. - - - Custom Tool - /muagent/custom-tool/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/custom-tool/ - Introduction In MuAgent, it also supports the registration of Tools by Agents. 
By registering the BaseToolModel class with Python and writing Tool_name Tool_description ToolInputArgs ToolOutputArgs run and other relevant properties and methods, the quick integration of tools can be achieved. It also supports the direct use of the langchain Tool interface. For example, functions like the aforementioned XXRetrieval can also be registered as a Tool, to be ultimately called by an LLM. - - - Customed Examples - /coagent/customed-examples/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/customed-examples/ - 如何创建你个性化的 agent phase 场景 下面通过 autogen 的 auto_feedback_from_code_execution 构建过来,来详细演示如何自定义一个 agent phase 的构建 设计你的prompt结构 import os, sys, requests # from configs.model_config import * from coagent.connector.phase import BasePhase from coagent.connector.chains import BaseChain from coagent.connector.schema import Message from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS import importlib # update new agent configs auto_feedback_from_code_execution_PROMPT = &#34;&#34;&#34;#### Agent Profile You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. - - - Customed Examples - /muagent/custom-examples/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/custom-examples/ - How to Create Your Personalized Agent Phase Scenario Below we will use a code repository to demonstrate the automatic generation of API documentation from code, detailing how to customize the construction of an agent phase. Design Your Prompt Structure codeGenDocGroup_PROMPT, create group Agent Prompt # update new agent configs codeGenDocGroup_PROMPT = &#34;&#34;&#34;#### Agent Profile Your goal is to response according the Context Data&#39;s information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. 
- - - Data - /docs/data/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/data/ - ⏬ Data Download Method 1: Download the zip file (you can also simply open the following link with the browser): wget https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip then unzip it and you may load the data with pandas: import os import pandas as pd File_Dir=&#34;devopseval-exam&#34; test_df=pd.read_csv(os.path.join(File_Dir,&#34;test&#34;,&#34;UnitTesting.csv&#34;)) Method 2: Directly load the dataset using Hugging Face datasets: from datasets import load_dataset dataset=load_dataset(r&#34;DevOps-Eval/devopseval-exam&#34;,name=&#34;UnitTesting&#34;) print(dataset[&#39;val&#39;][0]) # {&#34;id&#34;: 1, &#34;question&#34;: &#34;单元测试应该覆盖以下哪些方面?&#34;, &#34;A&#34;: &#34;正常路径&#34;, &#34;B&#34;: &#34;异常路径&#34;, &#34;C&#34;: &#34;边界值条件&#34;,&#34;D&#34;: 所有以上,&#34;answer&#34;: &#34;D&#34;, &#34;explanation&#34;: &#34;&#34;} ``` 👀 Notes To facilitate usage, we have organized the category name handlers and English/Chinese names corresponding to 55 subcategories. - - - Embedding Config - /muagent/embedding-model-config/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/embedding-model-config/ - Prepare Relevant Parameters First, add the OpenAI configuration; this could also be a model similar to the OpenAI interface (launched via fastchat). 
import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; Build LLM Config Constructing with a local model file from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, embed_model_path=embed_model_path ) Constructing via OpenAI from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;openai&#34;, api_key=api_key, api_base_url=api_base_url, ) Customizing and inputting langchain embeddings from muagent. - - - Evaluate - /docs/codefuse-devops-eval-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-eval-quickstart/ - 🚀 How to Evaluate If you need to test your own huggingface-formatted model, the overall steps are as follows: Write the loader function for the model. Write the context_builder function for the model. Register the model in the configuration file. Run the testing script. If the model does not require any special processing after loading, and the input does not need to be converted to a specific format (e.g. chatml format or other human-bot formats), you can directly proceed to step 4 to initiate the testing. 
- - - FasterTransformer4CodeFuse - /docs/fastertransformer4codefuse/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/fastertransformer4codefuse/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - FasterTransformer4CodeFuse - /docs/overview/fastertransformer4codefuse/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/fastertransformer4codefuse/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - Feature - /docs/codefuse-modelcache-feature/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-feature/ - From a functional standpoint, to address Huggingface network issues and improve inference speed, local inference capabilities for embeddings have been added. Given some limitations in the SQLAlchemy framework, we have rewritten the relational database interaction module for more flexible database operations. In practice, large model products need to interface with multiple users and models; thus, support for multi-tenancy has been added to ModelCache, as well as preliminary compatibility with system commands and multi-turn conversations. - - - GodelLanguage - /docs/codefuse-query-godellanguage/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-godellanguage/ - GödelScript Query Language Index GödelScript Basic Concepts and Syntax Introduction Basic Program Structure Fundamental Types and Compiler Built-in Functions Functions Statements Schema Database Trait Import Query Ungrounded Error: Unassigned/Unbound Error Query Examples Java Python JavaScript XML Go Query Debugging and Optimization Tips Schema Arguments Causing Excessively Large Cartesian Products Multiple Layers of for Causing Excessively Large Cartesian Products Avoid Misusing @inline and Strategies for Necessary Inline Optimization Using Query Scripts on a Local Machine Basic Concepts and Syntax of GödelScript Introduction // script fn hello(greeting: string) -&gt; bool { return greeting = &#34;hello world! 
- - - How to better configure your cache - /docs/codefuse-modelcache-config/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-config/ - Environment Dependencies Python version: 3.8 or higher To install dependencies: pip install requirements.txt Service Startup Before starting the service, the following environment configurations should be performed: Install relational database MySQL, import SQL to create tables, SQL file: reference_doc/create_table.sql Install vector database Milvus Add database access information to the configuration files, which are: modelcache/config/milvus_config.ini modelcache/config/mysql_config.ini Download offline model bin files, refer to: https://huggingface.co/shibing624/text2vec-base-chinese/tree/main, and place the downloaded bin files into the model/text2vec-base-chinese folder Start the backend service using the flask4modelcache. - - - Introduction - /docs/codefuse-query-introduction/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-introduction/ - Introduction CodeFuse-Query is a code data platform that supports structured analysis of various programming languages. The core idea is to transform all code into data using various language parsers and to store this data in a structured format within a code database. Data analysis is then performed according to business needs using a custom query language, as shown in the diagram below: 2.1 Architecture of CodeFuse-Query Overall, the CodeFuse-Query code data platform is divided into three main parts: the code data model, the code query DSL (Domain-Specific Language), and platform productization services. - - - Introduction - /docs/mftcoder-introduction/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-introduction/ - Introduction High Accuracy and efficiency Multi-task Fine-tuning framework for Code LLMs. 
MFTCoder is an open-source project of CodeFuse for accurate and efficient Multi-task Fine-tuning(MFT) on Large Language Models(LLMs), especially on Code-LLMs(large language model for code tasks). Moreover, we open source Code LLM models and code-related datasets along with the MFTCoder framework. In MFTCoder, we released two codebases for finetuning Large Language Models: MFTCoder-accelerate is a framework with accelerate and DeepSpeed/FSDP. All tech-stacks are open-source and vibrant. - - - Issue Report - /contribution/issue-report/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/issue-report/ - 中文&nbsp | &nbspEnglish&nbsp Issue Type Issues can be categorized into three types: Bug: Issues where code or execution examples contain bugs or lack dependencies, resulting in incorrect execution. Documentation: Discrepancies in documentation, inconsistencies between documentation content and code, etc. Feature: New functionalities that evolve from the current codebase. Issue Template Issue: Bug Template Checklist before submitting an issue Please confirm that you have checked the document, issues, discussions (GitHub feature), and other publicly available documentation. - - - LLM Config - /muagent/llm-model-config/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/llm-model-config/ - Prepare Relevant Parameters First, add the OpenAI configuration, or you can use another model similar to the OpenAI interface (launched through fastchat). 
import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; Build LLM Config By passing the class openai from muagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, stop=&#34;**Observation:**&#34; ) Customizing and inputting langchain LLM from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from langchain.llms.base import BaseLLM, LLM class CustomizedModel(LLM): repetition_penalty = 1. - - - LLM-Configuration - /docs/LLM-Configuration/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/LLM-Configuration/ - 中文&nbsp | &nbspEnglish&nbsp Local Privatization/Large Model Interface Access Leveraging open-source LLMs (Large Language Models) and Embedding models, this project enables offline private deployment based on open-source models. In addition, the project supports the invocation of OpenAI API. Local Privatization Model Access Example of model address configuration, modification of the model_config.py configuration: # Recommendation: Use Hugging Face models, preferably the chat models, and avoid using base models, which may not produce correct outputs. - - - MFTCoder - /docs/mftcoder/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder/ - MFTCoder MFTCoder - - - MFTCoder Training: Atorch Framework - /docs/mftcoder-atorch/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-atorch/ - [中文] [English] 1. Updates 🔥 MFTCoder supports fine-tuning of the GPTNeoX model under the Atorch framework. 🔥 MFTCoder supports both fully supervised fine-tuning. 🔥 MFTCoder supports LoRA using the Atorch Framework. 2. Data Format 2.1 Training Data Format The training data is in a uniformed JSONL format, in which each line of data has the following JSON format. The &ldquo;chat_rounds&rdquo; field is required, and other fields can be added or removed based on the specific need. 
- - - MFTCoder-accelerate: Training Framework with Accelerate and DeepSpeed/FSDP - /docs/mftcoder-accelerate/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-accelerate/ - [中文] [English] 1. Updates 🔥 MFTCoder-accelerate supports Full-parameters/LoRA using accelerate + FSDP Framework; 🔥 MFTCoder-accelerate supports MFT/SFT on more new mainstream open-source base models: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; 🔥 MFTCoder-accelerate supports Self-Paced Loss for Convergence Balance; 🔥 MFTCoder-accelerate supports Full-parameters/QLoRA/LoRA using accelerate + DeepSpeed Framework; 🔥 MFTCoder-accelerate supports Multitask Fine-Tuning(MFT), which is able to balance diffenrent tasks in data level. 🔥 MFTCoder-accelerate supports finetuning most of mainstream open-source base models: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen. - - - MFTCoder: High Accuracy and Efficiency Multi-task Fine-Tuning Framework - /docs/overview/mftcoder/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/mftcoder/ - 🤗 HuggingFace • 🤖 ModelScope [中文] [English] Contents News Articles Introduction Requirements Training Models Datasets Star History News 🔥🔥🔥 [2024/01/17] We released MFTCoder v0.3.0, mainly for MFTCoder-accelerate. It now supports new models like Mixtral(MoE), DeepSeek-coder, chatglm3. It supports FSDP as an option. It also supports Self-paced Loss as a solution for convergence balance in Multitask Fine-tuning. 🔥🔥🔥 [2024/01/17] CodeFuse-DeepSeek-33B has been released, achieving a pass@1 (greedy decoding) score of 78. - - - MuAgent - /muagent/muagent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/muagent/ - Introduction To enhance the performance of large models in terms of inference accuracy, various innovative Large Language Model (LLM) playbooks have emerged in the industry. From the earliest Chain of Thought (CoT) and Thread of Thought (ToT) to Games on Tracks (GoT), these methods have continually expanded the capability boundaries of LLMs. 
When handling complex problems, we can select, invoke and execute tool feedback through the ReAct process, while realizing multi-round tool use and multi-step execution. - - - overview - /docs/en_overview/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/en_overview/ - HuggingFace | ModelScope Hello World! This is CodeFuse! CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native sotware developing, covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis. We are passionating about creating innovative open-source solutions that empower developers throughout the software development process as shown above. We also encourage engineers and researchers within this community to join us in co-constructing/improving CodeFuse. - - - Prompt Manager - /coagent/prompt-manager/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/prompt-manager/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 Prompt自定义配置 Prompt模块参数 field_name:唯一的字段名称标识,必须提供。 function:指定如何处理输入数据的函数,必须提供。 title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 Prompt配置示例 
Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 [ {&#34;field_name&#34;: &#39;agent_profile&#39;, &#34;function_name&#34;: &#39;handle_agent_profile&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;context_placeholder&#39;, &#34;function_name&#34;: &#39;&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;tool_information&#39;,&#34;function_name&#34;: &#39;handle_tool_data&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;reference_documents&#39;, &#34;function_name&#34;: &#39;handle_doc_info&#39;}, {&#34;field_name&#34;: &#39;session_records&#39;, &#34;function_name&#34;: &#39;handle_session_records&#39;}, {&#34;field_name&#34;: &#39;task_records&#39;, &#34;function_name&#34;: &#39;handle_task_records&#39;}, {&#34;field_name&#34;: &#39;output_format&#39;, &#34;function_name&#34;: &#39;handle_output_format&#39;, &#39;title&#39;: &#39;Response Output Format&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;response&#39;, &#34;function_name&#34;: &#39;handle_response&#39;, &#34;title&#34;=&#34;begin! 
- - - Prompt Manager - /coagent/prompt-manager/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/prompt-manager/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 Prompt自定义配置 Prompt模块参数 field_name:唯一的字段名称标识,必须提供。 function:指定如何处理输入数据的函数,必须提供。 title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 Prompt配置示例 Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 [ {&#34;field_name&#34;: &#39;agent_profile&#39;, &#34;function_name&#34;: &#39;handle_agent_profile&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;context_placeholder&#39;, &#34;function_name&#34;: &#39;&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;tool_information&#39;,&#34;function_name&#34;: &#39;handle_tool_data&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;reference_documents&#39;, &#34;function_name&#34;: &#39;handle_doc_info&#39;}, {&#34;field_name&#34;: &#39;session_records&#39;, &#34;function_name&#34;: &#39;handle_session_records&#39;}, 
{&#34;field_name&#34;: &#39;task_records&#39;, &#34;function_name&#34;: &#39;handle_task_records&#39;}, {&#34;field_name&#34;: &#39;output_format&#39;, &#34;function_name&#34;: &#39;handle_output_format&#39;, &#39;title&#39;: &#39;Response Output Format&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;response&#39;, &#34;function_name&#34;: &#39;handle_response&#39;, &#34;title&#34;=&#34;begin! - - - Pull Request - /contribution/pull-request/ - Mon, 01 Jan 0001 00:00:00 +0000 - /contribution/pull-request/ - 中文&nbsp | &nbspEnglish&nbsp Contribution Pre-Checklist First, confirm whether you have checked the document, issue, discussion (GitHub features), or other publicly available documentation. Find the GitHub issue you want to address. If none exists, create an issue or draft PR and ask a Maintainer for a check Check for related, similar, or duplicate pull requests Create a draft pull request Complete the PR template for the description Link any GitHub issue(s) that are resolved by your PR Description A description of the PR should be articulated in concise language, highlighting the work completed by the PR. 
- - - Quick Start - /coagent/quick-start/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/quick-start/ - Quick Start First, set up the LLM configuration import os, sys import openai # llm config os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; Next, configure the LLM settings and vector model from coagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) Finally, choose a pre-existing scenario to execute from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from coagent. 
- - - Quick Start - /muagent/quick-start/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/quick-start/ - Quick Start For a complete example, see examples/muagent_examples First, prepare the relevant configuration information import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then, set up LLM configuration and Embedding model configuration from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.connector.phase import BasePhase from muagent.connector.schema import Message llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0. - - - QuickStart - /docs/codefuse-chatbot-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-chatbot-quickstart/ - 中文&nbsp | &nbspEnglish&nbsp 🚀 Quick Start To deploy private models, please install the NVIDIA driver by yourself. This project has been tested on Python 3.9.18 and CUDA 11.7 environments, as well as on Windows and macOS systems with x86 architecture. For Docker installation, private LLM access, and related startup issues, see: Start-detail&hellip; Preparation of Python environment It is recommended to use conda to manage the python environment (optional) # Prepare conda environment conda create --name Codefusegpt python=3. 
- - - QuickStart - /docs/codefuse-evalution-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-evalution-quickstart/ - Generation environment: CodeFuse-13B: Python 3.8 or above,PyTorch 1.12 or above, with a recommendation for 2.0 or above, Transformers 4.24.0 or above ,CUDA 11.4 or above (for GPU users and flash-attention users, this option should be considered). CodeFuse-CodeLlama-34B:python&gt;=3.8,pytorch&gt;=2.0.0,transformers==4.32.0,Sentencepiece,CUDA 11. Evaluation Environment The evaluation of the generated codes involves compiling and running in multiple programming languages. The versions of the programming language environments and packages we use are as follows: Dependency Version Python 3. - - - QuickStart - /docs/codefuse-mft-vlm/quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-mft-vlm/quickstart/ - Contents Install Datasets Multimodal Alignment Visual Instruction Tuning Evaluation Install Please run sh init_env.sh Datasets Here&rsquo;s the table of datasets we used to train CodeFuse-VLM-14B: Dataset Task Type Number of Samples synthdog-en OCR 800,000 synthdog-zh OCR 800,000 cc3m(downsampled) Image Caption 600,000 cc3m(downsampled) Image Caption 600,000 SBU Image Caption 850,000 Visual Genome VQA (Downsampled) Visual Question Answer(VQA) 500,000 Visual Genome Region descriptions (Downsampled) Reference Grouding 500,000 Visual Genome objects (Downsampled) Grounded Caption 500,000 OCR VQA (Downsampled) OCR and VQA 500,000 Please download these datasets on their own official websites. - - - QuickStart - /docs/codefuse-modelcache-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-quickstart/ - ModelCache is easy to use, and you can build a cache testing demo in just one step. 
Quick Start Building a Cache The default interface for Cache is shown below: class Cache: # it should be called when start the cache system def __init__(self): self.has_init = False self.cache_enable_func = None self.embedding_func = None self.post_process_messages_func = None self.config = Config() Before creating a ModelCache, consider the following questions: How will you generate embedding vectors for queries? - - - QuickStart - /docs/codefuse-query-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-quickstart/ - Installation, Configuration, and Running Hardware and Software Requirements Hardware: 4C8G Environment Requirements: Java 1.8 and Python 3.8 or above runtime environments. Please ensure Java and Python executables are available. Sparrow Installation Steps and Guidance The CodeFuse-Query download package is a zip archive that contains tools, scripts, and various files specific to CodeFuse-Query. If you do not have a CodeFuse-Query license, downloading this archive indicates your agreement with the CodeFuse-Query Terms and Conditions. - - - QuickStart - /docs/codefuse-devops-model-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-quickstart/ - Dependency Installation Please install the packages listed in the requirements.txt file from the GitHub address first. You can refer to the following code: pip install -r requirements.txt Model Download Model download information is as follows: 🤗 Huggingface Address - Base Model Aligned Model 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat 🤖 ModelScope Address - Base Model Aligned Model 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat Find the version of the Chat model you want to download; currently, 7B and 14B models are provided. 
- - - QuickStart - /docs/mftcoder-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-quickstart/ - Requirements To begin, ensure that you have successfully installed CUDA (version &gt;= 11.4, preferably 11.7) along with the necessary drivers. Additionally, make sure you have installed torch (version 2.0.1). Next, we have provided an init_env.sh script to simplify the installation of required packages. Execute the following command to run the script: sh init_env.sh We highly recommend training with flash attention(version &gt;= 2.1.0, preferably 2.3.6), please refer to the following link for installation instructions: https://github. - - - QuickStart - /docs/test-agent-quickstart/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/test-agent-quickstart/ - QuickStart Prerequisites Model Download You can get detailed information about the model and download the model files from modelscope or huggingface. Please note: 需要注意的是: If you download the model through modelscope, refer to the download instructions: Download Instructions; If you download the model through huggingface, please make sure you have proper access to huggingface. Environment Installation python&gt;=3.8 transformers==4.33.2 git clone https://github.com/codefuse-ai/Test-Agent cd Test-Agent pip install -r requirements.txt Before starting to run the TestGPT-7B model, please ensure that your execution environment has about 14GB of VRAM. - - - Release Note - /docs/codefuse-modelcache-release/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-release/ - 时间 功能 版本号 20230430 Completed GPTCache research, open-source process running through OpenAI interface, single-node form 无 20230509 1. Completed technology selection and upstream/downstream interaction scheme 2. Redeveloped database module, replaced SQLAlchemy framework 3. Refactored llm_handler module, compatible with codegpt, adapted codegpt model parameters 数 V0.1.0 20230519 1. Dynamically selected codegpt service mode based on environment 2. 
Capability for local model loading and pre-loading 3. Added dynamic loading capability for local paths based on environment V0. - - - Start-Detail - /docs/chatbot/start-detail/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/chatbot/start-detail/ - 中文&nbsp | &nbspEnglish&nbsp If you need to deploy a privatized model, please install the NVIDIA driver yourself. Preparation of Python environment It is recommended to use conda to manage the python environment (optional) # Prepare conda environment conda create --name Codefusegpt python=3.9 conda activate Codefusegpt Install related dependencies cd Codefuse-ChatBot pip install -r requirements.txt Sandbox Environment Preparation Windows Docker installation: Docker Desktop for Windows supports 64-bit versions of Windows 10 Pro with Hyper-V enabled (Hyper-V is not required for versions v1903 and above), or 64-bit versions of Windows 10 Home v1903 and above. - - - Test-Agent - /docs/test-agent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/test-agent/ - Test-Agent Test-Agent - - - Test-Agent: Your AI Test Assistant - /docs/overview/test-agent-your-ai-test-assistant/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/overview/test-agent-your-ai-test-assistant/ - Local Mac M1 Experience Moda Experience Moda Model Access Link:ModelScope TestGPT-7B What is Test Agent? (Introduction) Test Agent aims to build an &ldquo;intelligent agent&rdquo; in the testing domain, integrating large models with engineering technologies in the quality domain to promote the generational upgrade of quality technology. We look forward to collaborating with community members to create innovative solutions in the testing domain, establish a 24-hour online testing assistant service, and make testing as smooth as silk. 
- - - Toolchain - /docs/codefuse-query-toolchain/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-toolchain/ - Developing Plugins (VSCode) Installation Install from VSCode marketplace (Recommand) VSCode Extension Install from local via VSIX pack Download the plugin. Manually install from vsix: Or use the command directly from the terminal to install: code --install-extension [extension vsix file path] Environment Preparation Sparrow CLI, refer to Section 3 Installation, Configuration, and Running. Extension Features This extension provides the following feature modules: COREF AST Viewer Gödel Language Server Gödel Language Runner COREF AST Viewer The following features need to be enabled in the extension settings. - - - Train Detail - /docs/codefuse-devops-model-train/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-train/ - Training Process According to the literature review, it is known that most domain models are based on conversational models and undergo knowledge infusion through Supervised Fine-Tuning (SFT). However, the QA corpus required for SFT fine-tuning largely comes from ChatGPT generation, which may not fully cover domain knowledge. Therefore, the DevOps-Model adopts a pre-training plus training followed by SFT fine-tuning approach, as illustrated in Figure 2.1. We believe that for large domain models, additional pre-training is necessary. - - - User Case - /docs/codefuse-query-usercase/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-usercase/ - Use Cases Querying Code Features A developer wants to know which String type variables are used in Repo A, so he writes a Gödel script as follows and submits it to the CodeFuse-Query system for results. 
// script use coref::java::* fn out(var: string) -&gt; bool { for(v in Variable(JavaDB::load(&#34;coref_java_src.db&#34;))) { if (v.getType().getName() = &#34;String&#34; &amp;&amp; var = v.getName()) { return true } } } fn main() { output(out()) } Similar needs: querying for classes, functions, variables, return values, call graphs, class inheritance, etc. - - - diff --git a/docs/index.zh-CN.md b/docs/index.zh-CN.md new file mode 100644 index 0000000..45b2fd2 --- /dev/null +++ b/docs/index.zh-CN.md @@ -0,0 +1,47 @@ +--- +title: CodeFuse - 让研发变得更简单 +hero: + title: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*2wOZRJyrE4gAAAAAAAAAAAAADlHYAQ/original' + description: 让研发变得更简单 +CodeGenerationTitle: + title: 代码生成 + buttomText: 仓库 +CodeGeneration: + - title: CodeFuse-多功能编程器 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*SdZIQYUwWqgAAAAAAAAAAAAADlHYAQ/original' + description: CodeFuse-MFTcoder是一个多任务微调框架,它旨在提升代码大模型的编程能力。与传统单任务微调相比,它能够同时处理多个编程任务,通过结合多元损失函数来均衡不同任务间的数据量、难度和收敛速度差异,从而提高了微调效率和性能。此外,该框架还引入了高效的训练优化技术,可以与多个知名的开源大模型兼容,并且在Opencompass Leaderboard上基于Deepseek模型的MFT表现排名第一。 + - title: CodeFuse-多功能-虚拟化生命周期管理 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*UhdvTKE_ZsEAAAAAAAAAAAAADlHYAQ/original' + description: CodeFuse-MFT-VLM是一个为多模态大语言模型设计的框架,旨在兼容和适应多种视觉和语言模型以支持不同类型的任务。它集成了众多视觉编码器如CLIP系列和语言模型如Vicuna和LLAMA系列,提供灵活的配置选项,允许用户通过VL-MFTCoder自由组合不同的模型,从而简化多模态任务的开发和应用过程。 + - title: 超赞代码-长期生命周期管理 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*-zYYS4piTp0AAAAAAAAAAAAADlHYAQ/original' + description: 蚂蚁集团联合上海交通大学发布55页代码大模型综述,覆盖超过50个模型、30个下游任务、500篇参考文献,全方位总结大语言模型在代码相关应用中的最新进展与挑战。 +DevOpsTitle: + title: 开发运维 +DevOps: + - cardTitle: CodeFuse-聊天机器人 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*l4LUSpeo7GMAAAAAAAAAAAAADlHYAQ/original' + description: DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。 + - cardTitle: DevOps-评估 + image: 
'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*DVkmS5rN2iEAAAAAAAAAAAAADlHYAQ/original' + description: DevOps-Eval是一个专门为DevOps领域大模型设计的综合评估数据集。我们希望DevOps-Eval能够帮助开发者,尤其是DevOps领域的开发者,追踪进展并分析他们拥有的DevOps大模型的优势和不足之处。 + - cardTitle: DevOps-模型 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*HCNGRblECa4AAAAAAAAAAAAADlHYAQ/original' + description: DevOps-Model 是一系列业界首个开源的中文开发运维大模型,主要致力于在 DevOps 领域发挥实际价值。目前,DevOps-Model 能够帮助工程师回答在 DevOps 生命周期中遇到的问题。 +CodeAnalysis: + title: 代码分析 + description: CodeFuse-Query 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*GmZ5QJxXM28AAAAAAAAAAAAADlHYAQ/original' +IntelligentInference: + title: 智能推理 + description: ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义Cache的相关技术。 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*5vo0RbPtGYgAAAAAAAAAAAAADlHYAQ/original' +AutomatedTesting: + title: 自动化测试 + description: TestAgent是国内首个开源的测试行业大模型,其中包含了性能最强的7B测试领域大模型,以及配套的本地模型快速发布和体验工程化框架。TestAgent旨在构建测试领域的“智能体”,融合大模型和质量领域工程化技术,促进质量技术代系升级。我们期望和社区成员一起合作,打造创新的测试领域解决方案,构建24小时在线的测试助理服务,让测试如丝般顺滑。 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*MLVXSpMIRTYAAAAAAAAAAAAADlHYAQ/original' +PerformanceEvaluation: + title: 性能评价 + description: CodeFuseEval是结合CodeFuse大模型多任务场景,在开源的HumanEval-x、MBPP、DS1000评测基准基础上,开发的面向大模型代码垂类领域的企业级多类型编程任务评估基准。可用于评估大模型在代码补全、自然语言生成代码、测试用例生成、跨语言代码翻译、中文指令生成代码、代码注解释、Bug检测/修复、代码优化等不同任务的能力表现。旨在贴近企业实际应用场景,构建而成的衡量大模型代码生成相关能力的「多维」、「多样」和「可信」的评测基准。 + image: 'https://mdn.alipayobjects.com/huamei_bvbxju/afts/img/A*vacrSasSVFYAAAAAAAAAAAAADlHYAQ/original' +--- diff --git a/docs/js/base.min.js b/docs/js/base.min.js deleted file mode 100644 index d85f858..0000000 --- a/docs/js/base.min.js +++ /dev/null @@ -1 +0,0 @@ -const 
dropdowns=document.querySelectorAll(".dropdown"),dropdownOpenSelector=".dropdown-menu.show";dropdowns.forEach(e=>{e.addEventListener("click",function(){const n=e.querySelector(".dropdown-menu.show");document.querySelectorAll(dropdownOpenSelector).forEach(e=>e.classList.remove("show")),n||e.querySelector(".dropdown-menu").classList.toggle("show")})}),document.body.addEventListener("click",function(e){const t=e.target.closest(".dropdown");t||document.querySelectorAll(dropdownOpenSelector).forEach(e=>e.classList.remove("show"))});const lsKeyColorPreference="color-preference",lsKeyColorPreferenceDarkVariant="color-preference-dark-variant",getColorPreference=()=>{let e=localStorage.getItem(lsKeyColorPreference);return e!==null?e:window.matchMedia("(prefers-color-scheme: dark)").matches?"dark":"light"};let colorPreference=getColorPreference();document.firstElementChild.setAttribute("data-color",colorPreference);const getColorPreferenceDarkVariant=()=>{let e=localStorage.getItem(lsKeyColorPreferenceDarkVariant);return e!==null?e:"dark"};let colorPreferenceDarkVariant=getColorPreferenceDarkVariant(),colorSchemes=document.querySelectorAll(".color-scheme");colorSchemes.forEach(e=>{e.addEventListener("click",function(){let t=e.dataset.value;t!==colorPreference&&(colorPreference=t,setColorPreference(),(t==="dark"||t==="night")&&(colorPreferenceDarkVariant=t,localStorage.setItem(lsKeyColorPreferenceDarkVariant,colorPreferenceDarkVariant)))})});const setColorPreference=()=>{localStorage.setItem(lsKeyColorPreference,colorPreference),document.firstElementChild.setAttribute("data-color",colorPreference)};window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change",({matches:e})=>{colorPreference=e?colorPreferenceDarkVariant:"light",setColorPreference()});const body=document.body,btnArticleNavMenu=document.querySelector("#article-nav-menu-btn");btnArticleNavMenu&&btnArticleNavMenu.addEventListener("click",function(){body.classList.add("offcanvas-sidebar-on")});const 
btnArticleNavToc=document.querySelector("#article-nav-toc-btn");btnArticleNavToc&&btnArticleNavToc.addEventListener("click",function(){body.classList.add("offcanvas-toc-on")});const btnCloseArticleNavMenu=document.querySelector("#sidebar .btn-close");btnCloseArticleNavMenu&&btnCloseArticleNavMenu.addEventListener("click",function(){body.classList.remove("offcanvas-sidebar-on")});const btnCloseArticleNavToc=document.querySelector("#toc .btn-close");if(btnCloseArticleNavToc){btnCloseArticleNavToc.addEventListener("click",function(){body.classList.remove("offcanvas-toc-on")});const e=document.querySelectorAll("#toc ul a");e.forEach(e=>{e.addEventListener("click",function(){body.classList.remove("offcanvas-toc-on")})})}body.addEventListener("click",e=>{const t=e.target.closest("#article-nav-menu-btn"),n=e.target.closest("#sidebar");!t&&!n&&body.classList.contains("offcanvas-sidebar-on")&&body.classList.remove("offcanvas-sidebar-on");const s=e.target.closest("#article-nav-toc-btn"),o=e.target.closest("#toc");!s&&!o&&body.classList.contains("offcanvas-toc-on")&&body.classList.remove("offcanvas-toc-on")});const fromDesktop=window.matchMedia("(min-width: 1280px)"),sidebarSticky=document.querySelector("#sidebar .sticky");fromDesktop&&sidebarSticky&&window.addEventListener("scroll",function(){document.body.scrollTop>80||document.documentElement.scrollTop>80?(sidebarSticky.style.top="20px",sidebarSticky.style.bottom="65px"):(sidebarSticky.style.top=null,sidebarSticky.style.bottom=null)});const fromLargeTablet=window.matchMedia("(min-width: 1024px)"),tocSticky=document.querySelector("#toc .sticky");fromLargeTablet&&tocSticky&&window.addEventListener("scroll",function(){document.body.scrollTop>80||document.documentElement.scrollTop>80?(tocSticky.style.top="20px",tocSticky.style.bottom="65px"):(tocSticky.style.top=null,tocSticky.style.bottom=null)}),"IntersectionObserver"in window&&document.addEventListener("DOMContentLoaded",function(){const 
n=document.querySelectorAll("#TableOfContents a");let e=null;const t={},s=new IntersectionObserver(n=>{n.forEach(n=>{n.isIntersecting&&(e&&e.classList.remove("active"),e=t[n.target.id],e&&e.classList.add("active"))})},{rootMargin:`0% 0% -80% 0%`});n.forEach(e=>{const n=e.getAttribute("href")?e.getAttribute("href").slice(1):null;if(n){const o=document.getElementById(n);o&&(t[n]=e,s.observe(o))}})}) \ No newline at end of file diff --git a/docs/js/component/docsearch.min.js b/docs/js/component/docsearch.min.js deleted file mode 100644 index 9a7413d..0000000 --- a/docs/js/component/docsearch.min.js +++ /dev/null @@ -1,3 +0,0 @@ -/*! @docsearch/js 3.2.0 | MIT License | © Algolia, Inc. and contributors | https://docsearch.algolia.com | https://cdn.jsdelivr.net/npm/@docsearch/js@3 */ -!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e=e||self).docsearch=t()}(this,(function(){"use strict";function e(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function t(t){for(var n=1;n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function i(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=null==e?null:"undefined"!=typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null==n)return;var r,o,c=[],i=!0,a=!1;try{for(n=n.call(e);!(i=(r=n.next()).done)&&(c.push(r.value),!t||c.length!==t);i=!0);}catch(e){a=!0,o=e}finally{try{i||null==n.return||n.return()}finally{if(a)throw o}}return c}(e,t)||u(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function 
a(e){return function(e){if(Array.isArray(e))return l(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||u(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function u(e,t){if(e){if("string"==typeof e)return l(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);return"Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n?Array.from(e):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?l(e,t):void 0}}function l(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n3)for(n=[n],c=3;c0?O(m.type,m.props,m.key,null,m.__v):m)){if(m.__=n,m.__b=n.__b+1,null===(p=b[s])||p&&m.key==p.key&&m.type===p.type)b[s]=void 0;else for(f=0;f3)for(n=[n],c=3;c=n.__.length&&n.__.push({}),n.__[e]}function ne(e){return $=1,re(pe,e)}function re(e,t,n){var r=te(W++,2);return r.t=e,r.__c||(r.__=[n?n(t):pe(void 0,t),function(e){var t=r.t(r.__[0],e);r.__[0]!==t&&(r.__=[t,r.__[1]],r.__c.setState({}))}],r.__c=K),r.__}function oe(e,t){var n=te(W++,3);!s.__s&&fe(n.__H,t)&&(n.__=e,n.__H=t,K.__H.__h.push(n))}function ce(e,t){var n=te(W++,4);!s.__s&&fe(n.__H,t)&&(n.__=e,n.__H=t,K.__h.push(n))}function ie(e,t){var n=te(W++,7);return fe(n.__H,t)&&(n.__=e(),n.__H=t,n.__h=e),n.__}function ae(){Q.forEach((function(e){if(e.__P)try{e.__H.__h.forEach(le),e.__H.__h.forEach(se),e.__H.__h=[]}catch(t){e.__H.__h=[],s.__e(t,e.__v)}})),Q=[]}s.__b=function(e){K=null,Y&&Y(e)},s.__r=function(e){G&&G(e),W=0;var t=(K=e.__c).__H;t&&(t.__h.forEach(le),t.__h.forEach(se),t.__h=[])},s.diffed=function(e){Z&&Z(e);var t=e.__c;t&&t.__H&&t.__H.__h.length&&(1!==Q.push(t)&&J===s.requestAnimationFrame||((J=s.requestAnimationFrame)||function(e){var t,n=function(){clearTimeout(r),ue&&cancelAnimationFrame(t),setTimeout(e)},r=setTimeout(n,100);ue&&(t=requestAnimationFrame(n))})(ae)),K=void 
0},s.__c=function(e,t){t.some((function(e){try{e.__h.forEach(le),e.__h=e.__h.filter((function(e){return!e.__||se(e)}))}catch(n){t.some((function(e){e.__h&&(e.__h=[])})),t=[],s.__e(n,e.__v)}})),X&&X(e,t)},s.unmount=function(e){ee&&ee(e);var t=e.__c;if(t&&t.__H)try{t.__H.__.forEach(le)}catch(e){s.__e(e,t.__v)}};var ue="function"==typeof requestAnimationFrame;function le(e){var t=K;"function"==typeof e.__c&&e.__c(),K=t}function se(e){var t=K;e.__c=e.__(),K=t}function fe(e,t){return!e||e.length!==t.length||t.some((function(t,n){return t!==e[n]}))}function pe(e,t){return"function"==typeof t?t(e):t}function me(e,t){for(var n in t)e[n]=t[n];return e}function de(e,t){for(var n in e)if("__source"!==n&&!(n in t))return!0;for(var r in t)if("__source"!==r&&e[r]!==t[r])return!0;return!1}function he(e){this.props=e}(he.prototype=new E).isPureReactComponent=!0,he.prototype.shouldComponentUpdate=function(e,t){return de(this.props,e)||de(this.state,t)};var ve=s.__b;s.__b=function(e){e.type&&e.type.__f&&e.ref&&(e.props.ref=e.ref,e.ref=null),ve&&ve(e)};var ye="undefined"!=typeof Symbol&&Symbol.for&&Symbol.for("react.forward_ref")||3911;var _e=function(e,t){return null==e?null:C(C(e).map(t))},be={map:_e,forEach:_e,count:function(e){return e?C(e).length:0},only:function(e){var t=C(e);if(1!==t.length)throw"Children.only";return t[0]},toArray:C},ge=s.__e;function Oe(){this.__u=0,this.t=null,this.__b=null}function Se(e){var t=e.__.__c;return t&&t.__e&&t.__e(e)}function Ee(){this.u=null,this.o=null}s.__e=function(e,t,n){if(e.then)for(var r,o=t;o=o.__;)if((r=o.__c)&&r.__c)return null==t.__e&&(t.__e=n.__e,t.__k=n.__k),r.__c(e,t);ge(e,t,n)},(Oe.prototype=new E).__c=function(e,t){var n=t.__c,r=this;null==r.t&&(r.t=[]),r.t.push(n);var o=Se(r.__v),c=!1,i=function(){c||(c=!0,n.componentWillUnmount=n.__c,o?o(a):a())};n.__c=n.componentWillUnmount,n.componentWillUnmount=function(){i(),n.__c&&n.__c()};var a=function(){if(!--r.__u){if(r.state.__e){var e=r.state.__e;r.__v.__k[0]=function 
e(t,n,r){return t&&(t.__v=null,t.__k=t.__k&&t.__k.map((function(t){return e(t,n,r)})),t.__c&&t.__c.__P===n&&(t.__e&&r.insertBefore(t.__e,t.__d),t.__c.__e=!0,t.__c.__P=r)),t}(e,e.__c.__P,e.__c.__O)}var t;for(r.setState({__e:r.__b=null});t=r.t.pop();)t.forceUpdate()}},u=!0===t.__h;r.__u++||u||r.setState({__e:r.__b=r.__v.__k[0]}),e.then(i,i)},Oe.prototype.componentWillUnmount=function(){this.t=[]},Oe.prototype.render=function(e,t){if(this.__b){if(this.__v.__k){var n=document.createElement("div"),r=this.__v.__k[0].__c;this.__v.__k[0]=function e(t,n,r){return t&&(t.__c&&t.__c.__H&&(t.__c.__H.__.forEach((function(e){"function"==typeof e.__c&&e.__c()})),t.__c.__H=null),null!=(t=me({},t)).__c&&(t.__c.__P===r&&(t.__c.__P=n),t.__c=null),t.__k=t.__k&&t.__k.map((function(t){return e(t,n,r)}))),t}(this.__b,n,r.__O=r.__P)}this.__b=null}var o=t.__e&&g(S,null,e.fallback);return o&&(o.__h=null),[g(S,null,t.__e?null:e.children),o]};var we=function(e,t,n){if(++n[1]===n[0]&&e.o.delete(t),e.props.revealOrder&&("t"!==e.props.revealOrder[0]||!e.o.size))for(n=e.u;n;){for(;n.length>3;)n.pop()();if(n[1]>>1,1),t.i.removeChild(e)}}),B(g(je,{context:t.context},e.__v),t.l)):t.l&&t.componentWillUnmount()}function Ie(e,t){return g(Pe,{__v:e,i:t})}(Ee.prototype=new E).__e=function(e){var t=this,n=Se(t.__v),r=t.o.get(e);return r[0]++,function(o){var c=function(){t.props.revealOrder?(r.push(o),we(t,e,r)):o()};n?n(c):c()}},Ee.prototype.render=function(e){this.u=null,this.o=new Map;var t=C(e.children);e.revealOrder&&"b"===e.revealOrder[0]&&t.reverse();for(var n=t.length;n--;)this.o.set(t[n],this.u=[1,0,this.u]);return e.children},Ee.prototype.componentDidUpdate=Ee.prototype.componentDidMount=function(){var e=this;this.o.forEach((function(t,n){we(e,n,t)}))};var ke="undefined"!=typeof 
Symbol&&Symbol.for&&Symbol.for("react.element")||60103,De=/^(?:accent|alignment|arabic|baseline|cap|clip(?!PathU)|color|fill|flood|font|glyph(?!R)|horiz|marker(?!H|W|U)|overline|paint|stop|strikethrough|stroke|text(?!L)|underline|unicode|units|v|vector|vert|word|writing|x(?!C))[A-Z]/,Ce=function(e){return("undefined"!=typeof Symbol&&"symbol"==n(Symbol())?/fil|che|rad/i:/fil|che|ra/i).test(e)};function Ae(e,t,n){return null==t.__k&&(t.textContent=""),B(e,t),"function"==typeof n&&n(),e?e.__c:null}E.prototype.isReactComponent={},["componentWillMount","componentWillReceiveProps","componentWillUpdate"].forEach((function(e){Object.defineProperty(E.prototype,e,{configurable:!0,get:function(){return this["UNSAFE_"+e]},set:function(t){Object.defineProperty(this,e,{configurable:!0,writable:!0,value:t})}})}));var xe=s.event;function Ne(){}function Re(){return this.cancelBubble}function Te(){return this.defaultPrevented}s.event=function(e){return xe&&(e=xe(e)),e.persist=Ne,e.isPropagationStopped=Re,e.isDefaultPrevented=Te,e.nativeEvent=e};var Le,qe={configurable:!0,get:function(){return this.class}},Me=s.vnode;s.vnode=function(e){var t=e.type,n=e.props,r=n;if("string"==typeof t){for(var o in r={},n){var c=n[o];"value"===o&&"defaultValue"in n&&null==c||("defaultValue"===o&&"value"in n&&null==n.value?o="value":"download"===o&&!0===c?c="":/ondoubleclick/i.test(o)?o="ondblclick":/^onchange(textarea|input)/i.test(o+t)&&!Ce(n.type)?o="oninput":/^on(Ani|Tra|Tou|BeforeInp)/.test(o)?o=o.toLowerCase():De.test(o)?o=o.replace(/[A-Z0-9]/,"-$&").toLowerCase():null===c&&(c=void 0),r[o]=c)}"select"==t&&r.multiple&&Array.isArray(r.value)&&(r.value=C(n.children).forEach((function(e){e.props.selected=-1!=r.value.indexOf(e.props.value)}))),"select"==t&&null!=r.defaultValue&&(r.value=C(n.children).forEach((function(e){e.props.selected=r.multiple?-1!=r.defaultValue.indexOf(e.props.value):r.defaultValue==e.props.value}))),e.props=r}t&&n.class!=n.className&&(qe.enumerable="className"in 
n,null!=n.className&&(r.class=n.className),Object.defineProperty(r,"className",qe)),e.$$typeof=ke,Me&&Me(e)};var He=s.__r;s.__r=function(e){He&&He(e),Le=e.__c};var Ue={ReactCurrentDispatcher:{current:{readContext:function(e){return Le.__n[e.__c].props.value}}}};"object"==("undefined"==typeof performance?"undefined":n(performance))&&"function"==typeof performance.now&&performance.now.bind(performance);function Fe(e){return!!e&&e.$$typeof===ke}var Be={useState:ne,useReducer:re,useEffect:oe,useLayoutEffect:ce,useRef:function(e){return $=5,ie((function(){return{current:e}}),[])},useImperativeHandle:function(e,t,n){$=6,ce((function(){"function"==typeof e?e(t()):e&&(e.current=t())}),null==n?n:n.concat(e))},useMemo:ie,useCallback:function(e,t){return $=8,ie((function(){return e}),t)},useContext:function(e){var t=K.context[e.__c],n=te(W++,9);return n.__c=e,t?(null==n.__&&(n.__=!0,t.sub(K)),t.props.value):e.__},useDebugValue:function(e,t){s.useDebugValue&&s.useDebugValue(t?t(e):e)},version:"16.8.0",Children:be,render:Ae,hydrate:function(e,t,n){return V(e,t),"function"==typeof n&&n(),e?e.__c:null},unmountComponentAtNode:function(e){return!!e.__k&&(B(null,e),!0)},createPortal:Ie,createElement:g,createContext:function(e,t){var n={__c:t="__cC"+d++,__:e,Consumer:function(e,t){return e.children(t)},Provider:function(e){var n,r;return this.getChildContext||(n=[],(r={})[t]=this,this.getChildContext=function(){return r},this.shouldComponentUpdate=function(e){this.props.value!==e.value&&n.some(P)},this.sub=function(e){n.push(e);var t=e.componentWillUnmount;e.componentWillUnmount=function(){n.splice(n.indexOf(e),1),t&&t.call(e)}}),e.children}};return n.Provider.__=n.Consumer.contextType=n},createFactory:function(e){return g.bind(null,e)},cloneElement:function(e){return Fe(e)?z.apply(null,arguments):e},createRef:function(){return{current:null}},Fragment:S,isValidElement:Fe,findDOMNode:function(e){return 
e&&(e.base||1===e.nodeType&&e)||null},Component:E,PureComponent:he,memo:function(e,t){function n(e){var n=this.props.ref,r=n==e.ref;return!r&&n&&(n.call?n(null):n.current=null),t?!t(this.props,e)||!r:de(this.props,e)}function r(t){return this.shouldComponentUpdate=n,g(e,t)}return r.displayName="Memo("+(e.displayName||e.name)+")",r.prototype.isReactComponent=!0,r.__f=!0,r},forwardRef:function(e){function t(t,r){var o=me({},t);return delete o.ref,e(o,(r=t.ref||r)&&("object"!=n(r)||"current"in r)?r:null)}return t.$$typeof=ye,t.render=t,t.prototype.isReactComponent=t.__f=!0,t.displayName="ForwardRef("+(e.displayName||e.name)+")",t},unstable_batchedUpdates:function(e,t){return e(t)},StrictMode:S,Suspense:Oe,SuspenseList:Ee,lazy:function(e){var t,n,r;function o(o){if(t||(t=e()).then((function(e){n=e.default||e}),(function(e){r=e})),r)throw r;if(!n)throw t;return g(n,o)}return o.displayName="Lazy",o.__f=!0,o},__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:Ue};function Ve(){return Be.createElement("svg",{width:"15",height:"15",className:"DocSearch-Control-Key-Icon"},Be.createElement("path",{d:"M4.505 4.496h2M5.505 5.496v5M8.216 4.496l.055 5.993M10 7.5c.333.333.5.667.5 1v2M12.326 4.5v5.996M8.384 4.496c1.674 0 2.116 0 2.116 1.5s-.442 1.5-2.116 1.5M3.205 9.303c-.09.448-.277 1.21-1.241 1.203C1 10.5.5 9.513.5 8V7c0-1.57.5-2.5 1.464-2.494.964.006 1.134.598 1.24 1.342M12.553 10.5h1.953",strokeWidth:"1.2",stroke:"currentColor",fill:"none",strokeLinecap:"square"}))}function ze(){return Be.createElement("i",{className:"icon icon-search DocSearch-Search-Icon"})}var We=["translations"];function Ke(){return Ke=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var Ye="Ctrl";var Ge=Be.forwardRef((function(e,t){var n=e.translations,r=void 
0===n?{}:n,o=Qe(e,We),c=r.buttonText,i=void 0===c?"Search":c,a=r.buttonAriaLabel,u=void 0===a?"Search":a,l=Je(ne(null),2),s=l[0],f=l[1];return oe((function(){"undefined"!=typeof navigator&&(/(Mac|iPhone|iPod|iPad)/i.test(navigator.platform)?f("⌘"):f(Ye))}),[]),Be.createElement("button",Ke({type:"button",className:"DocSearch DocSearch-Button","aria-label":u},o,{ref:t}),Be.createElement("span",{className:"DocSearch-Button-Container"},Be.createElement(ze,null),Be.createElement("span",{className:"DocSearch-Button-Placeholder"},i)),Be.createElement("span",{className:"DocSearch-Button-Keys"},null!==s&&Be.createElement(Be.Fragment,null,Be.createElement("kbd",{className:"DocSearch-Button-Key"},s===Ye?Be.createElement(Ve,null):s),Be.createElement("kbd",{className:"DocSearch-Button-Key"},"K"))))}));function Ze(e){return e.reduce((function(e,t){return e.concat(t)}),[])}var Xe=0;function et(e){return 0===e.collections.length?0:e.collections.reduce((function(e,t){return e+t.items.length}),0)}var tt=function(){},nt=[{segment:"autocomplete-core",version:"1.7.1"}];function rt(e,t){var n=t;return{then:function(t,r){return rt(e.then(ct(t,n,e),ct(r,n,e)),n)},catch:function(t){return rt(e.catch(ct(t,n,e)),n)},finally:function(t){return t&&n.onCancelList.push(t),rt(e.finally(ct(t&&function(){return n.onCancelList=[],t()},n,e)),n)},cancel:function(){n.isCanceled=!0;var e=n.onCancelList;n.onCancelList=[],e.forEach((function(e){e()}))},isCanceled:function(){return!0===n.isCanceled}}}function ot(e){return rt(e,{isCanceled:!1,onCancelList:[]})}function ct(e,t,n){return e?function(n){return t.isCanceled?n:e(n)}:n}function it(e,t,n,r){if(!n)return null;if(e<0&&(null===t||null!==r&&0===t))return n+e;var o=(null===t?-1:t)+e;return o<=-1||o>=n?null===r?null:0:o}function at(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function 
ut(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function lt(e,t){var n=[];return Promise.resolve(e(t)).then((function(e){return Promise.all(e.filter((function(e){return Boolean(e)})).map((function(e){if(e.sourceId,n.includes(e.sourceId))throw new Error("[Autocomplete] The `sourceId` ".concat(JSON.stringify(e.sourceId)," is not unique."));n.push(e.sourceId);var t=function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var Vt,zt,Wt,Kt=null,Jt=(Vt=-1,zt=-1,Wt=void 0,function(e){var t=++Vt;return Promise.resolve(e).then((function(e){return Wt&&t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var en=["props","refresh","store"],tn=["inputElement","formElement","panelElement"],nn=["inputElement"],rn=["inputElement","maxLength"],on=["item","source"];function cn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function an(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function sn(e){var t=e.props,n=e.refresh,r=e.store,o=ln(e,en);return{getEnvironmentProps:function(e){var n=e.inputElement,o=e.formElement,c=e.panelElement;function i(e){!r.getState().isOpen&&r.pendingRequests.isEmpty()||e.target===n||!1===[o,c].some((function(t){return n=t,r=e.target,n===r||n.contains(r);var 
n,r}))&&(r.dispatch("blur",null),t.debug||r.pendingRequests.cancelAll())}return an({onTouchStart:i,onMouseDown:i,onTouchMove:function(e){!1!==r.getState().isOpen&&n===t.environment.document.activeElement&&e.target!==n&&n.blur()}},ln(e,tn))},getRootProps:function(e){return an({role:"combobox","aria-expanded":r.getState().isOpen,"aria-haspopup":"listbox","aria-owns":r.getState().isOpen?"".concat(t.id,"-list"):void 0,"aria-labelledby":"".concat(t.id,"-label")},e)},getFormProps:function(e){e.inputElement;return an({action:"",noValidate:!0,role:"search",onSubmit:function(c){var i;c.preventDefault(),t.onSubmit(an({event:c,refresh:n,state:r.getState()},o)),r.dispatch("submit",null),null===(i=e.inputElement)||void 0===i||i.blur()},onReset:function(c){var i;c.preventDefault(),t.onReset(an({event:c,refresh:n,state:r.getState()},o)),r.dispatch("reset",null),null===(i=e.inputElement)||void 0===i||i.focus()}},ln(e,nn))},getLabelProps:function(e){return an({htmlFor:"".concat(t.id,"-input"),id:"".concat(t.id,"-label")},e)},getInputProps:function(e){var c;function i(e){(t.openOnFocus||Boolean(r.getState().query))&&$t(an({event:e,props:t,query:r.getState().completion||r.getState().query,refresh:n,store:r},o)),r.dispatch("focus",null)}var a=e||{},u=(a.inputElement,a.maxLength),l=void 0===u?512:u,s=ln(a,rn),f=st(r.getState()),p=function(e){return Boolean(e&&e.match(ft))}((null===(c=t.environment.navigator)||void 0===c?void 0:c.userAgent)||""),m=null!=f&&f.itemUrl&&!p?"go":"search";return an({"aria-autocomplete":"both","aria-activedescendant":r.getState().isOpen&&null!==r.getState().activeItemId?"".concat(t.id,"-item-").concat(r.getState().activeItemId):void 0,"aria-controls":r.getState().isOpen?"".concat(t.id,"-list"):void 
0,"aria-labelledby":"".concat(t.id,"-label"),value:r.getState().completion||r.getState().query,id:"".concat(t.id,"-input"),autoComplete:"off",autoCorrect:"off",autoCapitalize:"off",enterKeyHint:m,spellCheck:"false",autoFocus:t.autoFocus,placeholder:t.placeholder,maxLength:l,type:"search",onChange:function(e){$t(an({event:e,props:t,query:e.currentTarget.value.slice(0,l),refresh:n,store:r},o))},onKeyDown:function(e){!function(e){var t=e.event,n=e.props,r=e.refresh,o=e.store,c=Xt(e,Qt);if("ArrowUp"===t.key||"ArrowDown"===t.key){var i=function(){var e=n.environment.document.getElementById("".concat(n.id,"-item-").concat(o.getState().activeItemId));e&&(e.scrollIntoViewIfNeeded?e.scrollIntoViewIfNeeded(!1):e.scrollIntoView(!1))},a=function(){var e=st(o.getState());if(null!==o.getState().activeItemId&&e){var n=e.item,i=e.itemInputValue,a=e.itemUrl,u=e.source;u.onActive(Gt({event:t,item:n,itemInputValue:i,itemUrl:a,refresh:r,source:u,state:o.getState()},c))}};t.preventDefault(),!1===o.getState().isOpen&&(n.openOnFocus||Boolean(o.getState().query))?$t(Gt({event:t,props:n,query:o.getState().query,refresh:r,store:o},c)).then((function(){o.dispatch(t.key,{nextActiveItemId:n.defaultActiveItemId}),a(),setTimeout(i,0)})):(o.dispatch(t.key,{}),a(),i())}else if("Escape"===t.key)t.preventDefault(),o.dispatch(t.key,null),o.pendingRequests.cancelAll();else if("Tab"===t.key)o.dispatch("blur",null),o.pendingRequests.cancelAll();else if("Enter"===t.key){if(null===o.getState().activeItemId||o.getState().collections.every((function(e){return 0===e.items.length})))return void(n.debug||o.pendingRequests.cancelAll());t.preventDefault();var u=st(o.getState()),l=u.item,s=u.itemInputValue,f=u.itemUrl,p=u.source;if(t.metaKey||t.ctrlKey)void 0!==f&&(p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c)),n.navigator.navigateNewTab({itemUrl:f,item:l,state:o.getState()}));else if(t.shiftKey)void 
0!==f&&(p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c)),n.navigator.navigateNewWindow({itemUrl:f,item:l,state:o.getState()}));else if(t.altKey);else{if(void 0!==f)return p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c)),void n.navigator.navigate({itemUrl:f,item:l,state:o.getState()});$t(Gt({event:t,nextState:{isOpen:!1},props:n,query:s,refresh:r,store:o},c)).then((function(){p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c))}))}}}(an({event:e,props:t,refresh:n,store:r},o))},onFocus:i,onBlur:tt,onClick:function(n){e.inputElement!==t.environment.document.activeElement||r.getState().isOpen||i(n)}},s)},getPanelProps:function(e){return an({onMouseDown:function(e){e.preventDefault()},onMouseLeave:function(){r.dispatch("mouseleave",null)}},e)},getListProps:function(e){return an({role:"listbox","aria-labelledby":"".concat(t.id,"-label"),id:"".concat(t.id,"-list")},e)},getItemProps:function(e){var c=e.item,i=e.source,a=ln(e,on);return an({id:"".concat(t.id,"-item-").concat(c.__autocomplete_id),role:"option","aria-selected":r.getState().activeItemId===c.__autocomplete_id,onMouseMove:function(e){if(c.__autocomplete_id!==r.getState().activeItemId){r.dispatch("mousemove",c.__autocomplete_id);var t=st(r.getState());if(null!==r.getState().activeItemId&&t){var i=t.item,a=t.itemInputValue,u=t.itemUrl,l=t.source;l.onActive(an({event:e,item:i,itemInputValue:a,itemUrl:u,refresh:n,source:l,state:r.getState()},o))}}},onMouseDown:function(e){e.preventDefault()},onClick:function(e){var a=i.getItemInputValue({item:c,state:r.getState()}),u=i.getItemUrl({item:c,state:r.getState()});(u?Promise.resolve():$t(an({event:e,nextState:{isOpen:!1},props:t,query:a,refresh:n,store:r},o))).then((function(){i.onSelect(an({event:e,item:c,itemInputValue:a,itemUrl:u,refresh:n,source:i,state:r.getState()},o))}))}},a)}}}function fn(e,t){var 
n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function pn(e){for(var t=1;t0},reshape:function(e){return e.sources}},e),{},{id:null!==(n=e.id)&&void 0!==n?n:"autocomplete-".concat(Xe++),plugins:o,initialState:wt({activeItemId:null,query:"",completion:null,collections:[],isOpen:!1,status:"idle",context:{}},e.initialState),onStateChange:function(t){var n;null===(n=e.onStateChange)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onStateChange)||void 0===n?void 0:n.call(e,t)}))},onSubmit:function(t){var n;null===(n=e.onSubmit)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onSubmit)||void 0===n?void 0:n.call(e,t)}))},onReset:function(t){var n;null===(n=e.onReset)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onReset)||void 0===n?void 0:n.call(e,t)}))},getSources:function(n){return Promise.all([].concat(Ot(o.map((function(e){return e.getSources}))),[e.getSources]).filter(Boolean).map((function(e){return lt(e,n)}))).then((function(e){return Ze(e)})).then((function(e){return e.map((function(e){return wt(wt({},e),{},{onSelect:function(n){e.onSelect(n),t.forEach((function(e){var t;return null===(t=e.onSelect)||void 0===t?void 0:t.call(e,n)}))},onActive:function(n){e.onActive(n),t.forEach((function(e){var t;return null===(t=e.onActive)||void 0===t?void 0:t.call(e,n)}))}})}))}))},navigator:wt({navigate:function(e){var t=e.itemUrl;r.location.assign(t)},navigateNewTab:function(e){var t=e.itemUrl,n=r.open(t,"_blank","noopener");null==n||n.focus()},navigateNewWindow:function(e){var t=e.itemUrl;r.open(t,"_blank","noopener")}},e.navigator)})}(e,t),r=yt(bn,n,(function(e){var t=e.prevState,r=e.state;n.onStateChange(On({prevState:t,state:r,refresh:i},o))})),o=function(e){var 
t=e.store;return{setActiveItemId:function(e){t.dispatch("setActiveItemId",e)},setQuery:function(e){t.dispatch("setQuery",e)},setCollections:function(e){var n=0,r=e.map((function(e){return bt(bt({},e),{},{items:Ze(e.items).map((function(e){return bt(bt({},e),{},{__autocomplete_id:n++})}))})}));t.dispatch("setCollections",r)},setIsOpen:function(e){t.dispatch("setIsOpen",e)},setStatus:function(e){t.dispatch("setStatus",e)},setContext:function(e){t.dispatch("setContext",e)}}}({store:r}),c=sn(On({props:n,refresh:i,store:r},o));function i(){return $t(On({event:new Event("input"),nextState:{isOpen:r.getState().isOpen},props:n,query:r.getState().query,refresh:i,store:r},o))}return n.plugins.forEach((function(e){var n;return null===(n=e.subscribe)||void 0===n?void 0:n.call(e,On(On({},o),{},{refresh:i,onSelect:function(e){t.push({onSelect:e})},onActive:function(e){t.push({onActive:e})}}))})),function(e){var t,n,r=e.metadata,o=e.environment;if(null===(t=o.navigator)||void 0===t||null===(n=t.userAgent)||void 0===n?void 0:n.includes("Algolia Crawler")){var c=o.document.createElement("meta"),i=o.document.querySelector("head");c.name="algolia:metadata",setTimeout((function(){c.content=JSON.stringify(r),i.appendChild(c)}),0)}}({metadata:dn({plugins:n.plugins,options:e}),environment:n.environment}),On(On({refresh:i},c),o)}function wn(e){var t=e.translations,n=(void 0===t?{}:t).searchByText,r=void 0===n?"Search by":n;return Be.createElement("a",{href:"https://www.algolia.com/ref/docsearch/?utm_source=".concat(window.location.hostname,"&utm_medium=referral&utm_content=powered_by&utm_campaign=docsearch"),target:"_blank",rel:"noopener noreferrer"},Be.createElement("span",{className:"DocSearch-Label"},r),Be.createElement("svg",{width:"77",height:"19","aria-label":"Algolia",role:"img"},Be.createElement("path",{d:"M2.5067 0h14.0245c1.384.001 2.5058 1.1205 2.5068 2.5017V16.5c-.0014 1.3808-1.1232 2.4995-2.5068 2.5H2.5067C1.1232 18.9995.0014 17.8808 0 16.5V2.4958A2.495 2.495 0 01.735.7294 
2.505 2.505 0 012.5068 0zM37.95 15.0695c-3.7068.0168-3.7068-2.986-3.7068-3.4634L34.2372.3576 36.498 0v11.1794c0 .2715 0 1.9889 1.452 1.994v1.8961zm-9.1666-1.8388c.694 0 1.2086-.0397 1.5678-.1088v-2.2934a5.3639 5.3639 0 00-1.3303-.1679 4.8283 4.8283 0 00-.758.0582 2.2845 2.2845 0 00-.688.2024c-.2029.0979-.371.2362-.4919.4142-.1268.1788-.185.2826-.185.5533 0 .5297.185.8359.5205 1.0375.3355.2016.7928.3053 1.365.3053v-.0008zm-.1969-8.1817c.7463 0 1.3768.092 1.8856.2767.5088.1838.9195.4428 1.2204.7717.3068.334.5147.7777.6423 1.251.1327.4723.196.991.196 1.5603v5.798c-.5235.1036-1.05.192-1.5787.2649-.7048.1037-1.4976.156-2.3774.156-.5832 0-1.1215-.0582-1.6016-.167a3.385 3.385 0 01-1.2432-.5364 2.6034 2.6034 0 01-.8037-.9565c-.191-.3922-.29-.9447-.29-1.5208 0-.5533.11-.905.3246-1.2863a2.7351 2.7351 0 01.8849-.9329c.376-.242.8029-.415 1.2948-.5187a7.4517 7.4517 0 011.5381-.156 7.1162 7.1162 0 011.6667.2024V8.886c0-.259-.0296-.5061-.093-.7372a1.5847 1.5847 0 00-.3245-.6158 1.5079 1.5079 0 00-.6119-.4158 2.6788 2.6788 0 00-.966-.173c-.5206 0-.9948.0634-1.4283.1384a6.5481 6.5481 0 00-1.065.259l-.2712-1.849c.2831-.0986.7048-.1964 1.2491-.2943a9.2979 9.2979 0 011.752-.1501v.0008zm44.6597 8.1193c.6947 0 1.2086-.0405 1.567-.1097v-2.2942a5.3743 5.3743 0 00-1.3303-.1679c-.2485 0-.503.0177-.7573.0582a2.2853 2.2853 0 00-.688.2024 1.2333 1.2333 0 00-.4918.4142c-.1268.1788-.1843.2826-.1843.5533 0 .5297.1843.8359.5198 1.0375.3414.2066.7927.3053 1.365.3053v.0009zm-.191-8.1767c.7463 0 1.3768.0912 1.8856.2759.5087.1847.9195.4436 1.2204.7717.3.329.5147.7786.6414 1.251a5.7248 5.7248 0 01.197 1.562v5.7972c-.3466.0742-.874.1602-1.5788.2648-.7049.1038-1.4976.1552-2.3774.1552-.5832 0-1.1215-.0573-1.6016-.167a3.385 3.385 0 01-1.2432-.5356 2.6034 2.6034 0 01-.8038-.9565c-.191-.3922-.2898-.9447-.2898-1.5216 0-.5533.1098-.905.3245-1.2854a2.7373 2.7373 0 01.8849-.9338c.376-.2412.8029-.4141 1.2947-.5178a7.4545 7.4545 0 012.325-.1097c.2781.0287.5672.081.879.156v-.3686a2.7781 2.7781 0 00-.092-.738 1.5788 
1.5788 0 00-.3246-.6166 1.5079 1.5079 0 00-.612-.415 2.6797 2.6797 0 00-.966-.1729c-.5205 0-.9947.0633-1.4282.1384a6.5608 6.5608 0 00-1.065.259l-.2712-1.8498c.283-.0979.7048-.1957 1.2491-.2935a9.8597 9.8597 0 011.752-.1494zm-6.79-1.072c-.7576.001-1.373-.6103-1.3759-1.3664 0-.755.6128-1.3664 1.376-1.3664.764 0 1.3775.6115 1.3775 1.3664s-.6195 1.3664-1.3776 1.3664zm1.1393 11.1507h-2.2726V5.3409l2.2734-.3568v10.0845l-.0008.0017zm-3.984 0c-3.707.0168-3.707-2.986-3.707-3.4642L59.7069.3576 61.9685 0v11.1794c0 .2715 0 1.9889 1.452 1.994V15.0703zm-7.3512-4.979c0-.975-.2138-1.7873-.6305-2.3516-.4167-.571-.9998-.852-1.747-.852-.7454 0-1.3302.281-1.7452.852-.4166.5702-.6195 1.3765-.6195 2.3516 0 .9851.208 1.6473.6254 2.2183.4158.576.9998.8587 1.7461.8587.7454 0 1.3303-.2885 1.747-.8595.4158-.5761.6237-1.2315.6237-2.2184v.0009zm2.3132-.006c0 .7609-.1099 1.3361-.3356 1.9654a4.654 4.654 0 01-.9533 1.6076A4.214 4.214 0 0155.613 14.69c-.579.2412-1.4697.3795-1.9143.3795-.4462-.005-1.3303-.1324-1.9033-.3795a4.307 4.307 0 01-1.474-1.0316c-.4115-.4445-.7293-.9801-.9609-1.6076a5.3423 5.3423 0 01-.3465-1.9653c0-.7608.104-1.493.3356-2.1155a4.683 4.683 0 01.9719-1.5958 4.3383 4.3383 0 011.479-1.0257c.5739-.242 1.2043-.3567 1.8864-.3567.6829 0 1.3125.1197 1.8906.3567a4.1245 4.1245 0 011.4816 1.0257 4.7587 4.7587 0 01.9592 1.5958c.2426.6225.3643 1.3547.3643 2.1155zm-17.0198 0c0 .9448.208 1.9932.6238 2.431.4166.4386.955.6579 1.6142.6579.3584 0 .6998-.0523 1.0176-.1502.3186-.0978.5721-.2134.775-.3517V7.0784a8.8706 8.8706 0 00-1.4926-.1906c-.8206-.0236-1.4452.312-1.8847.8468-.4335.5365-.6533 1.476-.6533 2.3516v-.0008zm6.2863 4.4485c0 1.5385-.3938 2.662-1.1866 3.3773-.791.7136-2.0005 1.0712-3.6308 1.0712-.5958 0-1.834-.1156-2.8228-.334l.3643-1.7865c.8282.173 1.9202.2193 2.4932.2193.9077 0 1.555-.1847 1.943-.5533.388-.3686.578-.916.578-1.643v-.3687a6.8289 6.8289 0 01-.8848.3349c-.3634.1096-.786.167-1.261.167-.6246 0-1.1917-.0979-1.7055-.2944a3.5554 3.5554 0 
01-1.3244-.8645c-.3642-.3796-.6541-.8579-.8561-1.4289-.2028-.571-.3068-1.59-.3068-2.339 0-.7034.1099-1.5856.3245-2.1735.2198-.5871.5316-1.0949.9542-1.515.4167-.42.9255-.743 1.5213-.98a5.5923 5.5923 0 012.052-.3855c.7353 0 1.4114.092 2.0707.2024.6592.1088 1.2204.2236 1.6776.35v8.945-.0008zM11.5026 4.2418v-.6511c-.0005-.4553-.3704-.8241-.8266-.8241H8.749c-.4561 0-.826.3688-.8265.824v.669c0 .0742.0693.1264.1445.1096a6.0346 6.0346 0 011.6768-.2362 6.125 6.125 0 011.6202.2185.1116.1116 0 00.1386-.1097zm-5.2806.852l-.3296-.3282a.8266.8266 0 00-1.168 0l-.393.3922a.8199.8199 0 000 1.164l.3237.323c.0524.0515.1268.0397.1733-.0117.191-.259.3989-.507.6305-.7372.2374-.2362.48-.4437.7462-.6335.0575-.0354.0634-.1155.017-.1687zm3.5159 2.069v2.818c0 .081.0879.1392.1622.0987l2.5102-1.2964c.0574-.0287.0752-.0987.0464-.1552a3.1237 3.1237 0 00-2.603-1.574c-.0575 0-.115.0456-.115.1097l-.0008-.0009zm.0008 6.789c-2.0933.0005-3.7915-1.6912-3.7947-3.7804C5.9468 8.0821 7.6452 6.39 9.7387 6.391c2.0932-.0005 3.7911 1.6914 3.794 3.7804a3.7783 3.7783 0 01-1.1124 2.675 3.7936 3.7936 0 01-2.6824 1.1054h.0008zM9.738 4.8002c-1.9218 0-3.6975 1.0232-4.6584 2.6841a5.359 5.359 0 000 5.3683c.9609 1.661 2.7366 2.6841 4.6584 2.6841a5.3891 5.3891 0 003.8073-1.5725 5.3675 5.3675 0 001.578-3.7987 5.3574 5.3574 0 00-1.5771-3.797A5.379 5.379 0 009.7387 4.801l-.0008-.0008z",fill:"currentColor",fillRule:"evenodd"})))}function jn(e){return Be.createElement("svg",{width:"15",height:"15","aria-label":e.ariaLabel,role:"img"},Be.createElement("g",{fill:"none",stroke:"currentColor",strokeLinecap:"round",strokeLinejoin:"round",strokeWidth:"1.2"},e.children))}function Pn(e){var t=e.translations,n=void 0===t?{}:t,r=n.selectText,o=void 0===r?"to select":r,c=n.selectKeyAriaLabel,i=void 0===c?"Enter key":c,a=n.navigateText,u=void 0===a?"to navigate":a,l=n.navigateUpKeyAriaLabel,s=void 0===l?"Arrow up":l,f=n.navigateDownKeyAriaLabel,p=void 0===f?"Arrow down":f,m=n.closeText,d=void 0===m?"to 
close":m,h=n.closeKeyAriaLabel,v=void 0===h?"Escape key":h,y=n.searchByText,_=void 0===y?"Search by":y;return Be.createElement(Be.Fragment,null,Be.createElement("div",{className:"DocSearch-Logo"},Be.createElement(wn,{translations:{searchByText:_}})),Be.createElement("ul",{className:"DocSearch-Commands"},Be.createElement("li",null,Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:i},Be.createElement("path",{d:"M12 3.53088v3c0 1-1 2-2 2H4M7 11.53088l-3-3 3-3"}))),Be.createElement("span",{className:"DocSearch-Label"},o)),Be.createElement("li",null,Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:p},Be.createElement("path",{d:"M7.5 3.5v8M10.5 8.5l-3 3-3-3"}))),Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:s},Be.createElement("path",{d:"M7.5 11.5v-8M10.5 6.5l-3-3-3 3"}))),Be.createElement("span",{className:"DocSearch-Label"},u)),Be.createElement("li",null,Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:v},Be.createElement("path",{d:"M13.6167 8.936c-.1065.3583-.6883.962-1.4875.962-.7993 0-1.653-.9165-1.653-2.1258v-.5678c0-1.2548.7896-2.1016 1.653-2.1016.8634 0 1.3601.4778 1.4875 1.0724M9 6c-.1352-.4735-.7506-.9219-1.46-.8972-.7092.0246-1.344.57-1.344 1.2166s.4198.8812 1.3445.9805C8.465 7.3992 8.968 7.9337 9 8.5c.032.5663-.454 1.398-1.4595 1.398C6.6593 9.898 6 9 5.963 8.4851m-1.4748.5368c-.2635.5941-.8099.876-1.5443.876s-1.7073-.6248-1.7073-2.204v-.4603c0-1.0416.721-2.131 1.7073-2.131.9864 0 1.6425 1.031 1.5443 2.2492h-2.956"}))),Be.createElement("span",{className:"DocSearch-Label"},d))))}function In(e){var t=e.hit,n=e.children;return Be.createElement("a",{href:t.url},n)}function kn(){return Be.createElement("svg",{viewBox:"0 0 38 38",stroke:"currentColor",strokeOpacity:".5"},Be.createElement("g",{fill:"none",fillRule:"evenodd"},Be.createElement("g",{transform:"translate(1 
1)",strokeWidth:"2"},Be.createElement("circle",{strokeOpacity:".3",cx:"18",cy:"18",r:"18"}),Be.createElement("path",{d:"M36 18c0-9.94-8.06-18-18-18"},Be.createElement("animateTransform",{attributeName:"transform",type:"rotate",from:"0 18 18",to:"360 18 18",dur:"1s",repeatCount:"indefinite"})))))}function Dn(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("g",{stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M3.18 6.6a8.23 8.23 0 1112.93 9.94h0a8.23 8.23 0 01-11.63 0"}),Be.createElement("path",{d:"M6.44 7.25H2.55V3.36M10.45 6v5.6M10.45 11.6L13 13"})))}function Cn(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M10 10l5.09-5.09L10 10l5.09 5.09L10 10zm0 0L4.91 4.91 10 10l-5.09 5.09L10 10z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"}))}function An(){return Be.createElement("svg",{className:"DocSearch-Hit-Select-Icon",width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("g",{stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M18 3v4c0 2-2 4-4 4H2"}),Be.createElement("path",{d:"M8 17l-6-6 6-6"})))}var xn=function(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M17 6v12c0 .52-.2 1-1 1H4c-.7 0-1-.33-1-1V2c0-.55.42-1 1-1h8l5 5zM14 8h-3.13c-.51 0-.87-.34-.87-.87V4",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinejoin:"round"}))};function Nn(e){switch(e.type){case"lvl1":return Be.createElement(xn,null);case"content":return Be.createElement(Tn,null);default:return Be.createElement(Rn,null)}}function Rn(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M13 13h4-4V8H7v5h6v4-4H7V8H3h4V3v5h6V3v5h4-4v5zm-6 
0v4-4H3h4z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"}))}function Tn(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M17 5H3h14zm0 5H3h14zm0 5H3h14z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinejoin:"round"}))}function Ln(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M10 14.2L5 17l1-5.6-4-4 5.5-.7 2.5-5 2.5 5 5.6.8-4 4 .9 5.5z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinejoin:"round"}))}function qn(){return Be.createElement("svg",{width:"40",height:"40",viewBox:"0 0 20 20",fill:"none",fillRule:"evenodd",stroke:"currentColor",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M19 4.8a16 16 0 00-2-1.2m-3.3-1.2A16 16 0 001.1 4.7M16.7 8a12 12 0 00-2.8-1.4M10 6a12 12 0 00-6.7 2M12.3 14.7a4 4 0 00-4.5 0M14.5 11.4A8 8 0 0010 10M3 16L18 2M10 18h0"}))}function Mn(){return Be.createElement("svg",{width:"40",height:"40",viewBox:"0 0 20 20",fill:"none",fillRule:"evenodd",stroke:"currentColor",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M15.5 4.8c2 3 1.7 7-1 9.7h0l4.3 4.3-4.3-4.3a7.8 7.8 0 01-9.8 1m-2.2-2.2A7.8 7.8 0 0113.2 2.4M2 18L18 2"}))}function Hn(e){var t=e.translations,n=void 0===t?{}:t,r=n.titleText,o=void 0===r?"Unable to fetch results":r,c=n.helpText,i=void 0===c?"You might want to check your network connection.":c;return Be.createElement("div",{className:"DocSearch-ErrorScreen"},Be.createElement("div",{className:"DocSearch-Screen-Icon"},Be.createElement(qn,null)),Be.createElement("p",{className:"DocSearch-Title"},o),Be.createElement("p",{className:"DocSearch-Help"},i))}var Un=["translations"];function Fn(e){return function(e){if(Array.isArray(e))return Bn(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return 
Array.from(e)}(e)||function(e,t){if(!e)return;if("string"==typeof e)return Bn(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return Bn(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function Bn(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function zn(e){var t=e.translations,n=void 0===t?{}:t,r=Vn(e,Un),o=n.noResultsText,c=void 0===o?"No results for":o,i=n.suggestedQueryText,a=void 0===i?"Try searching for":i,u=n.reportMissingResultsText,l=void 0===u?"Believe this query should return results?":u,s=n.reportMissingResultsLinkText,f=void 0===s?"Let us know.":s,p=r.state.context.searchSuggestions;return Be.createElement("div",{className:"DocSearch-NoResults"},Be.createElement("div",{className:"DocSearch-Screen-Icon"},Be.createElement(Mn,null)),Be.createElement("p",{className:"DocSearch-Title"},c,' "',Be.createElement("strong",null,r.state.query),'"'),p&&p.length>0&&Be.createElement("div",{className:"DocSearch-NoResults-Prefill-List"},Be.createElement("p",{className:"DocSearch-Help"},a,":"),Be.createElement("ul",null,p.slice(0,3).reduce((function(e,t){return[].concat(Fn(e),[Be.createElement("li",{key:t},Be.createElement("button",{className:"DocSearch-Prefill",key:t,type:"button",onClick:function(){r.setQuery(t.toLowerCase()+" "),r.refresh(),r.inputRef.current.focus()}},t))])}),[]))),r.getMissingResultsUrl&&Be.createElement("p",{className:"DocSearch-Help"},"".concat(l," 
"),Be.createElement("a",{href:r.getMissingResultsUrl({query:r.state.query}),target:"_blank",rel:"noopener noreferrer"},f)))}var Wn=["hit","attribute","tagName"];function Kn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Jn(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function Yn(e,t){return t.split(".").reduce((function(e,t){return null!=e&&e[t]?e[t]:null}),e)}function Gn(e){var t=e.hit,n=e.attribute,r=e.tagName;return g(void 0===r?"span":r,Jn(Jn({},Qn(e,Wn)),{},{dangerouslySetInnerHTML:{__html:Yn(t,"_snippetResult.".concat(n,".value"))||Yn(t,n)}}))}function Zn(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=null==e?null:"undefined"!=typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null==n)return;var r,o,c=[],i=!0,a=!1;try{for(n=n.call(e);!(i=(r=n.next()).done)&&(c.push(r.value),!t||c.length!==t);i=!0);}catch(e){a=!0,o=e}finally{try{i||null==n.return||n.return()}finally{if(a)throw o}}return c}(e,t)||function(e,t){if(!e)return;if("string"==typeof e)return Xn(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return Xn(e,t)}(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function Xn(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n|<\/mark>)/g,ar=RegExp(ir.source);function ur(e){var t,n,r,o,c,i=e;if(!i.__docsearch_parent&&!e._highlightResult)return e.hierarchy.lvl0;var 
a=((i.__docsearch_parent?null===(t=i.__docsearch_parent)||void 0===t||null===(n=t._highlightResult)||void 0===n||null===(r=n.hierarchy)||void 0===r?void 0:r.lvl0:null===(o=e._highlightResult)||void 0===o||null===(c=o.hierarchy)||void 0===c?void 0:c.lvl0)||{}).value;return a&&ar.test(a)?a.replace(ir,""):a}function lr(){return lr=Object.assign||function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function dr(e){var t=e.translations,n=void 0===t?{}:t,r=mr(e,fr),o=n.recentSearchesTitle,c=void 0===o?"Recent":o,i=n.noRecentSearchesText,a=void 0===i?"No recent searches":i,u=n.saveRecentSearchButtonTitle,l=void 0===u?"Save this search":u,s=n.removeRecentSearchButtonTitle,f=void 0===s?"Remove this search from history":s,p=n.favoriteSearchesTitle,m=void 0===p?"Favorite":p,d=n.removeFavoriteSearchButtonTitle,h=void 0===d?"Remove this search from favorites":d;return"idle"===r.state.status&&!1===r.hasCollections?r.disableUserPersonalization?null:Be.createElement("div",{className:"DocSearch-StartScreen"},Be.createElement("p",{className:"DocSearch-Help"},a)):!1===r.hasCollections?null:Be.createElement("div",{className:"DocSearch-Dropdown-Container"},Be.createElement(tr,pr({},r,{title:c,collection:r.state.collections[0],renderIcon:function(){return Be.createElement("div",{className:"DocSearch-Hit-icon"},Be.createElement(Dn,null))},renderAction:function(e){var t=e.item,n=e.runFavoriteTransition,o=e.runDeleteTransition;return 
Be.createElement(Be.Fragment,null,Be.createElement("div",{className:"DocSearch-Hit-action"},Be.createElement("button",{className:"DocSearch-Hit-action-button",title:l,type:"submit",onClick:function(e){e.preventDefault(),e.stopPropagation(),n((function(){r.favoriteSearches.add(t),r.recentSearches.remove(t),r.refresh()}))}},Be.createElement(Ln,null))),Be.createElement("div",{className:"DocSearch-Hit-action"},Be.createElement("button",{className:"DocSearch-Hit-action-button",title:f,type:"submit",onClick:function(e){e.preventDefault(),e.stopPropagation(),o((function(){r.recentSearches.remove(t),r.refresh()}))}},Be.createElement(Cn,null))))}})),Be.createElement(tr,pr({},r,{title:m,collection:r.state.collections[1],renderIcon:function(){return Be.createElement("div",{className:"DocSearch-Hit-icon"},Be.createElement(Ln,null))},renderAction:function(e){var t=e.item,n=e.runDeleteTransition;return Be.createElement("div",{className:"DocSearch-Hit-action"},Be.createElement("button",{className:"DocSearch-Hit-action-button",title:h,type:"submit",onClick:function(e){e.preventDefault(),e.stopPropagation(),n((function(){r.favoriteSearches.remove(t),r.refresh()}))}},Be.createElement(Cn,null)))}})))}var hr=["translations"];function vr(){return vr=Object.assign||function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var _r=Be.memo((function(e){var t=e.translations,n=void 0===t?{}:t,r=yr(e,hr);if("error"===r.state.status)return Be.createElement(Hn,{translations:null==n?void 0:n.errorScreen});var o=r.state.collections.some((function(e){return e.items.length>0}));return r.state.query?!1===o?Be.createElement(zn,vr({},r,{translations:null==n?void 0:n.noResultsScreen})):Be.createElement(sr,r):Be.createElement(dr,vr({},r,{hasCollections:o,translations:null==n?void 
0:n.startScreen}))}),(function(e,t){return"loading"===t.state.status||"stalled"===t.state.status})),br=["translations"];function gr(){return gr=Object.assign||function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function Sr(e){var t=e.translations,n=void 0===t?{}:t,r=Or(e,br),o=n.resetButtonTitle,c=void 0===o?"Clear the query":o,i=n.resetButtonAriaLabel,a=void 0===i?"Clear the query":i,u=n.cancelButtonText,l=void 0===u?"Cancel":u,s=n.cancelButtonAriaLabel,f=void 0===s?"Cancel":s,p=r.getFormProps({inputElement:r.inputRef.current}).onReset;return Be.useEffect((function(){r.autoFocus&&r.inputRef.current&&r.inputRef.current.focus()}),[r.autoFocus,r.inputRef]),Be.useEffect((function(){r.isFromSelection&&r.inputRef.current&&r.inputRef.current.select()}),[r.isFromSelection,r.inputRef]),Be.createElement(Be.Fragment,null,Be.createElement("form",{className:"DocSearch-Form",onSubmit:function(e){e.preventDefault()},onReset:p},Be.createElement("label",gr({className:"DocSearch-MagnifierLabel"},r.getLabelProps()),Be.createElement(ze,null)),Be.createElement("div",{className:"DocSearch-LoadingIndicator"},Be.createElement(kn,null)),Be.createElement("input",gr({className:"DocSearch-Input",ref:r.inputRef},r.getInputProps({inputElement:r.inputRef.current,autoFocus:r.autoFocus,maxLength:64}))),Be.createElement("button",{type:"reset",title:c,className:"DocSearch-Reset","aria-label":a,hidden:!r.state.query},Be.createElement(Cn,null))),Be.createElement("button",{className:"DocSearch-Cancel",type:"reset","aria-label":f,onClick:r.onClose},l))}var Er=["_highlightResult","_snippetResult"];function wr(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},c=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var 
c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function jr(e){return!1===function(){var e="__TEST_KEY__";try{return localStorage.setItem(e,""),localStorage.removeItem(e),!0}catch(e){return!1}}()?{setItem:function(){},getItem:function(){return[]}}:{setItem:function(t){return window.localStorage.setItem(e,JSON.stringify(t))},getItem:function(){var t=window.localStorage.getItem(e);return t?JSON.parse(t):[]}}}function Pr(e){var t=e.key,n=e.limit,r=void 0===n?5:n,o=jr(t),c=o.getItem().slice(0,r);return{add:function(e){var t=e,n=(t._highlightResult,t._snippetResult,wr(t,Er)),i=c.findIndex((function(e){return e.objectID===n.objectID}));i>-1&&c.splice(i,1),c.unshift(n),c=c.slice(0,r),o.setItem(c)},remove:function(e){c=c.filter((function(t){return t.objectID!==e.objectID})),o.setItem(c)},getAll:function(){return c}}}var Ir=["facetName","facetQuery"];function kr(e){var t,n="algoliasearch-client-js-".concat(e.key),r=function(){return void 0===t&&(t=e.localStorage||window.localStorage),t},o=function(){return JSON.parse(r().getItem(n)||"{}")};return{get:function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}};return Promise.resolve().then((function(){var n=JSON.stringify(e),r=o()[n];return Promise.all([r||t(),void 0!==r])})).then((function(e){var t=i(e,2),r=t[0],o=t[1];return Promise.all([r,o||n.miss(r)])})).then((function(e){return i(e,1)[0]}))},set:function(e,t){return Promise.resolve().then((function(){var c=o();return c[JSON.stringify(e)]=t,r().setItem(n,JSON.stringify(c)),t}))},delete:function(e){return Promise.resolve().then((function(){var t=o();delete t[JSON.stringify(e)],r().setItem(n,JSON.stringify(t))}))},clear:function(){return Promise.resolve().then((function(){r().removeItem(n)}))}}}function Dr(e){var t=a(e.caches),n=t.shift();return void 0===n?{get:function(e,t){var n=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}};return t().then((function(e){return Promise.all([e,n.miss(e)])})).then((function(e){return i(e,1)[0]}))},set:function(e,t){return Promise.resolve(t)},delete:function(e){return Promise.resolve()},clear:function(){return Promise.resolve()}}:{get:function(e,r){var o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}};return n.get(e,r,o).catch((function(){return Dr({caches:t}).get(e,r,o)}))},set:function(e,r){return n.set(e,r).catch((function(){return Dr({caches:t}).set(e,r)}))},delete:function(e){return n.delete(e).catch((function(){return Dr({caches:t}).delete(e)}))},clear:function(){return n.clear().catch((function(){return Dr({caches:t}).clear()}))}}}function Cr(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{serializable:!0},t={};return{get:function(n,r){var o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}},c=JSON.stringify(n);if(c in t)return Promise.resolve(e.serializable?JSON.parse(t[c]):t[c]);var i=r(),a=o&&o.miss||function(){return Promise.resolve()};return i.then((function(e){return a(e)})).then((function(){return i}))},set:function(n,r){return t[JSON.stringify(n)]=e.serializable?JSON.stringify(r):r,Promise.resolve(r)},delete:function(e){return delete t[JSON.stringify(e)],Promise.resolve()},clear:function(){return t={},Promise.resolve()}}}function Ar(e){for(var t=e.length-1;t>0;t--){var n=Math.floor(Math.random()*(t+1)),r=e[t];e[t]=e[n],e[n]=r}return e}function xr(e,t){return t?(Object.keys(t).forEach((function(n){e[n]=t[n](e)})),e):e}function Nr(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r0?r:void 0,timeout:n.timeout||t,headers:n.headers||{},queryParameters:n.queryParameters||{},cacheable:n.cacheable}}var qr={Read:1,Write:2,Any:3},Mr=1,Hr=2,Ur=3,Fr=12e4;function Br(e){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:Mr;return 
t(t({},e),{},{status:n,lastUpdate:Date.now()})}function Vr(e){return"string"==typeof e?{protocol:"https",url:e,accept:qr.Any}:{protocol:e.protocol||"https",url:e.url,accept:e.accept||qr.Any}}var zr="GET",Wr="POST";function Kr(e,t){return Promise.all(t.map((function(t){return e.get(t,(function(){return Promise.resolve(Br(t))}))}))).then((function(e){var n=e.filter((function(e){return function(e){return e.status===Mr||Date.now()-e.lastUpdate>Fr}(e)})),r=e.filter((function(e){return function(e){return e.status===Ur&&Date.now()-e.lastUpdate<=Fr}(e)})),o=[].concat(a(n),a(r));return{getTimeout:function(e,t){return(0===r.length&&0===e?1:r.length+3+e)*t},statelessHosts:o.length>0?o.map((function(e){return Vr(e)})):t}}))}function Jr(e,n,r,o){var c=[],i=function(e,n){if(e.method===zr||void 0===e.data&&void 0===n.data)return;var r=Array.isArray(e.data)?e.data:t(t({},e.data),n.data);return JSON.stringify(r)}(r,o),u=function(e,n){var r=t(t({},e.headers),n.headers),o={};return Object.keys(r).forEach((function(e){var t=r[e];o[e.toLowerCase()]=t})),o}(e,o),l=r.method,s=r.method!==zr?{}:t(t({},r.data),o.data),f=t(t(t({"x-algolia-agent":e.userAgent.value},e.queryParameters),s),o.queryParameters),p=0,m=function t(n,a){var s=n.pop();if(void 0===s)throw{name:"RetryError",message:"Unreachable hosts - your application id may be incorrect. 
If the error persists, contact support@algolia.com.",transporterStackTrace:Gr(c)};var m={data:i,headers:u,method:l,url:Qr(s,r.path,f),connectTimeout:a(p,e.timeouts.connect),responseTimeout:a(p,o.timeout)},d=function(e){var t={request:m,response:e,host:s,triesLeft:n.length};return c.push(t),t},h={onSucess:function(e){return function(e){try{return JSON.parse(e.content)}catch(t){throw function(e,t){return{name:"DeserializationError",message:e,response:t}}(t.message,e)}}(e)},onRetry:function(r){var o=d(r);return r.isTimedOut&&p++,Promise.all([e.logger.info("Retryable failure",Zr(o)),e.hostsCache.set(s,Br(s,r.isTimedOut?Ur:Hr))]).then((function(){return t(n,a)}))},onFail:function(e){throw d(e),function(e,t){var n=e.content,r=e.status,o=n;try{o=JSON.parse(n).message}catch(e){}return function(e,t,n){return{name:"ApiError",message:e,status:t,transporterStackTrace:n}}(o,r,t)}(e,Gr(c))}};return e.requester.send(m).then((function(e){return function(e,t){return function(e){var t=e.status;return e.isTimedOut||function(e){var t=e.isTimedOut,n=e.status;return!t&&0==~~n}(e)||2!=~~(t/100)&&4!=~~(t/100)}(e)?t.onRetry(e):2==~~(e.status/100)?t.onSucess(e):t.onFail(e)}(e,h)}))};return Kr(e.hostsCache,n).then((function(e){return m(a(e.statelessHosts).reverse(),e.getTimeout)}))}function $r(e){var t={value:"Algolia for JavaScript (".concat(e,")"),add:function(e){var n="; ".concat(e.segment).concat(void 0!==e.version?" 
(".concat(e.version,")"):"");return-1===t.value.indexOf(n)&&(t.value="".concat(t.value).concat(n)),t}};return t}function Qr(e,t,n){var r=Yr(n),o="".concat(e.protocol,"://").concat(e.url,"/").concat("/"===t.charAt(0)?t.substr(1):t);return r.length&&(o+="?".concat(r)),o}function Yr(e){return Object.keys(e).map((function(t){return Nr("%s=%s",t,(n=e[t],"[object Object]"===Object.prototype.toString.call(n)||"[object Array]"===Object.prototype.toString.call(n)?JSON.stringify(e[t]):e[t]));var n})).join("&")}function Gr(e){return e.map((function(e){return Zr(e)}))}function Zr(e){var n=e.request.headers["x-algolia-api-key"]?{"x-algolia-api-key":"*****"}:{};return t(t({},e),{},{request:t(t({},e.request),{},{headers:t(t({},e.request.headers),n)})})}var Xr=function(e){var n=e.appId,r=function(e,t,n){var r={"x-algolia-api-key":n,"x-algolia-application-id":t};return{headers:function(){return e===Tr.WithinHeaders?r:{}},queryParameters:function(){return e===Tr.WithinQueryParameters?r:{}}}}(void 0!==e.authMode?e.authMode:Tr.WithinHeaders,n,e.apiKey),o=function(e){var t=e.hostsCache,n=e.logger,r=e.requester,o=e.requestsCache,c=e.responsesCache,a=e.timeouts,u=e.userAgent,l=e.hosts,s=e.queryParameters,f={hostsCache:t,logger:n,requester:r,requestsCache:o,responsesCache:c,timeouts:a,userAgent:u,headers:e.headers,queryParameters:s,hosts:l.map((function(e){return Vr(e)})),read:function(e,t){var n=Lr(t,f.timeouts.read),r=function(){return Jr(f,f.hosts.filter((function(e){return 0!=(e.accept&qr.Read)})),e,n)};if(!0!==(void 0!==n.cacheable?n.cacheable:e.cacheable))return r();var o={request:e,mappedRequestOptions:n,transporter:{queryParameters:f.queryParameters,headers:f.headers}};return f.responsesCache.get(o,(function(){return f.requestsCache.get(o,(function(){return f.requestsCache.set(o,r()).then((function(e){return Promise.all([f.requestsCache.delete(o),e])}),(function(e){return Promise.all([f.requestsCache.delete(o),Promise.reject(e)])})).then((function(e){var t=i(e,2);return 
t[0],t[1]}))}))}),{miss:function(e){return f.responsesCache.set(o,e)}})},write:function(e,t){return Jr(f,f.hosts.filter((function(e){return 0!=(e.accept&qr.Write)})),e,Lr(t,f.timeouts.write))}};return f}(t(t({hosts:[{url:"".concat(n,"-dsn.algolia.net"),accept:qr.Read},{url:"".concat(n,".algolia.net"),accept:qr.Write}].concat(Ar([{url:"".concat(n,"-1.algolianet.com")},{url:"".concat(n,"-2.algolianet.com")},{url:"".concat(n,"-3.algolianet.com")}]))},e),{},{headers:t(t(t({},r.headers()),{"content-type":"application/x-www-form-urlencoded"}),e.headers),queryParameters:t(t({},r.queryParameters()),e.queryParameters)})),c={transporter:o,appId:n,addAlgoliaAgent:function(e,t){o.userAgent.add({segment:e,version:t})},clearCache:function(){return Promise.all([o.requestsCache.clear(),o.responsesCache.clear()]).then((function(){}))}};return xr(c,e.methods)},eo=function(e){return function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r={transporter:e.transporter,appId:e.appId,indexName:t};return xr(r,n.methods)}},to=function(e){return function(n,r){var o=n.map((function(e){return t(t({},e),{},{params:Yr(e.params||{})})}));return e.transporter.read({method:Wr,path:"1/indexes/*/queries",data:{requests:o},cacheable:!0},r)}},no=function(e){return function(n,r){return Promise.all(n.map((function(n){var o=n.params,i=o.facetName,a=o.facetQuery,u=c(o,Ir);return eo(e)(n.indexName,{methods:{searchForFacetValues:co}}).searchForFacetValues(i,a,t(t({},r),u))})))}},ro=function(e){return function(t,n,r){return e.transporter.read({method:Wr,path:Nr("1/answers/%s/prediction",e.indexName),data:{query:t,queryLanguages:n},cacheable:!0},r)}},oo=function(e){return function(t,n){return e.transporter.read({method:Wr,path:Nr("1/indexes/%s/query",e.indexName),data:{query:t},cacheable:!0},n)}},co=function(e){return function(t,n,r){return e.transporter.read({method:Wr,path:Nr("1/indexes/%s/facets/%s/query",e.indexName,t),data:{facetQuery:n},cacheable:!0},r)}},io=1,ao=2,uo=3;function 
lo(e,n,r){var o,c={appId:e,apiKey:n,timeouts:{connect:1,read:2,write:30},requester:{send:function(e){return new Promise((function(t){var n=new XMLHttpRequest;n.open(e.method,e.url,!0),Object.keys(e.headers).forEach((function(t){return n.setRequestHeader(t,e.headers[t])}));var r,o=function(e,r){return setTimeout((function(){n.abort(),t({status:0,content:r,isTimedOut:!0})}),1e3*e)},c=o(e.connectTimeout,"Connection timeout");n.onreadystatechange=function(){n.readyState>n.OPENED&&void 0===r&&(clearTimeout(c),r=o(e.responseTimeout,"Socket timeout"))},n.onerror=function(){0===n.status&&(clearTimeout(c),clearTimeout(r),t({content:n.responseText||"Network request failed",status:n.status,isTimedOut:!1}))},n.onload=function(){clearTimeout(c),clearTimeout(r),t({content:n.responseText,status:n.status,isTimedOut:!1})},n.send(e.data)}))}},logger:(o=uo,{debug:function(e,t){return io>=o&&console.debug(e,t),Promise.resolve()},info:function(e,t){return ao>=o&&console.info(e,t),Promise.resolve()},error:function(e,t){return console.error(e,t),Promise.resolve()}}),responsesCache:Cr(),requestsCache:Cr({serializable:!1}),hostsCache:Dr({caches:[kr({key:"".concat(Rr,"-").concat(e)}),Cr()]}),userAgent:$r(Rr).add({segment:"Browser",version:"lite"}),authMode:Tr.WithinQueryParameters};return Xr(t(t(t({},c),r),{},{methods:{search:to,searchForFacetValues:no,multipleQueries:to,multipleSearchForFacetValues:no,initIndex:function(e){return function(t){return eo(e)(t,{methods:{search:oo,searchForFacetValues:co,findAnswers:ro}})}}}}))}lo.version=Rr;var so="3.2.0";var fo=["footer","searchBox"];function po(){return po=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function go(e){var t=e.appId,n=e.apiKey,r=e.indexName,o=e.placeholder,c=void 0===o?"Search 
docs":o,i=e.searchParameters,a=e.onClose,u=void 0===a?cr:a,l=e.transformItems,s=void 0===l?or:l,f=e.hitComponent,p=void 0===f?In:f,m=e.resultsFooterComponent,d=void 0===m?function(){return null}:m,h=e.navigator,v=e.initialScrollY,y=void 0===v?0:v,_=e.transformSearchClient,b=void 0===_?or:_,g=e.disableUserPersonalization,O=void 0!==g&&g,S=e.initialQuery,E=void 0===S?"":S,w=e.translations,j=void 0===w?{}:w,P=e.getMissingResultsUrl,I=j.footer,k=j.searchBox,D=bo(j,fo),C=yo(Be.useState({query:"",collections:[],completion:null,context:{},isOpen:!1,activeItemId:null,status:"idle"}),2),A=C[0],x=C[1],N=Be.useRef(null),R=Be.useRef(null),T=Be.useRef(null),L=Be.useRef(null),q=Be.useRef(null),M=Be.useRef(10),H=Be.useRef("undefined"!=typeof window?window.getSelection().toString().slice(0,64):"").current,U=Be.useRef(E||H).current,F=function(e,t,n){return Be.useMemo((function(){var r=lo(e,t);return r.addAlgoliaAgent("docsearch",so),!1===/docsearch.js \(.*\)/.test(r.transporter.userAgent.value)&&r.addAlgoliaAgent("docsearch-react",so),n(r)}),[e,t,n])}(t,n,b),B=Be.useRef(Pr({key:"__DOCSEARCH_FAVORITE_SEARCHES__".concat(r),limit:10})).current,V=Be.useRef(Pr({key:"__DOCSEARCH_RECENT_SEARCHES__".concat(r),limit:0===B.getAll().length?7:4})).current,z=Be.useCallback((function(e){if(!O){var t="content"===e.type?e.__docsearch_parent:e;t&&-1===B.getAll().findIndex((function(e){return e.objectID===t.objectID}))&&V.add(t)}}),[B,V,O]),W=Be.useMemo((function(){return En({id:"docsearch",defaultActiveItemId:0,placeholder:c,openOnFocus:!0,initialState:{query:U,context:{searchSuggestions:[]}},navigator:h,onStateChange:function(e){x(e.state)},getSources:function(e){var t=e.query,n=e.state,o=e.setContext,c=e.setStatus;return 
t?F.search([{query:t,indexName:r,params:ho({attributesToRetrieve:["hierarchy.lvl0","hierarchy.lvl1","hierarchy.lvl2","hierarchy.lvl3","hierarchy.lvl4","hierarchy.lvl5","hierarchy.lvl6","content","type","url"],attributesToSnippet:["hierarchy.lvl1:".concat(M.current),"hierarchy.lvl2:".concat(M.current),"hierarchy.lvl3:".concat(M.current),"hierarchy.lvl4:".concat(M.current),"hierarchy.lvl5:".concat(M.current),"hierarchy.lvl6:".concat(M.current),"content:".concat(M.current)],snippetEllipsisText:"…",highlightPreTag:"",highlightPostTag:"",hitsPerPage:20},i)}]).catch((function(e){throw"RetryError"===e.name&&c("error"),e})).then((function(e){var t=e.results[0],r=t.hits,c=t.nbHits,i=rr(r,(function(e){return ur(e)}));return n.context.searchSuggestions.length0&&($(),q.current&&q.current.focus())}),[U,$]),Be.useEffect((function(){function e(){if(R.current){var e=.01*window.innerHeight;R.current.style.setProperty("--docsearch-vh","".concat(e,"px"))}}return e(),window.addEventListener("resize",e),function(){window.removeEventListener("resize",e)}}),[]),Be.createElement("div",po({ref:N},J({"aria-expanded":!0}),{className:["DocSearch","DocSearch-Container","stalled"===A.status&&"DocSearch-Container--Stalled","error"===A.status&&"DocSearch-Container--Errored"].filter(Boolean).join(" 
"),role:"button",tabIndex:0,onMouseDown:function(e){e.target===e.currentTarget&&u()}}),Be.createElement("div",{className:"DocSearch-Modal",ref:R},Be.createElement("header",{className:"DocSearch-SearchBar",ref:T},Be.createElement(Sr,po({},W,{state:A,autoFocus:0===U.length,inputRef:q,isFromSelection:Boolean(U)&&U===H,translations:k,onClose:u}))),Be.createElement("div",{className:"DocSearch-Dropdown",ref:L},Be.createElement(_r,po({},W,{indexName:r,state:A,hitComponent:p,resultsFooterComponent:d,disableUserPersonalization:O,recentSearches:V,favoriteSearches:B,inputRef:q,translations:D,getMissingResultsUrl:P,onItemClick:function(e){z(e),u()}}))),Be.createElement("footer",{className:"DocSearch-Footer"},Be.createElement(Pn,{translations:I}))))}function Oo(){return Oo=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n1&&void 0!==arguments[1]?arguments[1]:window;return"string"==typeof e?t.document.querySelector(e):e}(e.container,e.environment))}})); -//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/docs/manifest.json b/docs/manifest.json deleted file mode 100644 index 67381e1..0000000 --- a/docs/manifest.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "short_name": "Docura", - "name": "Docura", - "description": "A modular Hugo theme to build your next documentation site.", - "start_url": "/?source=pwa", - "display": "standalone", - "icons": [ - { - "src": "/img/icon/icon-192.png", - "type": "image/png", - "sizes": "192x192" - }, - { - "src": "/img/icon/icon-512.png", - "type": "image/png", - "sizes": "512x512" - }, - { - "src": "/img/icon/maskable-icon-192.png", - "type": "image/png", - "sizes": "192x192", - "purpose": "maskable" - }, - { - "src": "/img/icon/maskable-icon-512.png", - "type": "image/png", - "sizes": "512x512", - "purpose": "maskable" - }, - { - "src": "/img/icon/icon-vector.svg", - "type": "image/svg+xml", - "sizes": "512x512" - } - ], - "background_color": "#ffffff", - "theme_color": "#ffffff" -} 
\ No newline at end of file diff --git a/docs/muagent/agent-flow-zh/index.html b/docs/muagent/agent-flow-zh/index.html deleted file mode 100644 index 50e6ac0..0000000 --- a/docs/muagent/agent-flow-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/agent-%E7%BC%96%E6%8E%92/ - - - - - - diff --git a/docs/muagent/agent-flow/index.html b/docs/muagent/agent-flow/index.html deleted file mode 100644 index 103d4db..0000000 --- a/docs/muagent/agent-flow/index.html +++ /dev/null @@ -1,420 +0,0 @@ - - - - - - - - -Agent Flow · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Agent Flow

    -
    -
    - - -

    Introduction to Core Connectors

    -

    To facilitate everyone’s understanding of the entire muagent link, we adopt the Flow format to introduce in detail how to build through configuration

    -
    - 图片 -
    -


    Below, we first introduce the related core components

    -

    Agent

    -

    On the design level of the Agent, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios:

    -
      -
    1. BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input => output according to the Prompt format.
    2. -
    3. ReactAgent: Provides standard React functionality, accomplishing current tasks based on questions.
    4. -
    5. ExecutorAgent: Performs sequential execution of task lists, completing related tasks according to plans arranged by the User or the previous Agent.
    6. -
    7. SelectorAgent: Provides the function of selecting an Agent, choosing the appropriate Agent to respond according to the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which will later be managed by the Memory Manager.
    8. -
    -

    It selects the appropriate Agent to respond based on the question from the User or the previous Agent. After output, the message is pushed into the memory pool, which is subsequently managed by the Memory Manager.

    -

    Chain

    -

    Basic Chain: BaseChain, connects the interactions of agents, manages the related messages and memory.

    -

    Phase

    -

    Basic Phase: BasePhase, connects the interactions of chains, and manages the related messages and memory.

    -

    Prompt Manager

    -

    The prompt creation for each agent in the Mutli-Agent link:

    -
      -
    • By setting simple prompt_input_keys and prompt_output_keys, the preset Prompt Context creation logic can be followed to quickly configure the agent prompt.
    • -
    • It is also possible to design a new key-context in the prompt manager module, achieving personalized Agent Prompt.
    • -
    -

    Memory Manager

    -

    Mainly used for the management of chat history:

    -
      -
    • Manages the reading and writing of chat history in a database, including user input, llm output, doc retrieval, code retrieval, search retrieval.
    • -
    • Summarizes the key information in chat history to create a summary context, which serves as a prompt context.
    • -
    • Provides a retrieval function to search for information related to the question in the chat history or summary context, assisting with question answering.
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/muagent/agent-\347\274\226\346\216\222/index.html" "b/docs/muagent/agent-\347\274\226\346\216\222/index.html" deleted file mode 100644 index 50e6ac0..0000000 --- "a/docs/muagent/agent-\347\274\226\346\216\222/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/agent-%E7%BC%96%E6%8E%92/ - - - - - - diff --git a/docs/muagent/connector-agent-zh/index.html b/docs/muagent/connector-agent-zh/index.html deleted file mode 100644 index ef00fac..0000000 --- a/docs/muagent/connector-agent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/connector-agent-zh/ - - - - - - diff --git a/docs/muagent/connector-agent/index.html b/docs/muagent/connector-agent/index.html deleted file mode 100644 index ad31838..0000000 --- a/docs/muagent/connector-agent/index.html +++ /dev/null @@ -1,614 +0,0 @@ - - - - - - - - -Connector Agent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Agent

    -
    -
    - - -

    Quickly Build an Agent

    -
      -
    • First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat)
    • -
    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
      -
    • Then Set LLM Configuration and Vector Model Configuration -Configure related LLM and Embedding Model
    • -
    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Agent Configuration

    -
      -
    • Define two react agents for actual task execution
    • -
    -
    # Here, predefined prompts are used, but you can also refer to the above prompts to complete the writing
    -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT
    -
    -# A tool agent based on react is defined
    -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT)
    -tool_react_agent = ReactAgent(
    -    role=tool_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -
    -# A code agent based on react is defined
    -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT)
    -code_react_agent = ReactAgent(
    -    role=code_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
      -
    • Define a groupAgent for agent selection
    • -
    -
    prompt = """#### Agent Profile
    -Your goal is to respond according to the information in the Context Data with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list.
    -ATTENTION: respond carefully following the "Response Output Format".
    -#### Response Output Format
    -**Thoughts:** think step by step about why you selected one role
    -**Role:** Select the role from the agent names.
    -"""
    -
    -# A groupAgent is defined
    -role = Role(role_type="assistant", role_name="qaer", prompt=prompt)
    -base_agent = SelectorAgent(
    -    role=role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[tool_react_agent, code_react_agent]
    -)
    -

    Start Actual Q&A

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -question = "Confirm if employee_data.csv exists locally, and check its columns and data types; then draw a bar chart"
    -
    -query = Message(
    -    user_name="test", role_type="user", role_name="user", input_query=question,
    -    tools=tools,
    -)
    -# base_agent.pre_print(query)
    -output_message = base_agent.step(query)
    -print(output_message.input_query)
    -print(output_message.role_content)
    -

    Agent Configs

    -
    # Configuration structure is in this directory
    -from muagent.connector.schema import Role
    -

    Agent Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    roleRoleRole description
    focus_agentsList[String]Logic of MetaGPT, focusing on the messages generated by which agents, optional values are: role_name
    focus_message_keysList[String]Additional logic, focusing on specific key information in the message, optional values are: agent’s output_keys
    chat_turnintValid only for ReactAgent
    llm_configLLMConfigLarge language model configuration
    embed_configEmbedConfigVector model configuration
    sandbox_serverDictSandbox environment, i.e., notebook startup configuration
    jupyter_work_pathstrWorking directory of the sandbox environment
    kb_root_pathstrStorage path for memory
    log_verbosestrLog printing level of agent prompt & predict
    -

    Role

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    role_typestrRole type, Enum: system, user, assistant, function, observation, summary
    role_namestrRole name
    role_descstrRole description
    agent_typestrAgent type
    role_promptstrRole instruction
    promptstrComplete prompt structure
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/connector-chain-zh/index.html b/docs/muagent/connector-chain-zh/index.html deleted file mode 100644 index 289054e..0000000 --- a/docs/muagent/connector-chain-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/connector-chain-zh/ - - - - - - diff --git a/docs/muagent/connector-chain/index.html b/docs/muagent/connector-chain/index.html deleted file mode 100644 index 30b0031..0000000 --- a/docs/muagent/connector-chain/index.html +++ /dev/null @@ -1,560 +0,0 @@ - - - - - - - - -Connector Chain · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Chain

    -
    -
    - - -

    Quickly Build an Agent

    -

    First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat)

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    Then Set LLM Configuration and Vector Model Configuration

    -

    Configure related LLM and Embedding Model

    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Agent Configuration

    -
      -
    • Define two react agents for actual task execution
    • -
    -
    # Here, predefined prompts are used, but you can also refer to the above prompts to complete the writing
    -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT
    -
    -# A tool agent based on react is defined
    -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT)
    -tool_react_agent = ReactAgent(
    -    role=tool_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -# A code agent based on react is defined
    -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT)
    -code_react_agent = ReactAgent(
    -    role=code_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
      -
    • Define a groupAgent for agent selection
    • -
    -
    prompt = """#### Agent Profile
    -Your goal is to respond according to the information in the Context Data with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list.
    -ATTENTION: respond carefully following the "Response Output Format".
    -#### Response Output Format
    -**Thoughts:** think step by step about why you selected one role
    -**Role:** Select the role from the agent names.
    -"""
    -# A groupAgent is defined
    -role = Role(role_type="assistant", role_name="qaer", prompt=prompt)
    -base_agent = SelectorAgent(
    -    role=role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[tool_react_agent, code_react_agent]
    -)
    -

    Chain Config

    -
    chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1)
    -base_chain = BaseChain(
    -    chainConfig=chain_config, agents=[base_agent], 
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -

    Start Actual Q&A

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -question = "Confirm if employee_data.csv exists locally, and check its columns and data types; then draw a bar chart"
    -query = Message(
    -    user_name="test", role_type="user", role_name="user", input_query=question,
    -    tools=tools,
    -)
    -
    -# base_chain.pre_print(query)
    -output_message, output_memory = base_chain.step(query)
    -print(output_message.input_query)
    -print(output_message.role_content)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Chain Parameter Configuration

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    agentsList[BaseAgent]
    llm_configLLMConfigLarge Language Model Configuration
    embed_configEmbedConfigVector Model Configuration
    sandbox_serverDictSandbox environment or notebook startup configuration
    jupyter_work_pathstrWorking directory for the sandbox environment
    kb_root_pathstrStorage path for memory
    log_verbosestrLog printing level for agent prompts & predictions
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/connector-memory-zh/index.html b/docs/muagent/connector-memory-zh/index.html deleted file mode 100644 index a3195b0..0000000 --- a/docs/muagent/connector-memory-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/connector-memory-zh/ - - - - - - diff --git a/docs/muagent/connector-memory/index.html b/docs/muagent/connector-memory/index.html deleted file mode 100644 index 828fd51..0000000 --- a/docs/muagent/connector-memory/index.html +++ /dev/null @@ -1,500 +0,0 @@ - - - - - - - - -Connector Memory · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Memory

    -
    -
    - - -

    Memory Manager

    -

    Primarily used for managing chat history, not yet completed

    -
      -
    • Read and write chat history in the database, including user input, llm output, doc retrieval, code retrieval, search retrieval.
    • -
    • Summarize key information from the chat history into a summary context, serving as a prompt context.
    • -
    • Provide a search function to retrieve information related to the question from chat history or summary context, aiding in Q&A.
    • -
    -

    Usage Example

    -

    Create memory manager instance

    -
    import os
    -import openai
    -from coagent.base_configs.env_config import KB_ROOT_PATH
    -from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.schema import Message
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
    -# LLM and Embedding Model configurations
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -# 
    -phase_name = "test"
    -memory_manager = LocalMemoryManager(
    -            unique_name=phase_name, 
    -            do_init=True, 
    -            kb_root_path=KB_ROOT_PATH, 
    -            embed_config=embed_config, 
    -            llm_config=llm_config
    -        )
    -

    Support for Message management

    -
    message1 = Message(
    -    role_name="test1", role_type="user", input_query="hello", origin_query="hello",
    -    parsed_output_list=[{"input": "hello"}]
    -)
    -text = "hi! how can I help you?"
    -message2 = Message(
    -    role_name="test2", role_type="assistant", input_query=text, origin_query=text,
    -    role_content=text, step_content=text, parsed_output_list=[{"answer": text}]
    -)
    -text = "they say hello and hi to each other"
    -message3 = Message(
    -    role_name="test3", role_type="summary",
    -    role_content=text, step_content=text,
    -    parsed_output_list=[{"summary": text}]
    -    )
    -

    Support for memory retrieval

    -
    # embedding retrieval test
    -text = "say hi, i want some help"
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "datetime"))
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "embedding"))
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "text"))
    -

    Support for memory summarization

    -
    # recursive_summary test
    -print(memory_manager.recursive_summary(local_memory_manager.recall_memory.messages, split_n=1))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/connector-phase-zh/index.html b/docs/muagent/connector-phase-zh/index.html deleted file mode 100644 index 73be898..0000000 --- a/docs/muagent/connector-phase-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/connector-phase-zh/ - - - - - - diff --git a/docs/muagent/connector-phase/index.html b/docs/muagent/connector-phase/index.html deleted file mode 100644 index 3ba6df1..0000000 --- a/docs/muagent/connector-phase/index.html +++ /dev/null @@ -1,581 +0,0 @@ - - - - - - - - -Connector Phase · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Phase

    -
    -
    - - -

    Quickly Build an Agent Phase

    -
      -
    • First, add OpenAI configuration, which can be models with similar interfaces to OpenAI (triggered via fastchat).
    • -
    -
    import os, sys
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    Then Set LLM Configuration and Vector Model Configuration

    -
      -
    • Configure related LLM and Embedding Model.
    • -
    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Agent Configuration

    -
      -
    • Define two react agents for actual task execution.
    • -
    -
    # Predefined prompts are used here; you can also refer to the above-mentioned prompts to write your own.
    -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT
    -# Defined a tool agent based on react
    -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT)
    -tool_react_agent = ReactAgent(
    -    role=tool_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -# Defined a code agent based on react
    -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT)
    -code_react_agent = ReactAgent(
    -    role=code_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
      -
    • Define a GroupAgent for agent selection.
    • -
    -
    prompt = """#### Agent Profile
    -Your goal is to respond according to the information provided by the Context Data's with the role that will best facilitate a solution, taking into account all relevant context data (Context).
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions, and tool list.
    -ATTENTION: respond carefully, referenced to the "Response Output Format" standard.
    -#### Response Output Format
    -**Thoughts:** think the reason step by step about why you select one role
    -**Role:** Select the role from the agent names.
    -"""
    -# Defined a GroupAgent
    -role = Role(role_type="assistant", role_name="qaer", prompt=prompt)
    -base_agent = SelectorAgent(
    -    role=role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[tool_react_agent, code_react_agent]
    -)
    -

    Chain Configuration

    -
    chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1)
    -base_chain = BaseChain(
    -    chainConfig=chain_config, agents=[base_agent], 
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -

    Phase Configuration

    -
    base_phase = BasePhase(
    -    phase_name="group_phase", chains=[base_chain],
    -    embed_config=embed_config, llm_config=llm_config
    -)
    -

    Start Real Q&A

    -
      -
    • Start execution.
    • -
    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -question = "Confirm whether employee_data.csv exists locally, and review its columns and data types; then plot a bar chart."
    -query = Message(
    -    user_name="test", role_type="user", role_name="user", input_query=question,
    -    tools=tools,
    -)
    -
    -
    -# base_phase.pre_print(query)
    -output_message, output_memory = base_phase.step(query)
    -print(output_message.input_query)
    -print(output_message.role_content)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Phase Parameter Configuration

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    phase_nameStringScenario name
    chainsList[Chain]List of chains to be executed in order
    llm_configLLMConfigLarge Language Model configuration
    embed_configEmbedConfigVector model configuration
    sandbox_serverDictSandbox environment, i.e., notebook startup configuration
    jupyter_work_pathstrWorking directory in the sandbox environment
    kb_root_pathstrStorage path for memory
    log_verbosestrLog print level for agent prompts & predictions
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/connector-prompt-zh/index.html b/docs/muagent/connector-prompt-zh/index.html deleted file mode 100644 index 314c70e..0000000 --- a/docs/muagent/connector-prompt-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/connector-prompt-zh/ - - - - - - diff --git a/docs/muagent/connector-prompt/index.html b/docs/muagent/connector-prompt/index.html deleted file mode 100644 index 64f8b86..0000000 --- a/docs/muagent/connector-prompt/index.html +++ /dev/null @@ -1,633 +0,0 @@ - - - - - - - - -Connector Prompt · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Prompt

    -
    -
    - - -

    Prompt Manager

    -

    Managing prompt creation in multi-agent linkages

    -
      -
    • Quick Configuration: Utilizing preset processing functions, users can easily configure by simply defining the inputs and outputs of the agents, enabling fast assembly and configuration of multi-agent prompts.
    • -
    • Customization Support: Allows users to customize the internal processing logic of each module within the prompt to achieve personalized implementation of the agent prompt.
    • -
    -

    Preset Template Structure for Prompts

    -
      -
    • Agent Profile: This section involves the basic description of the agent, including but not limited to the type of agent, its functions, and command set. Users can set the basic attributes of the agent here to ensure its behavior aligns with expectations.
    • -
    • Context: Contextual Information, provided as a reference for the agent, aiding in better decision-making. -
        -
      • Tool Information: This part provides the agent with a list of available tools, from which the agent can choose appropriate ones to assist in task execution based on current scenario requirements.
      • -
      • Reference Documents: This may include documents or code snippets for the agent to refer to when handling requests, to facilitate the use of relevant information.
      • -
      • Session Records: In multi-round conversations, this section records previous dialogue content to ensure continuity within the context.
      • -
      -
    • -
    • Response Output Format: Here the user can set the output format of the agent to ensure that the generated responses meet specific formatting requirements, including structure, grammar, etc.
    • -
    -

    Standard Structure of Prompt

    -

    In the entire structure of a Prompt, we need to define three parts:

    -
      -
    • Agent Profile
    • -
    • Input Format
    • -
    • Response Output Format
    • -
    -
    #### Agent Profile
    -Agent Description ...
    -
    -#### Input Format
    -**Origin Query:** the initial question or objective that the user wanted to achieve
    -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved.
    -
    -#### Response Output Format
    -**Action Status:** finished or continued
    -If it's 'finished', the context can answer the origin query.
    -If it's 'continued', the context can't answer the origin query.
    -**REASON:** Justify the decision of choosing 'finished' or 'continued' by evaluating the progress step by step.
    -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion.
    -

    Here, we have integrated some of the common operations of the Input Format, with certain fields and operational procedures built in to form a standardized configurable operation. In the future, we will also make parts of the Agent Profile and Response Output Format configurable to reduce the difficulty of writing Prompts.

    -

    Customizing Agents

    -
      -
    • Implement construction with custom fields according to actual needs
    • -
    -
    class CodeGenDocer(BaseAgent):
    -    def start_action_step(self, message: Message) -> Message:
    -        '''do action before agent predict '''
    -        # Get code snippets and node information based on the question
    -        action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, 
    -                                              embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag")
    -        current_vertex = action_json['vertex']
    -        message.customed_kargs["Code Snippet"] = action_json["code"]
    -        message.customed_kargs['Current_Vertex'] = current_vertex
    -        return message
    -    
    -

    pre_print Function

    -

    After building phases, chains, or agents, we can verify the agent linkage by calling its pre_print method, allowing for debugging in advance so that issues are not discovered only after execution.

    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -
    -
    -import os, sys
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
    -llm_config = LLMConfig(
    -    model_name="gpt-4", api_key=api_key,  api_base_url=api_base_url, temperature=0.3
    -)
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -phase.pre_print(query)
    -

    Here, pre-defined agents are used; a custom case can be found in customed_example -

    -

    check the pre-print prompt

    -
    ##########################
    -<<<<baseGroup's prompt>>>>
    -##########################
    -
    -### Agent Profile
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -ATTENTION: response carefully referenced "Response Output Format" in format.
    -
    -### Tool Information
    -
    -### Agent Infomation
    -        Please ensure your selection is one of the listed roles. Available roles for selection:
    -        "role name: tool_react
    -role description:  Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.,"
    -"role name: code_react
    -role description:  Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.,"
    -        Please ensure select the Role from agent names, such as tool_react, code_react
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Current Plan
    -
    -### Response Output Format
    -**Thoughts:** think the reason step by step about why you selecte one role
    -**Role:** Select the role from agent names.
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Role:**
    -
    -
    -###########################
    -<<<<tool_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.
    -Please note that all the tools you can use are listed below. You can only choose from these tools for use.
    -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.
    -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.
    -
    -### Tool Information
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Task Records
    -
    -### Response Output Format
    -**Thoughts:** According the previous observations, plan the approach for using the tool effectively.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -###########################
    -<<<<code_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When users need help with coding, your role is to provide precise and effective guidance.
    -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -### Response Output Format
    -
    -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem,
    -outline the plan for executing this step.
    -
    -**Action Status:** Set to 'stopped' or 'code_executing'.
    -If it's 'stopped', the action is to provide the final answer to the session records and executed steps.
    -If it's 'code_executing', the action is to write the code.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/custom-examples-zh/index.html b/docs/muagent/custom-examples-zh/index.html deleted file mode 100644 index 7dfe1ba..0000000 --- a/docs/muagent/custom-examples-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/custom-examples-zh/ - - - - - - diff --git a/docs/muagent/custom-examples/index.html b/docs/muagent/custom-examples/index.html deleted file mode 100644 index f1e56c7..0000000 --- a/docs/muagent/custom-examples/index.html +++ /dev/null @@ -1,699 +0,0 @@ - - - - - - - - -Customed Examples · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Customed Examples

    -
    -
    - - -

    How to Create Your Personalized Agent Phase Scenario

    -

    Below we will use a code repository to demonstrate the automatic generation of API documentation from code, detailing how to customize the construction of an agent phase.

    -

    Design Your Prompt Structure

    -
      -
    • codeGenDocGroup_PROMPT, create group Agent Prompt
    • -
    -
    # update new agent configs
    -codeGenDocGroup_PROMPT = """#### Agent Profile
    -
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -
    -#### Input Format
    -
    -#### Response Output Format
    -
    -**Code Path:** Extract the paths for the class/method/function that need to be addressed from the context
    -
    -**Role:** Select the role from agent names
    -"""
    -
      -
    • classGenDoc_PROMPT, create class code to API doc Prompt
    • -
    -
    classGenDoc_PROMPT = """#### Agent Profile
    -As an advanced code documentation generator, you are proficient in translating class definitions into comprehensive documentation with a focus on instantiation parameters. 
    -Your specific task is to parse the given code snippet of a class, extract information regarding its instantiation parameters.
    -
    -#### Input Format
    -
    -**Current_Vertex:** Provide the code vertex of the function or method.
    -
    -**Code Snippet:** Provide the full class definition, including the constructor and any parameters it may require for instantiation.
    -
    -#### Response Output Format
    -**Class Base:** Specify the base class or interface from which the current class extends, if any.
    -
    -**Class Description:** Offer a brief description of the class's purpose and functionality.
    -
    -**Init Parameters:** List each parameter from construct. For each parameter, provide:
    -    - `param`: The parameter name
    -    - `param_description`: A concise explanation of the parameter's purpose.
    -    - `param_type`: The data type of the parameter, if explicitly defined.
    -
    -    ```json
    -    [
    -        {
    -            "param": "parameter_name",
    -            "param_description": "A brief description of what this parameter is used for.",
    -            "param_type": "The data type of the parameter"
    -        },
    -        ...
    -    ]
    -    ```
    -
    -        
    -    If no parameter for construct, return 
    -    ```json
    -    []
    -    ```
    -"""
    -
      -
    • funcGenDoc_PROMPT, create function code to API doc Prompt
    • -
    -
    funcGenDoc_PROMPT = """#### Agent Profile
    -You are a high-level code documentation assistant, skilled at extracting information from function/method code into detailed and well-structured documentation.
    -
    -
    -#### Input Format
    -**Code Path:** Provide the code path of the function or method you wish to document. 
    -This name will be used to identify and extract the relevant details from the code snippet provided.
    -
    -**Current_Vertex:** Provide the code vertex of the function or method.
    -
    -**Code Snippet:** A segment of code that contains the function or method to be documented.
    -
    -#### Response Output Format
    -
    -**Class Description:** Offer a brief description of the method(function)'s purpose and functionality.
    -
    -**Parameters:** Extract parameter for the specific function/method Code from Code Snippet. For parameter, provide:
    -    - `param`: The parameter name
    -    - `param_description`: A concise explanation of the parameter's purpose.
    -    - `param_type`: The data type of the parameter, if explicitly defined.
    -    ```json
    -    [
    -        {
    -            "param": "parameter_name",
    -            "param_description": "A brief description of what this parameter is used for.",
    -            "param_type": "The data type of the parameter"
    -        },
    -        ...
    -    ]
    -    ```
    -
    -    If no parameter for function/method, return 
    -    ```json
    -    []
    -    ```
    -
    -**Return Value Description:** Describe what the function/method returns upon completion.
    -
    -**Return Type:** Indicate the type of data the function/method returns (e.g., string, integer, object, void).
    -"""
    -

    Import Packages and Basic Configuration Parameters

    -
      -
    • First, add the OpenAI configuration, or a configuration for any model exposing an OpenAI-compatible interface (e.g., one launched via fastchat)
    • -
    -
    import os, sys
    -from muagent.base_configs.env_config import CB_ROOT_PATH
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.connector.phase import BasePhase
    -from muagent.connector.agents import BaseAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Message, Role, ChainConfig
    -from muagent.codechat.codebase_handler.codebase_handler import CodeBaseHandler
    -from loguru import logger
    -from muagent.tools import CodeRetrievalSingle
    -
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    Defining a New Agent Class

    -

    For custom key-value information

    -
    class CodeGenDocer(BaseAgent):
    -    def start_action_step(self, message: Message) -> Message:
    -        '''do action before agent predict '''
    -        # Retrieve code snippets and node information based on the question
    -        action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, 
    -                                              embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag")
    -        current_vertex = action_json['vertex']
    -        message.customed_kargs["Code Snippet"] = action_json["code"]
    -        message.customed_kargs['Current_Vertex'] = current_vertex
    -        return message
    -    
    -

    Preparing LLM & Embedding

    -
    llm_config = LLMConfig(
    -    model_name="gpt-4", api_key=api_key,  api_base_url=api_base_url, temperature=0.3
    -)
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Codebase Loading

    -
    # initialize codebase
    -# delete codebase
    -codebase_name = 'client_nebula'
    -code_path = "D://chromeDownloads/devopschat-bot/client_v2/client"
    -use_nh = True
    -do_interpret = False
    -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
    -                      llm_config=llm_config, embed_config=embed_config)
    -cbh.delete_codebase(codebase_name=codebase_name)
    -# load codebase
    -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
    -                      llm_config=llm_config, embed_config=embed_config)
    -cbh.import_code(do_interpret=do_interpret)
    -

    Then Construct a Phase Instance and Begin Execution

    -
    # log-level, print prompt, and llm predict
    -os.environ["log_verbose"] = "1"
    -
    -funcGenDoc_role = Role(role_type="assistant", role_name="funcGenDoc_role", prompt=funcGenDoc_PROMPT)
    -funcGenDoc = CodeGenDocer(
    -    role=funcGenDoc_role,
    -    chat_turn=1,
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -classGenDoc_role = Role(role_type="assistant", role_name="classGenDoc_role", prompt=classGenDoc_PROMPT)
    -classGenDoc = CodeGenDocer(
    -    role=classGenDoc_role,
    -    chat_turn=1,
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -codeGenDocGroup_role = Role(role_type="assistant", role_name="codeGenDocGroup_role", prompt=codeGenDocGroup_PROMPT)
    -codeGenDocGroup = SelectorAgent(
    -    role=codeGenDocGroup_role,
    -    chat_turn=1,
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[funcGenDoc, classGenDoc]
    -)
    -
    -chain_config = ChainConfig(
    -    chain_name="codeGenDocGroup_chain", agents=[codeGenDocGroup.role.role_name,], 
    -    chat_turn=1)
    -chain = BaseChain(
    -    chainConfig=chain_config, agents=[codeGenDocGroup], 
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -phase = BasePhase(
    -    phase_name="codeGenDocGroup_phase", chains=[chain],
    -    embed_config=embed_config, llm_config=llm_config
    -)
    -

    Start to Generate API Docs from Code

    -
    # Initialize based on the previous loading process
    -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
    -                      llm_config=llm_config, embed_config=embed_config)
    -cbh.search_vertices(vertex_type="method")
    -# Begin transforming code into API documentation structure
    -for vertex_type in ["class", "method"]:
    -    vertices = cbh.search_vertices(vertex_type=vertex_type)
    -    logger.info(f"vertices={vertices}")
    -    # round-1
    -    docs = []
    -    for vertex in vertices:
    -        vertex = vertex.split("-")[0] # '-' is the delimiter for method parameters
    -        query_content = f"Generate documentation for {vertex_type} node {vertex}"
    -        query = Message(
    -            role_name="human", role_type="user", input_query=query_content,
    -            code_engine_name=codebase_name, score_threshold=1.0, top_k=3, cb_search_type="tag", use_nh=use_nh,
    -            local_graph_path=CB_ROOT_PATH,
    -            )
    -        output_message, output_memory = phase.step(query, reinit_memory=True)
    -        # print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -        docs.append(output_memory.get_spec_parserd_output())
    -        os.makedirs(f"{CB_ROOT_PATH}/docs", exist_ok=True)
    -        with open(f"{CB_ROOT_PATH}/docs/raw_{vertex_type}.json", "w") as f:
    -            json.dump(docs, f)
    -
    -
    -# Convert the generated document information into markdown text
    -from muagent.utils.code2doc_util import *
    -import json
    -with open(f"/home/user/code_base/docs/raw_method.json", "r") as f:
    -    method_raw_data = json.load(f)
    -
    -    
    -with open(f"/home/user/code_base/docs/raw_class.json", "r") as f:
    -    class_raw_data = json.load(f)
    -    
    -method_data = method_info_decode(method_raw_data)
    -class_data = class_info_decode(class_raw_data)
    -method_mds = encode2md(method_data, method_text_md)
    -class_mds = encode2md(class_data, class_text_md)
    -
    -docs_dict = {}
    -for k,v in class_mds.items():
    -    method_textmds = method_mds.get(k, [])
    -    for vv in v:
    -        # Theoretically, there should only be one
    -        text_md = vv
    -    for method_textmd in method_textmds:
    -        text_md += "\n<br>" + method_textmd
    -    docs_dict.setdefault(k, []).append(text_md)
    -    
    -    with open(f"/home/user/code_base/docs/{k}.md", "w") as f:
    -        f.write(text_md)
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/custom-retrieval-zh/index.html b/docs/muagent/custom-retrieval-zh/index.html deleted file mode 100644 index d5fa0f8..0000000 --- a/docs/muagent/custom-retrieval-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/custom-retrieval-zh/ - - - - - - diff --git a/docs/muagent/custom-retrieval/index.html b/docs/muagent/custom-retrieval/index.html deleted file mode 100644 index 12b2268..0000000 --- a/docs/muagent/custom-retrieval/index.html +++ /dev/null @@ -1,547 +0,0 @@ - - - - - - - - -Custom Retrieval · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Custom Retrieval

    -
    -
    - - -

    Basic Introduction

    -

    Doc Retrieval is the document vector database, which is the most mainstream method for knowledge base construction nowadays. It uses Text Embedding models to vectorize documents and stores them in a vector database. In the future, we will also support querying based on knowledge graph and automatically extracting entities and relationships through large models to explore various complex relationships in data.

    -

    Code Retrieval: LLMs face challenges in tasks such as code generation, repair, and component understanding, including lagging code training data and the inability to perceive the dependency structure of the code context. During development, understanding existing codebases and dependencies, retrieving related code, querying metadata, etc., can take a significant amount of time. Therefore, we hope to supplement the LLM with code outside of its knowledge system through code structure analysis and code retrieval.

    -

    Search Retrieval: In addition to existing document and code knowledge bases, in daily practice we browse a large amount of web content to acquire more knowledge, helping us understand emerging scenarios, businesses, technologies, etc.; hence we integrated duckduckgo search, an open-source search tool, to provide the LLM with content beyond its knowledge reserve.

    -

    Retrieval Structure

    -
    class IMRertrieval:
    -    def __init__(self,):
    -        '''
    -        init your personal attributes
    -        '''
    -        pass
    -
    -    def run(self, ):
    -        '''
    -        execute interface, and can use init' attributes
    -        '''
    -        pass
    -
    -
    -class BaseDocRetrieval(IMRertrieval):
    -
    -    def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH):
    -        self.knowledge_base_name = knowledge_base_name
    -        self.search_top = search_top
    -        self.score_threshold = score_threshold
    -        self.embed_config = embed_config
    -        self.kb_root_path = kb_root_path
    -
    -    def run(self, query: str, search_top=None, score_threshold=None, ):
    -        docs = DocRetrieval.run(
    -            query=query, knowledge_base_name=self.knowledge_base_name,
    -            search_top=search_top or self.search_top,
    -            score_threshold=score_threshold or self.score_threshold,
    -            embed_config=self.embed_config,
    -            kb_root_path=self.kb_root_path
    -        )
    -        return docs
    -

    Usage Example

    -
    # retrieval your customized register demo
    -from muagent.tools import DocRetrieval
    -
    -class BaseDocRetrieval(IMRertrieval):
    -
    -    def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH):
    -        self.knowledge_base_name = knowledge_base_name
    -        self.search_top = search_top
    -        self.score_threshold = score_threshold
    -        self.embed_config = embed_config
    -        self.kb_root_path = kb_root_path
    -
    -    def run(self, query: str, search_top=None, score_threshold=None, ):
    -        docs = DocRetrieval.run(
    -            query=query, knowledge_base_name=self.knowledge_base_name,
    -            search_top=search_top or self.search_top,
    -            score_threshold=score_threshold or self.score_threshold,
    -            embed_config=self.embed_config,
    -            kb_root_path=self.kb_root_path
    -        )
    -
    -        return docs
    -
    -
    -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config)
    -
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH,
    -    doc_retrieval=doc_retrieval
    -)
    -
    -
    -# round-1
    -query_content = "What modules does langchain have?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    -# round-2
    -query_content = "What is the use of prompts?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/custom-tool-zh/index.html b/docs/muagent/custom-tool-zh/index.html deleted file mode 100644 index 5f8eb9f..0000000 --- a/docs/muagent/custom-tool-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/custom-tool-zh/ - - - - - - diff --git a/docs/muagent/custom-tool/index.html b/docs/muagent/custom-tool/index.html deleted file mode 100644 index 3b52808..0000000 --- a/docs/muagent/custom-tool/index.html +++ /dev/null @@ -1,568 +0,0 @@ - - - - - - - - -Custom Tool · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Custom Tool

    -
    -
    - - -

    Introduction

    -

    In MuAgent, it also supports the registration of Tools by Agents. By registering the BaseToolModel class with Python and writing

    -
      -
    • Tool_name
    • -
    • Tool_description
    • -
    • ToolInputArgs
    • -
    • ToolOutputArgs
    • -
    • run
    • -
    -

    and other relevant properties and methods, the quick integration of tools can be achieved. It also supports the direct use of the langchain Tool interface. For example, functions like the aforementioned XXRetrieval can also be registered as a Tool, to be ultimately called by an LLM.

    -

    BaseTool Structure

    -
    from langchain.agents import Tool
    -from pydantic import BaseModel, Field
    -from typing import List, Dict
    -import json
    -
    -
    -class BaseToolModel:
    -    name = "BaseToolModel"
    -    description = "Tool Description"
    -
    -    class ToolInputArgs(BaseModel):
    -        """
    -        Input for MoveFileTool.
    -        Tips:
    -            default control Required, e.g.  key1 is not Required/key2 is Required
    -        """
    -
    -        key1: str = Field(default=None, description="hello world!")
    -        key2: str = Field(..., description="hello world!!")
    -
    -    class ToolOutputArgs(BaseModel):
    -        """
    -        Input for MoveFileTool.
    -        Tips:
    -            default control Required, e.g.  key1 is not Required/key2 is Required
    -        """
    -
    -        key1: str = Field(default=None, description="hello world!")
    -        key2: str = Field(..., description="hello world!!")
    -
    -    @classmethod
    -    def run(cls, tool_input_args: ToolInputArgs) -> ToolOutputArgs:
    -        """excute your tool!"""
    -        pass
    -

    Register Example

    -
    from pydantic import BaseModel, Field
    -from typing import List, Dict
    -import requests
    -from loguru import logger
    -
    -from .base_tool import BaseToolModel
    -
    -class Multiplier(BaseToolModel):
    -    """
    -    Tips:
    -        default control Required, e.g.  key1 is not Required/key2 is Required
    -    """
    -
    -    name: str = "Multiplier"
    -    description: str = """useful for when you need to multiply two numbers together. \
    -    The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. \
    -    For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
    -
    -    class ToolInputArgs(BaseModel):
    -        """Input for Multiplier."""
    -
    -        # key: str = Field(..., description="用户在高德地图官网申请web服务API类型KEY")
    -        a: int = Field(..., description="num a")
    -        b: int = Field(..., description="num b")
    -
    -    class ToolOutputArgs(BaseModel):
    -        """Output for Multiplier."""
    -
    -        res: int = Field(..., description="the result of two nums")
    -    
    -    @staticmethod
    -    def run(a, b):
    -        return a * b
    -

    Use Example

    -
    from langchain.tools import StructuredTool
    -from muagent.tools import (
    -    WeatherInfo, Multiplier, toLangchainTools,
    -    TOOL_DICT, TOOL_SETS
    -)
    -
    -# Function exec
    -tools =  [
    -    StructuredTool(
    -            name=Multiplier.name,
    -            func=Multiplier.run,
    -            description=Multiplier.description,
    -            args_schema=Multiplier.ToolInputArgs,
    -        ), 
    -        StructuredTool(
    -            name=WeatherInfo.name,
    -            func=WeatherInfo.run,
    -            description=WeatherInfo.description,
    -            args_schema=WeatherInfo.ToolInputArgs,
    -        )
    -        ]
    -
    -tools = toLangchainTools([TOOL_DICT["Multiplier"]])
    -
    -# tool run Test
    -print(tools[0].func(1,2))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/embedding-model-config-zh/index.html b/docs/muagent/embedding-model-config-zh/index.html deleted file mode 100644 index 94dee56..0000000 --- a/docs/muagent/embedding-model-config-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/embedding-model-config-zh/ - - - - - - diff --git a/docs/muagent/embedding-model-config/index.html b/docs/muagent/embedding-model-config/index.html deleted file mode 100644 index 70a092a..0000000 --- a/docs/muagent/embedding-model-config/index.html +++ /dev/null @@ -1,504 +0,0 @@ - - - - - - - - -Embedding Config · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Embedding Config

    -
    -
    - - -

    Prepare Relevant Parameters

    -

    First, add the OpenAI configuration; this could also be a model similar to the OpenAI interface (launched via fastchat).

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -

    Build LLM Config

    -
      -
    • Constructing with a local model file
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -
      -
    • Constructing via OpenAI
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -embed_config = EmbedConfig(
    -    embed_engine="openai", api_key=api_key, api_base_url=api_base_url,
    -)
    -
      -
    • Customizing and inputting langchain embeddings
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -class CustomizedEmbeddings(Embeddings):
    -    def embed_documents(self, texts: List[str]) -> List[List[float]]:
    -        embeddings = []
    -        # add your embedding code
    -        return embeddings
    -    def embed_query(self, text: str) -> List[float]:
    -        """Compute query embeddings using a HuggingFace transformer model.
    -        Args:
    -            text: The text to embed.
    -        Returns:
    -            Embeddings for the text.
    -        """
    -        # add your embedding code
    -        return embedding
    -
    -
    -embeddings = CustomizedEmbeddings()
    -embed_config = EmbedConfig(
    -    embed_model="default",
    -    langchain_embeddings=embeddings
    -)
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/index.html b/docs/muagent/index.html deleted file mode 100644 index d5cc634..0000000 --- a/docs/muagent/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /muagent/muagent/ - - - - - - diff --git a/docs/muagent/index.xml b/docs/muagent/index.xml deleted file mode 100644 index 38b9729..0000000 --- a/docs/muagent/index.xml +++ /dev/null @@ -1,109 +0,0 @@ - - - - Muagents on CodeFuse-AI - /muagent/ - Recent content in Muagents on CodeFuse-AI - Hugo -- gohugo.io - en-US - - - Agent Flow - /muagent/agent-flow/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/agent-flow/ - Introduction to Core Connectors To facilitate everyone&rsquo;s understanding of the entire muagent link, we adopt the Flow format to introduce in detail how to build through configuration Below, we first introduce the related core components Agent On the design level of the Agent, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios: BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input =&gt; output according to the Prompt format. 
- - - Connector Agent - /muagent/connector-agent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-agent/ - Quickly Build an Agent First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then Set LLM Configuration and Vector Model Configuration Configure related LLM and Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent. - - - Connector Chain - /muagent/connector-chain/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-chain/ - Quickly Build an Agent First, add an OpenAI configuration, or a model with a similar interface to OpenAI (launched through fastchat) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then Set LLM Configuration and Vector Model Configuration Configure related LLM and Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent. 
- - - Connector Memory - /muagent/connector-memory/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-memory/ - Memory Manager Primarily used for managing chat history, not yet completed Read and write chat history in the database, including user input, llm output, doc retrieval, code retrieval, search retrieval. Summarize key information from the chat history into a summary context, serving as a prompt context. Provide a search function to retrieve information related to the question from chat history or summary context, aiding in Q&amp;A. Usage Example Create memory manager instance import os import openai from coagent. - - - Connector Phase - /muagent/connector-phase/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-phase/ - Quickly Build an Agent Phase First, add OpenAI configuration, which can be models with similar interfaces to OpenAI (triggered via fastchat). import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then Set LLM Configuration and Vector Model Configuration Configure related LLM and Embedding Model. from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent. - - - Connector Prompt - /muagent/connector-prompt/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/connector-prompt/ - Prompt Manager Managing prompt creation in multi-agent linkages Quick Configuration: Utilizing preset processing functions, users can easily configure by simply defining the inputs and outputs of the agents, enabling fast assembly and configuration of multi-agent prompts. 
Customization Support: Allows users to customize the internal processing logic of each module within the prompt to achieve personalized implementation of the agent prompt. Preset Template Structure for Prompts Agent Profile: This section involves the basic description of the agent, including but not limited to the type of agent, its functions, and command set. - - - Custom Retrieval - /muagent/custom-retrieval/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/custom-retrieval/ - Basic Introduction Doc Retrieval is the document vector database, which is the most mainstream method for knowledge base construction nowadays. It uses Text Embedding models to vectorize documents and stores them in a vector database. In the future, we will also support querying based on knowledge graph and automatically extracting entities and relationships through large models to explore various complex relationships in data. Code Retrieval LLM faces challenges in tasks such as code generation, repair, and component understanding, including lagging code training data and the inability to perceive the dependency structure of code context. - - - Custom Tool - /muagent/custom-tool/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/custom-tool/ - Introduction In MuAgent, it also supports the registration of Tools by Agents. By registering the BaseToolModel class with Python and writing Tool_name Tool_description ToolInputArgs ToolOutputArgs run and other relevant properties and methods, the quick integration of tools can be achieved. It also supports the direct use of the langchain Tool interface. For example, functions like the aforementioned XXRetrieval can also be registered as a Tool, to be ultimately called by an LLM. 
- - - Customed Examples - /muagent/custom-examples/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/custom-examples/ - How to Create Your Personalized Agent Phase Scenario Below we will use a code repository to demonstrate the automatic generation of API documentation from code, detailing how to customize the construction of an agent phase. Design Your Prompt Structure codeGenDocGroup_PROMPT, create group Agent Prompt # update new agent configs codeGenDocGroup_PROMPT = &#34;&#34;&#34;#### Agent Profile Your goal is to response according the Context Data&#39;s information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. - - - Embedding Config - /muagent/embedding-model-config/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/embedding-model-config/ - Prepare Relevant Parameters First, add the OpenAI configuration; this could also be a model similar to the OpenAI interface (launched via fastchat). import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; Build LLM Config Constructing with a local model file from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, embed_model_path=embed_model_path ) Constructing via OpenAI from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;openai&#34;, api_key=api_key, api_base_url=api_base_url, ) Customizing and inputting langchain embeddings from muagent. - - - LLM Config - /muagent/llm-model-config/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/llm-model-config/ - Prepare Relevant Parameters First, add the OpenAI configuration, or you can use another model similar to the OpenAI interface (launched through fastchat). 
import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; Build LLM Config By passing the class openai from muagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, stop=&#34;**Observation:**&#34; ) Customizing and inputting langchain LLM from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from langchain.llms.base import BaseLLM, LLM class CustomizedModel(LLM): repetition_penalty = 1. - - - MuAgent - /muagent/muagent/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/muagent/ - Introduction To enhance the performance of large models in terms of inference accuracy, various innovative Large Language Model (LLM) playbooks have emerged in the industry. From the earliest Chain of Thought (CoT) and Thread of Thought (ToT) to Games on Tracks (GoT), these methods have continually expanded the capability boundaries of LLMs. When handling complex problems, we can select, invoke and execute tool feedback through the ReAct process, while realizing multi-round tool use and multi-step execution. 
- - - Prompt Manager - /coagent/prompt-manager/ - Mon, 01 Jan 0001 00:00:00 +0000 - /coagent/prompt-manager/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 Prompt自定义配置 Prompt模块参数 field_name:唯一的字段名称标识,必须提供。 function:指定如何处理输入数据的函数,必须提供。 title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 Prompt配置示例 Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 [ {&#34;field_name&#34;: &#39;agent_profile&#39;, &#34;function_name&#34;: &#39;handle_agent_profile&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;context_placeholder&#39;, &#34;function_name&#34;: &#39;&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;tool_information&#39;,&#34;function_name&#34;: &#39;handle_tool_data&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;reference_documents&#39;, &#34;function_name&#34;: &#39;handle_doc_info&#39;}, {&#34;field_name&#34;: &#39;session_records&#39;, &#34;function_name&#34;: &#39;handle_session_records&#39;}, 
{&#34;field_name&#34;: &#39;task_records&#39;, &#34;function_name&#34;: &#39;handle_task_records&#39;}, {&#34;field_name&#34;: &#39;output_format&#39;, &#34;function_name&#34;: &#39;handle_output_format&#39;, &#39;title&#39;: &#39;Response Output Format&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;response&#39;, &#34;function_name&#34;: &#39;handle_response&#39;, &#34;title&#34;=&#34;begin! - - - Quick Start - /muagent/quick-start/ - Mon, 01 Jan 0001 00:00:00 +0000 - /muagent/quick-start/ - Quick Start For a complete example, see examples/muagent_examples First, prepare the relevant configuration information import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; Then, set up LLM configuration and Embedding model configuration from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.connector.phase import BasePhase from muagent.connector.schema import Message llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0. 
- - - diff --git a/docs/muagent/llm-model-config-zh/index.html b/docs/muagent/llm-model-config-zh/index.html deleted file mode 100644 index 92e7f97..0000000 --- a/docs/muagent/llm-model-config-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/llm-model-config-zh/ - - - - - - diff --git a/docs/muagent/llm-model-config/index.html b/docs/muagent/llm-model-config/index.html deleted file mode 100644 index e571901..0000000 --- a/docs/muagent/llm-model-config/index.html +++ /dev/null @@ -1,488 +0,0 @@ - - - - - - - - -LLM Config · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    LLM Config

    -
    -
    - - -

    Prepare Relevant Parameters

    -

    First, add the OpenAI configuration, or you can use another model similar to the OpenAI interface (launched through fastchat).

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -

    Build LLM Config

    -
      -
    • By passing the class openai
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
      -
    • Customizing and inputting langchain LLM
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from langchain.llms.base import BaseLLM, LLM
    -
    -
    -class CustomizedModel(LLM):
    -        repetition_penalty = 1.1
    -        temperature = 0.2
    -        top_k = 40
    -        top_p = 0.9
    -        
    -        def predict(self, prompt: str, stop: Optional[List[str]] = None) -> str:
    -            return self._call(prompt, stop)
    -
    -        def _call(self, prompt: str,
    -                  stop: Optional[List[str]] = None) -> str:
    -            """_call"""
    -            return ""
    -
    -
    -llm = CustomizedModel()
    -llm_config = LLMConfig(
    -    llm=llm
    -)
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/muagent-overview/index.html b/docs/muagent/muagent-overview/index.html deleted file mode 100644 index d5cc634..0000000 --- a/docs/muagent/muagent-overview/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /muagent/muagent/ - - - - - - diff --git a/docs/muagent/muagent-zh/index.html b/docs/muagent/muagent-zh/index.html deleted file mode 100644 index e0b1631..0000000 --- a/docs/muagent/muagent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - - - - - - diff --git "a/docs/muagent/muagent-\346\246\202\350\247\210/index.html" "b/docs/muagent/muagent-\346\246\202\350\247\210/index.html" deleted file mode 100644 index e0b1631..0000000 --- "a/docs/muagent/muagent-\346\246\202\350\247\210/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - - - - - - diff --git a/docs/muagent/muagent/index.html b/docs/muagent/muagent/index.html deleted file mode 100644 index 70c0cca..0000000 --- a/docs/muagent/muagent/index.html +++ /dev/null @@ -1,493 +0,0 @@ - - - - - - - - -MuAgent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MuAgent

    -
    -
    - - -

    Introduction

    -

    To enhance the performance of large models in terms of inference accuracy, various innovative Large Language Model (LLM) playbooks have emerged in the industry. From the earliest Chain of Thought (CoT) and Thread of Thought (ToT) to Games on Tracks (GoT), these methods have continually expanded the capability boundaries of LLMs. When handling complex problems, we can select, invoke and execute tool feedback through the ReAct process, while realizing multi-round tool use and multi-step execution.

    -

    However, for more complex scenarios, such as the development of complex code, a single-function LLM Agent is clearly not up to the task. Therefore, the community has begun to develop combinations of multiple Agents, such as projects focused on the development field like metaGPT, GPT-Engineer, and chatDev, as well as the AutoGen project that focuses on automating the construction of Agents and Agent dialogue.

    -

    After an in-depth analysis of these frameworks, it has been found that most Agent frameworks are highly coupled, with poor usability and extensibility. They implement specific scenarios in preset settings, but expanding to new scenarios can be very challenging.

    -

    Therefore, we hope to build an extensible, easy-to-use Multi-Agent framework to support ChatBots in retrieving knowledge base information while assisting with various general tasks such as daily office work, data analysis, development, and operations.

    -

    This project’s Multi-Agent framework incorporates excellent designs from multiple frameworks, such as the message pool from metaGPT and the agent selector from autogen.

    -
    - 图片 -
    -

    MuAgent Framework

    -

    In MuAgent, in addition to defining the Agent interaction link and AgentBase basic execution flow, we have also designed two basic components: Prompt Manager and Memory Manager, which are used for automated construction of Prompts and chat history management, respectively. We have built an extensible, easy-to-use Multi-Agent framework, including the following content:

    -
      -
    • Agent Base: Established four basic types of Agents – BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent – to support basic activities in various scenarios.
    • -
    • Communication: Completes the transfer of information between Agents through Message and Parse Message entities, and interacts with Memory Manager to manage memory in the Memory Pool.
    • -
    • Prompt Manager: Automates the assembly of Customized Agent Prompts through Role Handler, Doc/Tool Handler, Session Handler, Customized Handler.
    • -
    • Memory Manager: Supports storage management of chat history, information compression, memory retrieval, and finally storage in databases, local or vector databases through the Memory Pool.
    • -
    • Component: Auxiliary ecosystem components for building Agents, including Retrieval, Tool, Action, Sandbox, etc.
    • -
    • Customized Model: Supports the integration of private LLM and Embedding.
    • -
    -

    Agent Base

    -

    At the Agent level, we provide four basic types of Agents, with Role settings for these Agents that can meet the interactions and uses of various common scenarios. All Actions are executed by Agents.

    -
      -
    1. BaseAgent: Provides basic question answering, tool usage, and code execution functions, and realizes input => output according to the Prompt format.
    2. -
    -
    - 图片 -
    -
      -
    1. ReactAgent: Provides standard React functionality, according to questions to execute current tasks.
    2. -
    -
    - 图片 -
    -
      -
    1. ExecutorAgent: Sequentially executes a list of tasks, completing related tasks according to plans arranged by the User or the previous Agent. The Agent receives a task list ([List[task]) and loops through the tasks (Feedback Agents can also be added in the middle for task re-optimization), until the task is complete.
    2. -
    -
    - 图片 -
    -
      -
    1. SelectorAgent: Provides the function of selecting an Agent, choosing the appropriate Agent to respond based on the question from the User or the previous Agent.
    2. -
    -
    - 图片 -
    -

    Communication

    -

    To enable better interaction between Agents, as well as to provide each Agent with enough information to complete their specific tasks, we have divided the Message information body into several parts, such as System Content, Info Content, LLM Content, and LLM Parsed Content, etc.

    -

    System Content: Used to store and manage the timing of the current LLM output, Role information, etc. -Info Content: LLM auxiliary information, such as knowledge base query information, code library retrieval information, tool information, Agent information, etc.

    -

    LLM Content: Directly stores and conveys information generated by the LLM. -LLM Parsed Content: Parses the LLM’s output into a more manageable key-value data structure, making it easier to filter through LLM content. -Customized Content: Manages key-value data content generated by custom actions, used for subsequent assembly and construction of custom Prompt templates. -By defining the above message formats, we can accomplish the transfer and management of general messages. Specific assembly methods can be seen in the Prompt Manager module.

    -

    Context Manager

    -

    Memory Manager

    -

    Mainly used for the management of chat history:

    -
      -
    • Storage Management: Implements the save and load management of chat history in the database or locally, including user input, LLM output, observation output.
    • -
    • Information Compression: Summarizes key information from the chat history into a summary context, such as single text summaries, summaries from different angles, key information extraction, multi-text summaries, and serves as Prompt context.
    • -
    • Memory Retrieval: Provides basic retrieval functions, retrieving information related to questions from chat history or Summary Context to assist in Q&A.
    • -
    • LLM Automatic Trigger: Future definitions of policies or the use of LLM to trigger the compression summary and retrieval functions.
    • -
    -

    Prompt Manager

    -

    Asking LLMs has become common practice, but how to coordinate the planning and usage of tools, code writing abilities among multiple large models to guide their expected outputs has become a key issue. Essentially, this involves abstracting business problems into executable Prompts, so we’re not just designing Agents but rather engaging in framework design after a deep understanding of the current demands.

    -

    In actual business scenarios where LLMs are involved (excluding the SFT process), we can designate LLM to complete specific tasks and obtain expected outputs through the design of Agent Prompt content. In the process of MuAgent, the Prompt is divided into three parts: System Prompt, Context Prompt, Customized Prompt.

    -
      -
    • System Prompt includes Role Name, Role Description, Task, etc.
    • -
    • Context Prompt includes Doc Context, Code Context, Tool Context, Agent Context, Session Context, etc.
    • -
    • Customized Prompt involves custom inputs and outputs, such as… -We can also ask the model to output structured texts, such as the JSON string of a tool, code\ncode_content, etc., to complete particular workflows.
    • -
    -

    Automatic Prompt Assemble

    -

    After defining the structure as above, we can complete the automation assembly of Prompts in the following ways, without having to make extensive adjustments to the prompt each time:

    -
      -
    1. Upon defining an Agent, configure Role Name, Role Description, Task, etc., to determine what the Agent needs to do.
    2. -
    3. Pre-package some reusable Context Prompt general strategies, such as selectable Role’s SessionContext, configurable Tool, Code Retrieval, Doc Retrieval, Search Retrieval, Agent to complete corresponding assemblies.
    4. -
    5. As the Agent’s Prompt requires relatively personalized operations, it also supports the addition of new key-context designs within the Prompt Manager module to achieve personalized Agent Prompts.
    6. -
    -

    Automatic Prompt Design -Able to automatically design the best prompt based on role description, task, query, etc.; to be defined…

    -

    Multi Prompt Design -Based on the previous definition of Prompt, we know that a Prompt consists of three parts: System Prompt, Context Prompt, Customized Prompt. Any changes in the three parts may cause changes in the final output of the LLM.

    -

    For the same type of task, their System Prompt is the same. So, without considering the variations of Customized Prompt, it is possible to achieve the assembly differences of different contexts. For example, Prompt A obtains 10 rounds of chat history, while Prompt B uses 5 rounds of chat history, or alternatively, filters and compresses information in chat history.

    -

    To be implemented…

    -

    Component

    -

    Retrieval

    -

    In all Prompts’ Contexts, aside from Chat History session information, information based on external document libraries, code repositories, internet search results is also relied upon. This knowledge system beyond the model parameters can significantly enhance the Agent’s ability to complete complex tasks.

    -

    Thus, in MuAgent, we integrated three ways to retrieve information: Doc, Internet Search, Code Retrieval, and defined an abstract class IMRetrieval, supporting developers to customize their knowledge bases to complete the Agent’s knowledge base registration.

    -

    Doc Retrieval

    -

    Document vector databases are currently the mainstream method for building knowledge bases, using Text Embedding models to vectorize documents and store them in vector databases. In the future, we will also support queries based on knowledge graphs and automatically extract entities and relations through large models to explore the complex relationships in data.

    -

    Code Retrieval

    -

    LLMs face the challenge of lagging training data for code generation, repair, and component understanding tasks, as well as not being able to perceive the context-dependent structure of code. During development, understanding, retrieving and querying metadata from the existing codebase and dependencies can take a considerable amount of time. Hence, we hope to provide an external knowledge system for code.

    -

    Search Retrieval -In addition to the readily available document and code knowledge bases, in daily practice, browsing a large amount of web content to acquire more knowledge helps us understand emerging scenarios, businesses, technologies, and more. Hence, we’ve integrated duckduckgosearch, an open-source search tool, to provide LLMs with content beyond their knowledge reserves.

    -

    Tool

    -

    With OpenAI launching the Function Call feature, which generates parameters for specified tools through LLM and executes the call, machines can better understand and respond to human needs, thus solving practical problems and repetitive work. Nowadays, the ability to learn tools is increasingly becoming a standard feature of open-source models. Therefore, in MuAgent, it also supports agents to complete Tool registration. By using the Python registration template BaseToolModel class and writing related properties and methods such as Tool_name, Tool_description, ToolInputArgs, ToolOutputArgs, and run, tools can be quickly integrated. It also supports the direct use of langchain Tool interfaces. -For example, functions like the above XXRetrieval can also be registered as a Tool, ultimately called by LLM.

    -

    Action

    -

    In the definition of MuAgent, Action is viewed as a specific action or action flow that LLM needs to execute, including LLM information processing, knowledge retrieval, tool invocation, and code execution, etc., constituting a comprehensive and complex dynamic process. For instance, in the React process, we obtained a Tool parameter through LLM, and then “putting the tool parameter into the Tool and executing the call” is an Action, which practically invokes the Tool. Or, we defined an Agent, who orchestrates a fixed agent’s Action steps, with the input parameters of this Agent specially designated by the Action. That is to say, whether the parameters are generated by LLM or set by engineering, as long as it involves a specific execution process, it is an Action.

    -

    Modules

    -
      -
    • connector Mainly introduces the work of this block of the Agent framework
    • -
    • llm_models
    • -
    • retrieval
    • -
    • tools
    • -
    • sandbox
    • -
    • utils
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/muagent/multi-agent-zh/index.html b/docs/muagent/multi-agent-zh/index.html deleted file mode 100644 index e0b1631..0000000 --- a/docs/muagent/multi-agent-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - - - - - - diff --git a/docs/muagent/multi-agent/index.html b/docs/muagent/multi-agent/index.html deleted file mode 100644 index d5cc634..0000000 --- a/docs/muagent/multi-agent/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /muagent/muagent/ - - - - - - diff --git a/docs/muagent/quick-start-zh/index.html b/docs/muagent/quick-start-zh/index.html deleted file mode 100644 index 9e268d9..0000000 --- a/docs/muagent/quick-start-zh/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - - - - - - diff --git a/docs/muagent/quick-start/index.html b/docs/muagent/quick-start/index.html deleted file mode 100644 index 73e39db..0000000 --- a/docs/muagent/quick-start/index.html +++ /dev/null @@ -1,704 +0,0 @@ - - - - - - - - -Quick Start · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Quick Start

    -
    -
    - - -

    Quick Start

    -

    For a complete example, see examples/muagent_examples

    -

    First, prepare the relevant configuration information

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    Then, set up LLM configuration and Embedding model configuration

    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.connector.phase import BasePhase
    -from muagent.connector.schema import Message
    -
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Finally, choose an existing scenario to execute

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# Choose a scenario
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -# round-1 needs to be completed by code interpreter
    -query_content = "Confirm whether employee_data.csv exists locally and view its columns and data types; then draw a bar chart"
    -query = Message(
    -    role_name="human", role_type="user", tools=[], input_query=query_content, 
    -)
    -# phase.pre_print(query)  # This function is used to pre-print the Prompt of the Agents' execution chain
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    -# round-2 requires the execution of a tool
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -query_content = "Please help me check if the server at 127.0.0.1 had any issues at 10 o'clock, help me to determine"
    -query = Message(
    -    role_name="human", role_type="user", tools=tools, input_query=query_content, 
    -)
    -# phase.pre_print(query)  # This function is used to pre-print the Prompt of the Agents' execution chain
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Phase Customization

    -

    Refer to How to Customize Phase

    -

    Introduction and Usage of Scenes

    -

    Below are some specific scene introductions and usages. -We also welcome everyone to brainstorm and construct some interesting cases.

    -

    baseTaskPhase

    -

    Scenarios involving task segmentation and multi-step execution of xAgents

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# log-level, print prompt and llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "baseTaskPhase"
    -phase = BasePhase(
    -phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -
    -
    -# round-1
    -query_content = "Check if employee_data.csv exists locally and see what columns and data types it has; then draw a bar chart"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -    )
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeReactPhase

    -

    The code interpreter scenario based on React

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# then, create a data analyze phase
    -phase_name = "codeReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -    jupyter_work_path=JUPYTER_WORK_PATH,
    -)
    -
    -# round-1
    -query_content = "Check if 'employee_data.csv' exists locally, view its columns and data types; then draw a bar chart"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeToolReactPhase

    -

    The tool invocation and code interpreter scenario based on the React template

    -
    TOOL_SETS = [
    -     "StockName", "StockInfo", 
    -    ]
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -
    -# log-level, print prompt and llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "codeToolReactPhase"
    -
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -query_content =  "Query the stock code of Kweichow Moutai and acquire the time series data of the last 10 days up to the current date (December 24th, 2023); then use code to draw a line chart and analyze it"
    -
    -query = Message(role_name="human", role_type="user", input_query=query_content, tools=tools)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    docChatPhase

    -

    Knowledge Base Retrieval and Question-Answering Pipeline

    -
      -
    • example 1
    • -
    -
    # create your knowledge base
    -from muagent.service.kb_api import create_kb, upload_files2kb
    -from muagent.utils.server_utils import run_async
    -from muagent.orm import create_tables
    -
    -
    -# use to test, don't create some directory
    -create_tables()
    -
    -# create a knowledge base
    -kb_name = "example_test"
    -run_async(create_kb(knowledge_base_name=kb_name, vector_store_type="faiss", embed_config=embed_config, kb_root_path=KB_ROOT_PATH))
    -
    -# add doc to knowledge base
    -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl")
    -files = [file]
    -upload_files2kb(files, kb_name, embed_config, kb_root_path=KB_ROOT_PATH)
    -
    -
    -## start to chat with knowledge base
    -# log-level, print prompt, and llm predict
    -os.environ["log_verbose"] = "0"
    -
    -## example 1
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH,
    -)
    -
    -# round-1
    -query_content = "What modules does langchain have?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content = "What is the use of prompts?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
      -
    • example 2
    • -
    -
    ## Customized register demo
    -from muagent.tools import DocRetrieval
    -class BaseDocRetrieval(IMRertrieval):
    -
    -    def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH):
    -        self.knowledge_base_name = knowledge_base_name
    -        self.search_top = search_top
    -        self.score_threshold = score_threshold
    -        self.embed_config = embed_config
    -        self.kb_root_path = kb_root_path
    -
    -    def run(self, query: str, search_top=None, score_threshold=None, ):
    -        docs = DocRetrieval.run(
    -            query=query, knowledge_base_name=self.knowledge_base_name,
    -            search_top=search_top or self.search_top,
    -            score_threshold=score_threshold or self.score_threshold,
    -            embed_config=self.embed_config,
    -            kb_root_path=self.kb_root_path
    -        )
    -        return docs
    -
    -
    -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config)
    -
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH,
    -    doc_retrieval=doc_retrieval
    -)
    -
    -# round-1
    -query_content = "What modules does langchain have?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    -# round-2
    -query_content = "What is the use of prompts?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    metagpt_code_devlop

    -

    The code construction Phase in metagpt

    -
    # log level, print prompt, and llm predict
    -os.environ["log_verbose"] = "2"
    -phase_name = "metagpt_code_development"
    -
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -query_content = "create a snake game"
    -query = Message(role_name="human", role_type="user", input_query=query_content)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    searchChatPhase

    -

    Fixed scenario chain, search first then directly answer based on LLM

    -
    # log-level, print prompt and llm predict
    -os.environ["log_verbose"] = "2"
    -
    -# This can be configured when the duckduckgo connection is not available
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5h://127.0.0.1:13659"
    -phase_name = "searchChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -
    -# round-1
    -query_content1 = "Who is the current President of the United States?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content1,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content2 = "Who was the previous president of the United States, and do these two people have any relationship?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content2,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    toolReactPhase

    -

    The tool invocation scene based on the React template

    -
    # log-level, print prompt and llm predict
    -os.environ["log_verbose"] = "2"
    -phase_name = "toolReactPhase"
    -
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -# round-1
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -query_content = "Please help me check if there were any issues with the server at 127.0.0.1 at 10 o'clock, I need your assistance in determining this."
    -query = Message(
    -    role_name="human", role_type="user", tools=tools, input_query=query_content,
    -)
    -
    -# phase.pre_print(query)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/muagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" "b/docs/muagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" deleted file mode 100644 index 9e268d9..0000000 --- "a/docs/muagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" +++ /dev/null @@ -1,10 +0,0 @@ - - - - /zh/muagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - - - - - - diff --git a/docs/multi-agent/index.html b/docs/multi-agent/index.html deleted file mode 100644 index d3c31b6..0000000 --- a/docs/multi-agent/index.html +++ /dev/null @@ -1,685 +0,0 @@ - - - - - - - - -Multi-Agent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Multi-Agent

    -
    -
    - - -

    📜 目录

    - -

    简介

    -

    为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。

    -

    但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。

    -

    经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。

    -

    因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。

    -

    本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。

    -
    - 图片 -
    -

    以下模块将从5个方面介绍Multi Agent框架所需要素:

    -
      -
    • Agent Communication在Multi Agent框架中,确保Agent可以有效地进行信息交流对于管理上下文以及提高问答效率至关重要。 -a. 遵循简洁直观易于理解的链式对话原则,将Agent以线性方式排列串连成一个执行链路。 -b. 借鉴metaGPT中的Message Pool框架,允许Agent对Message Pool进行推送和订阅,使链路更加灵活。有利于精细化Prompt工程的场景,但难以把握复杂链路的关系分析。
    • -
    • Standard Operation Process(SOP):对LLM的生成结果进行标准化解析和处理。 -a. 定义Agent的 Input 和 Output 范围,能够组装和解析相关Action和Status,保证框架运行的稳定性 -b. 封装多种基础Action执行模块,如Tool Using、Planning、Coding、Direct Answering、final answer等SOP标识,以满足Agent的基本工作需求。
    • -
    • Plan and Executor:增加LLM的Tool使用、Agent调度、代码的生成。设置了几种基本链路,例如: -a. 单轮问答,也可以扩展到CoT、ToT、GoT等形式。 -b. ReAct,基础的响应决策过程,模型设置SOP 状态以终止循环 -c. TaskPlaning - Executor,任务完成即可结束
    • -
    • Long-short term memory Management:Multi-Agent与单Agent的关键区别在于,Multi-Agent需要处理大量的交流信息,类似人类团队协作的过程。增加一个专门负责内容总结(类似于会议助理)的Agent,对长期记忆进行总结并提更有效信息传递给下一位Agent,而非传递所有内容给下一位Agent。
    • -
    • Human-agent interaction:面对复杂场景时,需要人类介入Agent交互过程并提供反馈。通过上述 Long-short term memory Management 和 Agent Communication 过程,使LLM能准确理解人类的意图,从而更有效地完成任务。
    • -
    -

    总的来说,这五个要素共同构建了一个Multi Agent框架,确保Agent之间的协作更加紧密和高效,同时也能够适应更复杂的任务需求和更多样的交互场景。通过组合多个Agent链路来实现一个完整且复杂的项目上线场景(Dev Phase),如Demand Chain(CEO)、Product Arguement Chain(CPO、CFO、CTO)、Engineer Group Chain(Selector、Developer1~N)、QA Engineer Chain(Developer、Tester)、Deploy Chain(Developer、Deploer)。

    -

    模块介绍

    -

    为了便于大家理解整个Multi-Agent的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建

    -
    - 图片 -
    -


    下面,我们先介绍相关的模块

    -

    Agent

    -

    在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用

    -
      -
    1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出
    2. -
    3. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务
    4. -
    5. ReactAgent:提供标准React的功能,根据问题实现当前任务
    6. -
    7. SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答.
    8. -
    -

    输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理

    -

    Chain

    -

    基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理

    -

    Phase

    -

    基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理

    -

    Prompt Manager

    -

    Mutli-Agent链路中每一个agent的prompt创建

    -
      -
    1. 通过对promtp_input_keys和promtp_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置
    2. -
    3. 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt -Memory Manager -主要用于 chat history 的管理,暂未完成 -● 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval -● 对 chat history 进行关键信息总结 summary context,作为 prompt context -● 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答
    4. -
    -

    Role Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    role_promptString角色描述
    role_typeStringEnum: assistant
    role_nameString角色名称,用于后续prompt context的组装和筛选
    agent_typeStringEnum:BaseAgent、SelectorAgent、ExecutorAgent、ReactAgent 也可以继承以上几种Agent然后去构造相关的Agent
    focus_agentsList[String]metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name
    focus_message_keysList[String]额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys
    promtp_input_keysList[String]Enum:
    promtp_output_keysList[String]Enum:
    chat_turnint只针对ReactAgent有效
    -

    Chain Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    chain_promptStringchain的描述
    chain_nameString角色名称,用于后续prompt context的组装和筛选
    chain_typeStringEnum:BaseChain 也可以继承以上Chain,构造相关的Chain
    agentsList[String]chain当中存在的agent以及agent的执行顺序
    chat_turnint agent之间的交互轮数
    -

    Phase Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    phase_nameString场景名称
    phase_typeStringEnum:BasePhase 也可以继承以上Phase,自定义构造相关的Phase
    chainsList[String]phase当中存在的chain以及chain的执行顺序
    do_doc_retrievalbool在场景执行开始判断是否需要补充额外信息
    do_code_retrievalbool在场景执行开始判断是否需要补充额外信息
    do_tool_retrievalbool在场景执行开始判断是否需要补充额外信息
    -

    快速使用

    -

    Comming soon

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/scss/base.css b/docs/scss/base.css deleted file mode 100644 index 230c42c..0000000 --- a/docs/scss/base.css +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Docura (https://docura.github.io/) - * Copyright 2022-2023 Dumindu Madunuwan - * Licensed under the MIT License. - */*:where(:not(html, iframe, canvas, img, svg, video, audio, pre, code):not(svg *, symbol *)){all:unset;display:revert}*,*::before,*::after{box-sizing:border-box}html{-moz-text-size-adjust:none;-webkit-text-size-adjust:none;text-size-adjust:none}a,button{cursor:revert}ol,ul,menu{list-style:none}img{max-inline-size:100%;max-block-size:100%}table{border-collapse:collapse}input,textarea{-webkit-user-select:auto}textarea{white-space:revert}meter{-webkit-appearance:revert;appearance:revert}:where(pre){all:revert;box-sizing:border-box}::placeholder{color:unset}::marker{content:initial}:where([hidden]){display:none}:where([contenteditable]:not([contenteditable="false"])){-moz-user-modify:read-write;-webkit-user-modify:read-write;overflow-wrap:break-word;-webkit-line-break:after-white-space;-webkit-user-select:auto}:where([draggable="true"]){-webkit-user-drag:element}:where(dialog:modal){all:revert;box-sizing:border-box}pre,code{margin:0}:root{--site-header-height: 46px;--site-footer-height: 46px}@media (min-width: 1025px) and (max-width: 1280px),(min-width: 1024px) and (max-width: 1280px) and (orientation: portrait){:root{--site-header-height: 60px;--site-footer-height: 60px}}@media (min-width: 1281px){:root{--site-header-height: 80px;--site-footer-height: 80px}}body{font-family:var(--font-family);background:var(--background);color:var(--color);display:flex;flex-direction:column;min-height:100svh}#site-header{display:grid;grid-template-columns:2fr 1fr;grid-template-rows:repeat(3, var(--site-header-height))}#site-header-menu,#site-header-search{grid-column:1 / 3}#site-footer{display:grid;grid-template-columns:1fr 
1fr;grid-template-rows:repeat(3, var(--site-footer-height))}#site-footer-copyright,#site-footer-love{grid-column:1 / 3}#site-main-content-wrapper{display:flex;flex:1}#sidebar,#toc,#article-nav,#sidebar .btn-close,#toc .btn-close{display:none}main{flex:1;display:flex;overflow:auto}#article{flex:1;width:100vw}#sidebar{width:85%;left:-85%}#toc{width:85%;right:-85%}@media (min-width: 768px) and (max-width: 1023px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:repeat(2, var(--site-header-height))}#site-header-brand{grid-column:1 / 6}#site-header-controls{grid-column:6 / 7}#site-header-menu{grid-column:1 / 5}#site-header-search{grid-column:5 / 7}#site-footer{grid-template-columns:repeat(4, 1fr);grid-template-rows:repeat(2, var(--site-footer-height))}#site-footer-copyright{grid-column:1 / 3}#site-footer-social{grid-column:3 / 4}#site-footer-fund{grid-column:4 / 5}#site-footer-love{grid-column:1 / 5}#sidebar{width:50%;left:-50%}#toc{width:50%;right:-50%}}@media (min-width: 1024px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:var(--site-header-height)}#site-header-brand{grid-column:1 / 2}#site-header-menu{grid-column:2 / 5;grid-row:1}#site-header-search{grid-column:5 / 6;grid-row:1}#site-header-controls{grid-column:6 / 7}#site-footer{grid-template-columns:repeat(5, 1fr);grid-template-rows:var(--site-footer-height)}#site-footer-copyright{grid-column:1 / 3}#site-footer-love{grid-column:3 / 4;grid-row:1}#site-footer-social{grid-column:4 / 5}#site-footer-fund{grid-column:5 / 6}#article-nav-toc-btn{display:none}}@media (min-width: 1024px) and (max-width: 1279px){#sidebar{width:33%;left:-33%}#article{width:75vw}#toc{width:25%;display:flex;flex-direction:column}#toc .sticky{position:fixed;right:0;width:25%}}@media (min-width: 1280px){#sidebar{width:20%;display:flex;flex-direction:column}#article{width:60vw}#toc{width:25%;display:flex;flex-direction:column}#sidebar .sticky{position:fixed;left:0;width:20%}#toc 
.sticky{position:fixed;right:0;width:20%}}@media (max-width: 1023px){#toc{position:fixed;top:0;height:100%;transition:.3s;z-index:300;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] #toc,:root[data-color="night"] #toc{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-toc-on #toc{animation:slide-in-right .3s forwards;display:flex;flex-direction:column;padding-left:16px;z-index:10;cursor:default}.offcanvas-toc-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-toc-on #toc .btn-close{display:block;position:absolute;top:10px;left:10px}#article-nav-toc-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}@media (max-width: 1279px){#sidebar{position:fixed;top:0;height:100%;transition:.3s;z-index:200;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] #sidebar,:root[data-color="night"] #sidebar{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-sidebar-on #sidebar{animation:slide-in-left .3s forwards;display:flex;flex-direction:column;z-index:10;cursor:default}.offcanvas-sidebar-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-sidebar-on #sidebar .btn-close{display:block;position:absolute;top:10px;right:10px}#article-nav{display:flex;gap:12px;overflow:auto;justify-content:space-between;height:var(--site-header-height);align-items:center;padding:0 2px}#article-nav-menu-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}body.offcanvas-sidebar-on,body.offcanvas-toc-on{cursor:pointer;overflow:hidden}.offcanvas-sidebar-on:before,.offcanvas-toc-on:before{background:rgba(255,255,255,0.1);backdrop-filter:blur(var(--blur));-webkit-backdrop-filter:blur(var(--blur))}@keyframes 
slide-in-left{from{transform:translateX(0)}to{transform:translateX(100%)}}@keyframes slide-in-right{from{transform:translateX(0)}to{transform:translateX(-100%)}}#site-header-brand{display:flex;align-items:center;font-family:var(--font-family-brand);font-size:1.4em;color:var(--color2)}#site-header-brand a{padding:12px}#site-header-menu{padding:0 12px;display:flex;align-items:center;color:var(--color3)}#site-header-menu nav{width:100%;overflow:auto}#site-header-menu ul{display:flex;height:100%;align-items:center;gap:12px}#site-header-menu a{display:flex;padding:12px 6px;gap:3px;white-space:nowrap}#site-header-menu a:focus,#site-header-menu a:hover,#site-header-menu a.active{border-bottom:3px solid}#site-header-controls{display:flex;align-items:center;padding-right:12px;justify-content:flex-end;gap:12px}#site-header-search{display:flex;align-items:flex-end}@media (min-width: 768px){#site-header-search{align-items:center}}#site-footer-social{display:flex;gap:12px;justify-content:flex-start;padding-left:12px;align-items:center}#site-footer-fund{display:flex;gap:12px;overflow:auto;justify-content:flex-end;padding-right:12px;align-items:center}#site-footer-copyright,#site-footer-love{display:flex;align-items:center;justify-content:center;color:var(--color3)}#site-footer-copyright a{display:flex;align-items:center}@media (min-width: 768px){#site-footer-copyright{justify-content:flex-start;padding-left:12px}#site-footer-social{justify-content:flex-end;padding-right:12px}}#article{padding:8px 16px}#article-header{font-size:3em;font-weight:400;margin-bottom:1em;color:var(--color2)}#article-content h1,#article-content h2,#article-content h3,#article-content h4,#article-content h5,#article-content h6{line-height:1em;font-weight:400;margin:2.6em 0 .1em;color:var(--color2)}#article-content h1{font-size:1.8em}#article-content h2{font-size:1.5em}#article-content h3{font-size:1.3em}#article-content h4{font-size:1.1em}#article-content .highlight,#article-content 
blockquote,#article-content dl,#article-content iframe,#article-content ol,#article-content p,#article-content table,#article-content ul{margin-top:1em;line-height:1.8rem;letter-spacing:-.1px}#article-content blockquote p{margin:1em 0}#article-content blockquote dl,#article-content blockquote ol,#article-content blockquote ul{margin:0 1em 1em 1em}#article-content a{color:var(--color-anchor);text-decoration:none}#article-content a:hover{color:var(--color-hover);text-decoration:underline}@media print{#article-content a{color:#355265;text-decoration:underline}#article-content a:after{content:" (" attr(href) ")";font-size:80%}}#article-content strong,#article-content b,#article-content table th{font-weight:600}#article-content em{font-style:italic}#article-content dl,#article-content ol,#article-content ul{margin-left:20px}#article-content dl dl,#article-content dl ol,#article-content dl ul,#article-content ol dl,#article-content ol ol,#article-content ol ul,#article-content ul dl,#article-content ul ol,#article-content ul ul{margin-top:0;margin-bottom:0}#article-content ul{list-style:disc}#article-content ol{list-style:decimal}#article-content dl{list-style:square}#article-content li>ul{list-style:circle}#article-content li>ol{list-style:lower-alpha}#article-content li p{margin:0}#article-content li .highlight,#article-content li blockquote,#article-content li iframe,#article-content li table{margin:1em 0}#article-content img,#article-content video{max-width:100%;border-radius:4px}#article-content blockquote{padding:8px 12px;position:relative;background:var(--background-fg);border-left:4px solid var(--border-color);border-radius:6px}#article-content blockquote footer{margin:1em 0;font-style:italic}#article-content blockquote footer cite:before{content:"—";padding:0 .3em}#article-content blockquote footer cite a{color:var(--border-color)}#article-content code,#article-content pre{font-family:var(--font-family-code)}#article-content h1 code,#article-content h2 
code,#article-content h3 code,#article-content h4 code,#article-content h5 code,#article-content h6 code,#article-content p code,#article-content blockquote code,#article-content ul code,#article-content ol code,#article-content dl code,#article-content table code{background:var(--chroma-base00);padding:4px;border-radius:4px;font-size:.9em}#article-content pre:not(.chroma){color:var(--chroma-base05);font-size:.9em;line-height:1.8;letter-spacing:-.1px;background-color:var(--chroma-base00);border-radius:6px;padding:16px 24px;overflow-x:auto;margin-top:1em}#article-content blockquote code{background:var(--background-fg2);opacity:.8}#article-content blockquote .chroma,#article-content blockquote pre:not(.chroma){background:var(--background-fg2);margin-bottom:1em}#article-content blockquote .chroma code,#article-content blockquote pre:not(.chroma) code{padding:0}#article-content table{max-width:100%;border:1px solid var(--border-color)}#article-content table td,#article-content table th{padding:5px 15px}#article-content table tr:nth-child(2n){background:var(--background-fg)}#article-footer{display:grid;grid-template-columns:1fr 1fr;padding-top:20px}#article-last-updated,#article-prev-link,#article-next-link{display:flex;align-items:center;padding:12px 0}#article-last-updated{grid-column:1 / 3;justify-content:center;color:var(--color3)}#article-prev-link,#article-next-link{color:var(--color-anchor)}#article-prev-link:hover,#article-next-link:hover{color:var(--color-hover);font-weight:600;font-size:98%}#article-next-link{justify-content:flex-end}#article-prev-link .icon{padding-right:6px}#article-next-link .icon{padding-left:6px}@media (max-width: 767px){#article-next-link[data-first-page="true"]{grid-column:2/ 3}}@media (min-width: 768px){#article{padding:16px 24px}#article-footer{display:grid;grid-template-columns:repeat(3, 1fr)}#article-prev-link{grid-column:1/ 2;grid-row:1}#article-last-updated{grid-column:2 / 3}#article-next-link{grid-column:3 / 4}}@media (min-width: 
1024px){#article{padding:24px 32px}}@media (min-width: 1281px){#article{padding:32px 40px}}@media (min-width: 1920px){#article{padding:40px 48px}#article-content{width:90%}}@media (min-width: 2560px){#article-content{width:85%}}@media (min-width: 3840px){#article-content{width:80%}}#sidebar{padding:40px 0}#sidebar .sticky{display:flex;flex-direction:column;padding:0 20px;overflow:auto}.sidebar-section,.sidebar-link{padding:7px 0}.sidebar-section{margin-top:40px;font-weight:600;color:var(--color2)}#sidebar .sidebar-section:first-child{margin-top:0}.sidebar-link{padding-left:10px;color:var(--color3);border-left:1px solid var(--border-color);margin-left:4px}.sidebar-link::before{content:'';display:inline-block;width:6px;height:6px;background:var(--background);box-shadow:var(--box-shadow);border-radius:50%;position:relative;left:-13.5px;top:-3px}.sidebar-link:hover{color:var(--color-hover);font-weight:600;font-size:98%}.sidebar-link.current{color:var(--color-anchor);font-weight:600;font-size:98%}.sidebar-link.current::before,.sidebar-link:hover::before{background:var(--color-anchor)}#toc{padding-top:40px;padding-bottom:40px}#toc .sticky{overflow:auto}#toc strong{font-weight:600;padding:7px 10px 7px 0;display:flex;gap:3px;position:relative;left:-3px;color:var(--color2)}#toc ul{margin-left:.3em;border-left:1px solid var(--border-color)}#toc ul ul{margin-left:1em}#toc ul a{display:inline-block;padding:7px;color:var(--color3)}#toc ul a.active,#toc ul a:hover{color:var(--color-hover)}#toc ul a::before{content:'';display:inline-block;width:6px;height:6px;background:var(--background);box-shadow:var(--box-shadow);position:relative;left:-10.5px;top:-3px}#toc ul a.active::before,#toc ul a:hover::before{background:var(--color-hover)}.btn-github{display:flex;flex-direction:row;gap:2px;font-size:.7em;font-weight:700;line-height:1.8em;color:#576060;background:#f6f8fa;border:1px solid #d5d7da;border-radius:6px;padding:2px 4px}:root[data-color="dark"] 
.btn-github,:root[data-color="night"] .btn-github{color:#c9d1d9;background:#21262d;border:1px solid #576060}.btn-github .icon{transform:scale(0.8)}.btn-buymeacoffee{width:86px;height:24px;background-image:url("data:image/svg+xml,%3Csvg width='85.5' height='24' viewBox='0 0 545 153' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0 24.48C0 10.9601 10.9601 0 24.48 0H520.2C533.72 0 544.68 10.9601 544.68 24.48V128.52C544.68 142.04 533.72 153 520.2 153H24.48C10.9601 153 0 142.04 0 128.52V24.48Z' fill='%23FFDD00'/%3E%3Cpath d='M109.522 50.3178L109.455 50.2783L109.299 50.2308C109.362 50.2836 109.44 50.3142 109.522 50.3178Z' fill='%230D0C22'/%3E%3Cpath d='M110.507 57.3134L110.432 57.3344L110.507 57.3134Z' fill='%230D0C22'/%3E%3Cpath d='M109.549 50.3062C109.54 50.3051 109.532 50.3031 109.524 50.3003C109.523 50.3058 109.523 50.3113 109.524 50.3168C109.533 50.3156 109.541 50.3119 109.549 50.3062Z' fill='%230D0C22'/%3E%3Cpath d='M109.523 50.3205H109.536V50.3127L109.523 50.3205Z' fill='%230D0C22'/%3E%3Cpath d='M110.447 57.3006L110.56 57.2361L110.602 57.2123L110.64 57.1715C110.569 57.2025 110.503 57.2462 110.447 57.3006Z' fill='%230D0C22'/%3E%3Cpath d='M109.715 50.4713L109.604 50.3659L109.529 50.3251C109.57 50.3963 109.636 50.4488 109.715 50.4713Z' fill='%230D0C22'/%3E%3Cpath d='M81.8801 118.353C81.7916 118.391 81.7142 118.451 81.6548 118.527L81.7246 118.482C81.772 118.439 81.8392 118.387 81.8801 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M98.0456 115.173C98.0456 115.073 97.9968 115.091 98.0087 115.447C98.0087 115.418 98.0206 115.389 98.0258 115.361C98.0324 115.298 98.0377 115.236 98.0456 115.173Z' fill='%230D0C22'/%3E%3Cpath d='M96.3761 118.353C96.2877 118.391 96.2103 118.451 96.1509 118.527L96.2207 118.482C96.2681 118.439 96.3353 118.387 96.3761 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M70.4886 119.11C70.4215 119.052 70.3393 119.013 70.2515 118.999C70.3226 119.034 70.3937 119.068 70.4412 119.094L70.4886 119.11Z' fill='%230D0C22'/%3E%3Cpath d='M67.9304 116.657C67.92 
116.553 67.8881 116.453 67.8369 116.362C67.8732 116.456 67.9035 116.553 67.9278 116.652L67.9304 116.657Z' fill='%230D0C22'/%3E%3Cpath d='M85.1368 72.7737C81.6195 74.2794 77.628 75.9866 72.4549 75.9866C70.2908 75.9823 68.1373 75.6854 66.0527 75.104L69.6306 111.838C69.7572 113.373 70.4567 114.805 71.59 115.848C72.7233 116.892 74.2076 117.471 75.7482 117.47C75.7482 117.47 80.8212 117.734 82.514 117.734C84.3358 117.734 89.7988 117.47 89.7988 117.47C91.3391 117.47 92.8231 116.891 93.9562 115.848C95.0892 114.804 95.7885 113.373 95.9151 111.838L99.7472 71.2456C98.0347 70.6607 96.3064 70.2721 94.358 70.2721C90.9883 70.2708 88.2733 71.4313 85.1368 72.7737Z' fill='white'/%3E%3Cpath d='M54.9844 57.1021L55.045 57.1587L55.0845 57.1824C55.0541 57.1522 55.0205 57.1252 54.9844 57.1021Z' fill='%230D0C22'/%3E%3Cpath d='M116.299 53.7119L115.761 50.9943C115.277 48.5559 114.18 46.2519 111.677 45.3706C110.875 45.0887 109.964 44.9675 109.349 44.384C108.734 43.8004 108.552 42.8941 108.41 42.0536C108.147 40.511 107.899 38.9671 107.629 37.4272C107.396 36.1033 107.211 34.616 106.604 33.4015C105.814 31.7706 104.174 30.8169 102.543 30.1859C101.707 29.8739 100.854 29.61 99.9884 29.3955C95.9139 28.3205 91.63 27.9253 87.4382 27.7001C82.407 27.4225 77.3623 27.5061 72.343 27.9504C68.6071 28.2902 64.6723 28.7013 61.1221 29.9935C59.8245 30.4665 58.4875 31.0342 57.5008 32.0367C56.2902 33.2684 55.895 35.1733 56.7789 36.7092C57.4073 37.8 58.4717 38.5706 59.6006 39.0804C61.0711 39.7373 62.6068 40.2371 64.1822 40.5716C68.5689 41.5412 73.1124 41.9219 77.5939 42.0839C82.561 42.2844 87.5362 42.1219 92.4796 41.5978C93.7021 41.4635 94.9224 41.3023 96.1405 41.1144C97.575 40.8944 98.4958 39.0185 98.073 37.7117C97.5671 36.1494 96.2077 35.5434 94.6703 35.7792C94.4438 35.8148 94.2185 35.8477 93.9919 35.8807L93.8286 35.9044C93.3078 35.9702 92.787 36.0317 92.2662 36.0888C91.1904 36.2047 90.112 36.2996 89.0309 36.3733C86.6097 36.5419 84.1818 36.6197 81.7553 36.6236C79.371 36.6236 76.9853 36.5564 74.6062 
36.3997C73.5207 36.3285 72.4379 36.2381 71.3577 36.1283C70.8663 36.0769 70.3763 36.0229 69.8862 35.9623L69.4199 35.903L69.3185 35.8886L68.835 35.8187C67.847 35.6699 66.859 35.4986 65.8816 35.2918C65.783 35.2699 65.6947 35.2151 65.6315 35.1363C65.5683 35.0575 65.5338 34.9594 65.5338 34.8584C65.5338 34.7574 65.5683 34.6594 65.6315 34.5806C65.6947 34.5018 65.783 34.4469 65.8816 34.425H65.9C66.7471 34.2445 67.6007 34.0904 68.4569 33.956C68.7424 33.9113 69.0287 33.8673 69.3158 33.8243H69.3237C69.8599 33.7887 70.3987 33.6926 70.9322 33.6293C75.574 33.1465 80.2434 32.9819 84.9077 33.1367C87.1721 33.2025 89.4353 33.3356 91.6892 33.5648C92.174 33.6149 92.6562 33.6676 93.1383 33.7268C93.3227 33.7492 93.5085 33.7756 93.6942 33.798L94.0683 33.852C95.1591 34.0144 96.2441 34.2116 97.3234 34.4435C98.9227 34.7912 100.976 34.9045 101.688 36.6566C101.914 37.2125 102.017 37.8303 102.142 38.4139L102.302 39.1581C102.306 39.1715 102.309 39.1852 102.311 39.199C102.688 40.9554 103.065 42.7118 103.442 44.4683C103.47 44.598 103.471 44.7321 103.444 44.8621C103.418 44.9921 103.365 45.1153 103.289 45.2239C103.213 45.3326 103.115 45.4244 103.002 45.4936C102.889 45.5628 102.762 45.6079 102.631 45.6262H102.62L102.39 45.6578L102.162 45.6881C101.44 45.7821 100.717 45.8699 99.9936 45.9516C98.5683 46.114 97.1408 46.2546 95.711 46.3731C92.87 46.6094 90.0233 46.7644 87.1708 46.8381C85.7174 46.8768 84.2644 46.8948 82.8118 46.8921C77.0301 46.8876 71.2534 46.5516 65.5101 45.8857C64.8883 45.8119 64.2666 45.7329 63.6448 45.6525C64.1269 45.7145 63.2944 45.6051 63.1258 45.5814C62.7306 45.5261 62.3354 45.4686 61.9402 45.4088C60.6136 45.2099 59.295 44.9649 57.9711 44.7502C56.3705 44.4867 54.8398 44.6185 53.3921 45.4088C52.2037 46.0591 51.2419 47.0564 50.6349 48.2674C50.0105 49.5584 49.8248 50.964 49.5455 52.3511C49.2662 53.7383 48.8315 55.2308 48.9962 56.6548C49.3505 59.7281 51.4991 62.2258 54.5895 62.7843C57.4968 63.3112 60.42 63.7381 63.351 64.1016C74.8648 65.5118 86.4968 65.6805 98.0466 64.6049C98.9872 
64.517 99.9265 64.4213 100.864 64.3177C101.157 64.2855 101.454 64.3192 101.732 64.4165C102.01 64.5137 102.263 64.6719 102.472 64.8795C102.681 65.0872 102.842 65.339 102.941 65.6165C103.04 65.894 103.076 66.1902 103.046 66.4834L102.753 69.3261C102.164 75.0705 101.575 80.8145 100.986 86.558C100.371 92.5896 99.7521 98.6208 99.1295 104.651C98.9538 106.35 98.7782 108.048 98.6025 109.746C98.4339 111.417 98.4102 113.142 98.0927 114.794C97.5922 117.391 95.8335 118.987 93.2674 119.57C90.9164 120.105 88.5148 120.386 86.1038 120.408C83.431 120.422 80.7594 120.304 78.0866 120.318C75.2333 120.334 71.7384 120.071 69.5358 117.947C67.6007 116.082 67.3333 113.161 67.0698 110.636C66.7185 107.293 66.3703 103.95 66.0252 100.607L64.0887 82.0212L62.8359 69.9953C62.8149 69.7964 62.7938 69.6001 62.774 69.3999C62.6239 67.9654 61.6082 66.5611 60.0077 66.6335C58.6376 66.6941 57.0806 67.8586 57.2413 69.3999L58.17 78.3155L60.0906 96.7581C60.6378 101.997 61.1836 107.236 61.7281 112.476C61.8335 113.48 61.9323 114.487 62.0429 115.49C62.6449 120.976 66.834 123.932 72.0216 124.764C75.0515 125.252 78.1551 125.352 81.2297 125.402C85.1711 125.465 89.1521 125.617 93.029 124.903C98.7738 123.849 103.084 120.013 103.699 114.062C103.875 112.345 104.051 110.626 104.226 108.908C104.81 103.224 105.393 97.5397 105.976 91.855L107.88 73.2807L108.754 64.7682C108.797 64.3461 108.976 63.9492 109.262 63.6363C109.549 63.3234 109.929 63.111 110.345 63.0307C111.988 62.7105 113.558 62.1639 114.727 60.9137C116.587 58.9232 116.957 56.3281 116.299 53.7119ZM54.5052 55.5483C54.5302 55.5364 54.4841 55.7511 54.4644 55.8513C54.4604 55.6998 54.4683 55.5654 54.5052 55.5483ZM54.6646 56.7813C54.6778 56.7721 54.7173 56.8248 54.7581 56.888C54.6962 56.83 54.6567 56.7866 54.6633 56.7813H54.6646ZM54.8214 56.9881C54.878 57.0843 54.9083 57.1449 54.8214 56.9881V56.9881ZM55.1362 57.2437H55.1441C55.1441 57.2529 55.1586 57.2621 55.1639 57.2713C55.1551 57.2612 55.1454 57.2519 55.1349 57.2437H55.1362ZM110.269 56.8616C109.679 57.4228 108.789 
57.6837 107.911 57.8141C98.0572 59.2763 88.06 60.0166 78.0984 59.6899C70.9691 59.4462 63.9148 58.6545 56.8566 57.6573C56.165 57.5598 55.4155 57.4334 54.9399 56.9236C54.0441 55.9619 54.4841 54.0254 54.7173 52.8636C54.9307 51.7992 55.3391 50.3804 56.605 50.2289C58.581 49.9971 60.8758 50.8309 62.8307 51.1273C65.1843 51.4865 67.5467 51.7741 69.9179 51.9902C80.0375 52.9123 90.3271 52.7687 100.402 51.4198C102.238 51.173 104.068 50.8863 105.891 50.5596C107.516 50.2684 109.316 49.7218 110.298 51.404C110.971 52.55 111.06 54.0834 110.956 55.3783C110.924 55.9425 110.678 56.4732 110.267 56.8616H110.269Z' fill='%230D0C22'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M170.036 84.2397C169.461 85.3378 168.67 86.2942 167.663 87.1057C166.656 87.9178 165.482 88.579 164.139 89.0881C162.797 89.5984 161.446 89.9408 160.088 90.1153C158.729 90.2905 157.41 90.2753 156.133 90.0674C154.854 89.8608 153.766 89.439 152.872 88.8014L153.88 78.3397C154.806 78.0216 155.972 77.6949 157.379 77.3604C158.785 77.0264 160.231 76.787 161.718 76.644C163.205 76.5004 164.61 76.5173 165.937 76.6919C167.263 76.867 168.31 77.2888 169.077 77.9579C169.493 78.3397 169.845 78.7537 170.132 79.1997C170.42 79.6458 170.595 80.1076 170.66 80.5852C170.819 81.9227 170.612 83.1409 170.036 84.2397ZM155.413 61.9545C156.084 61.5406 156.892 61.1739 157.834 60.8551C158.777 60.5376 159.744 60.3139 160.735 60.1867C161.725 60.06 162.692 60.043 163.636 60.1388C164.578 60.2345 165.41 60.497 166.129 60.9267C166.848 61.357 167.383 61.9782 167.735 62.7897C168.086 63.6024 168.182 64.6296 168.022 65.8714C167.895 66.8587 167.502 67.695 166.848 68.3793C166.193 69.0647 165.393 69.6374 164.451 70.0993C163.508 70.5617 162.509 70.9277 161.455 71.1974C160.399 71.4689 159.384 71.6683 158.41 71.795C157.435 71.9229 156.588 72.0029 155.869 72.0338C155.15 72.0659 154.678 72.0816 154.454 72.0816L155.413 61.9545ZM175.214 77.4798C174.703 76.3658 174.016 75.3864 173.153 74.5416C172.29 73.698 171.266 73.0853 170.084 72.7029C170.595 72.2889 
171.099 71.6362 171.595 70.7441C172.09 69.8532 172.513 68.8811 172.865 67.8302C173.216 66.7787 173.457 65.7205 173.584 64.6533C173.711 63.5866 173.663 62.6709 173.441 61.906C172.896 59.9958 172.042 58.4988 170.875 57.4158C169.708 56.3334 168.35 55.5849 166.8 55.1704C165.249 54.7577 163.54 54.6692 161.67 54.908C159.8 55.1467 157.89 55.6164 155.941 56.317C155.941 56.1582 155.957 55.991 155.989 55.8158C156.02 55.6413 156.036 55.4576 156.036 55.2661C156.036 54.7886 155.797 54.3752 155.317 54.0243C154.838 53.674 154.287 53.4674 153.664 53.4031C153.04 53.3401 152.433 53.4746 151.841 53.8092C151.25 54.1437 150.842 54.7577 150.619 55.6479C150.363 58.5146 150.107 61.4927 149.852 64.5812C149.596 67.6708 149.324 70.792 149.037 73.9453C148.749 77.0979 148.461 80.227 148.174 83.3318C147.886 86.4372 147.598 89.4226 147.311 92.2886C147.407 93.1486 147.646 93.8177 148.03 94.2953C148.413 94.7734 148.861 95.0601 149.372 95.1553C149.883 95.251 150.419 95.1625 150.978 94.8922C151.537 94.6225 152.025 94.1516 152.441 93.4832C153.719 94.1838 155.158 94.6377 156.756 94.845C158.354 95.0516 159.975 95.0516 161.623 94.845C163.268 94.6377 164.89 94.248 166.488 93.6741C168.086 93.1013 169.541 92.3844 170.851 91.525C172.162 90.665 173.264 89.685 174.16 88.5869C175.054 87.4875 175.646 86.3014 175.933 85.0281C176.221 83.7221 176.301 82.4167 176.173 81.1106C176.045 79.8052 175.725 78.5955 175.214 77.4798Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M221.989 102.702C221.814 103.753 221.565 104.86 221.246 106.023C220.926 107.184 220.551 108.244 220.12 109.2C219.688 110.155 219.209 110.926 218.682 111.516C218.154 112.105 217.586 112.352 216.979 112.257C216.5 112.192 216.196 111.89 216.069 111.349C215.94 110.807 215.94 110.138 216.069 109.343C216.196 108.546 216.443 107.646 216.811 106.643C217.179 105.64 217.627 104.644 218.154 103.658C218.682 102.67 219.281 101.723 219.952 100.815C220.623 99.9082 221.326 99.1512 222.061 98.5464C222.221 98.7373 222.293 99.2149 222.277 
99.9797C222.26 100.744 222.165 101.652 221.989 102.702ZM238.243 81.9697C237.811 81.4921 237.284 81.2218 236.66 81.1576C236.037 81.0939 235.405 81.4442 234.767 82.2085C234.351 82.9727 233.823 83.7054 233.184 84.406C232.545 85.1072 231.882 85.7436 231.195 86.3169C230.507 86.8896 229.852 87.3841 229.229 87.7975C228.606 88.212 228.118 88.5144 227.767 88.7053C227.639 87.6866 227.566 86.5878 227.551 85.409C227.534 84.2308 227.559 83.0369 227.623 81.8266C227.718 80.1067 227.918 78.3715 228.222 76.6194C228.526 74.868 228.965 73.148 229.541 71.4595C229.541 70.5686 229.332 69.8438 228.917 69.2862C228.501 68.7293 227.998 68.3784 227.407 68.2353C226.815 68.0923 226.209 68.1717 225.585 68.4741C224.962 68.7771 224.427 69.3268 223.979 70.122C223.596 71.1735 223.156 72.3516 222.661 73.6571C222.165 74.9631 221.606 76.2928 220.983 77.6461C220.359 79.0006 219.664 80.3139 218.897 81.5873C218.13 82.8618 217.291 83.9927 216.38 84.9793C215.469 85.9666 214.478 86.7393 213.408 87.2963C212.336 87.8538 211.179 88.1005 209.932 88.0369C209.356 87.8775 208.94 87.4478 208.685 86.7466C208.429 86.0466 208.277 85.1702 208.23 84.1193C208.182 83.0684 208.23 81.9139 208.373 80.6557C208.517 79.3982 208.709 78.1479 208.949 76.9061C209.188 75.6637 209.452 74.4855 209.739 73.371C210.027 72.2565 210.298 71.3165 210.554 70.5523C210.938 69.6292 210.938 68.8559 210.554 68.2353C210.171 67.6141 209.644 67.2008 208.973 66.9929C208.302 66.7863 207.598 66.7947 206.863 67.0172C206.128 67.2402 205.6 67.7335 205.281 68.4977C204.737 69.8044 204.241 71.2686 203.794 72.8928C203.347 74.5171 202.987 76.1976 202.716 77.9328C202.444 79.6691 202.291 81.3891 202.26 83.0927C202.258 83.2036 202.263 83.309 202.263 83.4193C201.566 85.2708 200.902 86.6702 200.271 87.6066C199.456 88.8174 198.536 89.3429 197.514 89.1829C197.065 88.992 196.771 88.5465 196.627 87.8453C196.482 87.1453 196.435 86.2854 196.482 85.2654C196.531 84.2472 196.651 83.0927 196.842 81.8024C197.035 80.5127 197.273 79.1752 197.561 77.7897C197.849 76.4037 198.153 
75.0116 198.472 73.6098C198.792 72.2086 199.079 70.8868 199.336 69.6444C199.304 68.5299 198.976 67.6784 198.352 67.0887C197.73 66.5002 196.858 66.2693 195.74 66.396C194.973 66.7147 194.405 67.1293 194.038 67.6384C193.67 68.1474 193.374 68.8008 193.151 69.5965C193.022 70.0111 192.831 70.8389 192.575 72.0813C192.319 73.3225 191.992 74.7486 191.592 76.3564C191.193 77.9655 190.721 79.6449 190.178 81.3963C189.635 83.1478 189.027 84.7333 188.357 86.1496C187.685 87.5666 186.95 88.7053 186.151 89.5653C185.352 90.4247 184.489 90.7756 183.562 90.6162C183.05 90.5205 182.723 89.995 182.579 89.0399C182.435 88.0841 182.412 86.9066 182.507 85.5048C182.603 84.1036 182.795 82.5666 183.082 80.8951C183.37 79.223 183.665 77.6388 183.969 76.1413C184.273 74.6449 184.553 73.3225 184.809 72.1765C185.064 71.0298 185.24 70.2656 185.336 69.8838C185.336 68.9602 185.127 68.2202 184.713 67.662C184.297 67.1056 183.794 66.7547 183.202 66.6111C182.61 66.4681 182.003 66.5475 181.381 66.8499C180.757 67.1529 180.222 67.7026 179.774 68.4977C179.614 69.3577 179.406 70.3535 179.151 71.4838C178.895 72.614 178.648 73.7765 178.408 74.971C178.168 76.1655 177.944 77.3358 177.737 78.4824C177.529 79.6291 177.377 80.6321 177.281 81.4921C177.217 82.1606 177.145 82.9812 177.066 83.9521C176.985 84.9242 176.945 85.9508 176.945 87.0332C176.945 88.1169 177.025 89.1914 177.186 90.258C177.345 91.3253 177.633 92.3047 178.048 93.1956C178.463 94.0877 179.047 94.8198 179.799 95.3931C180.549 95.9664 181.5 96.2846 182.651 96.3489C183.833 96.4119 184.864 96.3252 185.744 96.0858C186.622 95.847 187.421 95.4725 188.141 94.9628C188.86 94.4543 189.515 93.8489 190.107 93.1477C190.697 92.4477 191.281 91.6835 191.856 90.855C192.4 92.0659 193.103 93.0047 193.966 93.6737C194.829 94.3422 195.74 94.741 196.699 94.8677C197.657 94.9943 198.633 94.8604 199.624 94.4616C200.614 94.064 201.509 93.3871 202.308 92.4313C202.835 91.8453 203.331 91.1792 203.797 90.4429C203.995 90.7877 204.205 91.1204 204.442 91.4277C205.225 92.4477 206.288 93.1477 
207.631 93.5301C209.069 93.9125 210.474 93.9768 211.849 93.7216C213.223 93.4671 214.534 93.0047 215.78 92.3362C217.027 91.6671 218.185 90.8635 219.257 89.9235C220.327 88.9841 221.262 88.0053 222.061 86.9854C222.029 87.7181 222.013 88.4114 222.013 89.0635C222.013 89.7168 221.997 90.4247 221.966 91.1895C220.367 92.3047 218.857 93.6422 217.435 95.2022C216.012 96.7622 214.765 98.4264 213.695 100.194C212.624 101.961 211.785 103.753 211.179 105.568C210.571 107.384 210.275 109.08 210.291 110.657C210.307 112.233 210.682 113.61 211.418 114.788C212.152 115.967 213.351 116.81 215.013 117.32C216.74 117.862 218.257 117.877 219.569 117.368C220.879 116.858 222.021 116.014 222.996 114.836C223.971 113.658 224.77 112.233 225.394 110.561C226.017 108.889 226.512 107.145 226.88 105.33C227.247 103.515 227.479 101.73 227.575 99.9797C227.671 98.2276 227.671 96.6664 227.575 95.2974C230.324 94.1513 232.577 92.7022 234.335 90.9501C236.093 89.1999 237.547 87.352 238.698 85.409C239.049 84.9314 239.169 84.3581 239.058 83.6896C238.945 83.0206 238.674 82.4472 238.243 81.9697Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M298.724 78.9135C298.82 78.1814 298.964 77.4087 299.155 76.5966C299.347 75.7845 299.587 74.996 299.875 74.2318C300.162 73.4676 300.498 72.807 300.882 72.2494C301.265 71.6924 301.673 71.2943 302.104 71.0549C302.536 70.8167 302.974 70.8403 303.423 71.1264C303.902 71.4137 304.197 72.0185 304.31 72.9415C304.421 73.8663 304.31 74.853 303.974 75.9039C303.638 76.9554 303.039 77.942 302.176 78.8657C301.313 79.7899 300.146 80.3941 298.676 80.6808C298.612 80.236 298.628 79.6463 298.724 78.9135ZM315.336 80.8717C314.809 80.7135 314.306 80.6972 313.826 80.8244C313.347 80.9517 313.043 81.2862 312.916 81.8281C312.659 82.8468 312.251 83.8898 311.692 84.9565C311.133 86.0238 310.446 87.0346 309.632 87.9904C308.817 88.9455 307.897 89.7898 306.875 90.5219C305.851 91.2546 304.781 91.78 303.662 92.0982C302.543 92.4491 301.616 92.4885 300.882 92.2176C300.146 91.9479 299.563 
91.4855 299.132 90.8328C298.7 90.1801 298.388 89.3916 298.197 88.468C298.005 87.5443 297.893 86.5892 297.861 85.6013C299.683 85.7292 301.305 85.4032 302.728 84.622C304.149 83.8426 305.356 82.8068 306.347 81.5171C307.337 80.2275 308.089 78.7784 308.6 77.1699C309.111 75.5621 309.399 73.9615 309.463 72.3688C309.495 70.8718 309.272 69.6064 308.792 68.5713C308.313 67.5367 307.665 66.7313 306.85 66.1586C306.036 65.5853 305.1 65.2507 304.046 65.1556C302.992 65.0598 301.92 65.2034 300.833 65.5853C299.522 66.0313 298.412 66.7555 297.501 67.7592C296.59 68.7622 295.831 69.9252 295.224 71.2464C294.617 72.5682 294.137 73.993 293.786 75.5215C293.434 77.0505 293.178 78.5554 293.019 80.0366C292.875 81.3656 292.798 82.6365 292.771 83.8632C292.702 84.0189 292.636 84.1686 292.563 84.3353C292.067 85.4668 291.491 86.5734 290.837 87.6558C290.182 88.7389 289.454 89.6467 288.656 90.3788C287.857 91.1116 287.026 91.3661 286.163 91.1431C285.651 91.0164 285.372 90.4261 285.324 89.3758C285.276 88.3243 285.331 87.0189 285.491 85.4583C285.651 83.8983 285.835 82.2093 286.043 80.3941C286.25 78.579 286.354 76.8439 286.354 75.1875C286.354 73.7542 286.082 72.3773 285.539 71.0549C284.995 69.7343 284.252 68.6349 283.31 67.7592C282.367 66.8828 281.272 66.3016 280.026 66.0156C278.779 65.7283 277.437 65.9198 275.999 66.5883C274.56 67.2574 273.417 68.1967 272.571 69.407C271.723 70.6179 270.948 71.8912 270.245 73.2288C269.989 72.2094 269.614 71.2628 269.118 70.3864C268.623 69.5107 268.016 68.7464 267.297 68.0931C266.577 67.441 265.769 66.9313 264.876 66.5646C263.981 66.1992 263.037 66.0156 262.046 66.0156C261.088 66.0156 260.201 66.1992 259.386 66.5646C258.571 66.9313 257.828 67.4004 257.156 67.9737C256.485 68.5476 255.878 69.1919 255.334 69.9088C254.791 70.6252 254.311 71.3343 253.896 72.0343C253.831 71.2064 253.76 70.4822 253.681 69.8603C253.6 69.2398 253.456 68.7143 253.249 68.2846C253.041 67.8543 252.746 67.5283 252.362 67.3052C251.978 67.0828 251.435 66.9707 250.732 66.9707C250.38 66.9707 250.028 
67.0422 249.677 67.1852C249.325 67.3289 249.013 67.5283 248.742 67.7828C248.47 68.0386 248.263 68.3482 248.119 68.7143C247.975 69.0804 247.936 69.5028 247.999 69.9803C248.031 70.3312 248.119 70.7525 248.263 71.2464C248.406 71.7403 248.542 72.3858 248.67 73.1809C248.798 73.9773 248.902 74.9409 248.982 76.0712C249.062 77.2021 249.085 78.5875 249.054 80.2275C249.021 81.8681 248.902 83.7862 248.694 85.9837C248.486 88.1813 248.158 90.7291 247.711 93.6267C247.647 94.2957 247.903 94.8376 248.479 95.2515C249.054 95.6648 249.709 95.9036 250.444 95.9678C251.179 96.0315 251.875 95.9036 252.53 95.586C253.185 95.2666 253.561 94.7097 253.656 93.9139C253.752 92.417 253.936 90.8249 254.208 89.1364C254.479 87.4492 254.815 85.7771 255.215 84.1207C255.614 82.465 256.069 80.8887 256.581 79.3911C257.092 77.8942 257.66 76.573 258.283 75.4263C258.907 74.2797 259.554 73.3645 260.225 72.6797C260.896 71.9949 261.599 71.6524 262.335 71.6524C263.229 71.6524 263.924 72.0579 264.42 72.87C264.915 73.6827 265.266 74.7263 265.475 75.999C265.682 77.2736 265.778 78.6675 265.763 80.1796C265.746 81.6923 265.682 83.1492 265.571 84.5504C265.459 85.9522 265.331 87.2019 265.187 88.3007C265.043 89.3995 264.939 90.1564 264.876 90.5697C264.876 91.3025 265.155 91.8831 265.714 92.3134C266.273 92.743 266.896 92.9982 267.584 93.0776C268.272 93.1576 268.918 93.0297 269.526 92.6952C270.133 92.3606 270.485 91.7964 270.581 90.9994C270.9 88.7067 271.34 86.4062 271.899 84.0971C272.458 81.7881 273.098 79.7184 273.817 77.8869C274.536 76.0554 275.335 74.5585 276.214 73.3961C277.093 72.2343 278.028 71.6524 279.019 71.6524C279.53 71.6524 279.922 72.0033 280.193 72.7033C280.465 73.4039 280.601 74.3591 280.601 75.5694C280.601 76.4615 280.529 77.3772 280.386 78.3166C280.241 79.256 280.074 80.2275 279.882 81.2305C279.69 82.2341 279.522 83.2608 279.378 84.3117C279.235 85.3632 279.163 86.4613 279.163 87.608C279.163 88.4043 279.243 89.3279 279.403 90.3788C279.562 91.4291 279.865 92.4255 280.313 93.3642C280.761 94.3042 281.376 
95.1 282.16 95.7527C282.943 96.4054 283.941 96.7321 285.155 96.7321C286.978 96.7321 288.591 96.3418 289.998 95.5618C291.404 94.7818 292.611 93.763 293.618 92.5049C293.67 92.4388 293.718 92.3685 293.769 92.3031C293.846 92.4891 293.914 92.6861 294.001 92.863C294.688 94.2642 295.623 95.3466 296.806 96.1115C297.988 96.8757 299.379 97.2975 300.978 97.3775C302.575 97.4563 304.317 97.1618 306.204 96.4933C307.609 95.9836 308.832 95.3466 309.871 94.5824C310.909 93.8182 311.844 92.8867 312.675 91.7879C313.507 90.6891 314.265 89.4231 314.953 87.9904C315.641 86.5565 316.335 84.9171 317.038 83.0692C317.166 82.5608 317.046 82.1068 316.679 81.7081C316.311 81.3105 315.864 81.0317 315.336 80.8717Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M341.393 75.5432C341.233 76.4832 341.018 77.5189 340.746 78.6486C340.474 79.7795 340.131 80.9498 339.715 82.1601C339.3 83.3703 338.788 84.4612 338.181 85.4321C337.574 86.4042 336.878 87.1757 336.096 87.7491C335.312 88.3224 334.41 88.5612 333.387 88.4654C332.875 88.4024 332.483 88.0521 332.212 87.4145C331.94 86.7782 331.797 85.9655 331.78 84.9782C331.764 83.9915 331.852 82.9085 332.044 81.7298C332.236 80.5522 332.531 79.3971 332.932 78.2662C333.331 77.1365 333.818 76.0929 334.393 75.1371C334.969 74.182 335.632 73.4414 336.383 72.916C337.134 72.3905 337.958 72.1445 338.852 72.1754C339.747 72.2075 340.706 72.6529 341.729 73.5129C341.664 73.9275 341.553 74.6044 341.393 75.5432ZM358.437 79.1977C357.941 78.9431 357.43 78.888 356.903 79.031C356.376 79.174 356 79.6601 355.777 80.488C355.649 81.3801 355.361 82.4304 354.914 83.6406C354.466 84.8509 353.914 85.9982 353.26 87.08C352.604 88.163 351.853 89.063 351.006 89.7793C350.159 90.4963 349.256 90.823 348.298 90.7581C347.498 90.6951 346.938 90.289 346.62 89.5406C346.299 88.7921 346.132 87.8533 346.116 86.7218C346.099 85.5921 346.212 84.3182 346.451 82.9007C346.691 81.4837 346.979 80.0746 347.314 78.6722C347.65 77.2716 347.994 75.9256 348.346 74.6359C348.697 73.3463 348.984 
72.2554 349.209 71.3639C349.464 70.5675 349.384 69.8912 348.969 69.333C348.553 68.7766 348.034 68.3778 347.411 68.1391C346.787 67.9003 346.155 67.8366 345.516 67.9481C344.877 68.0597 344.462 68.4021 344.27 68.9748C342.384 67.3506 340.57 66.4748 338.829 66.3476C337.086 66.2203 335.48 66.6027 334.01 67.4942C332.539 68.3857 331.237 69.6754 330.103 71.3639C328.968 73.0523 328.049 74.8911 327.345 76.8814C326.642 78.8716 326.203 80.9025 326.027 82.9722C325.851 85.0424 325.987 86.9297 326.435 88.6333C326.883 90.3369 327.673 91.7308 328.808 92.8126C329.942 93.8956 331.485 94.4375 333.435 94.4375C334.298 94.4375 335.129 94.2623 335.928 93.912C336.726 93.5611 337.462 93.1472 338.133 92.6696C338.804 92.192 339.395 91.6902 339.908 91.1648C340.418 90.6393 340.818 90.2018 341.106 89.8509C341.329 90.9975 341.697 91.9696 342.209 92.7654C342.719 93.5611 343.303 94.215 343.958 94.7235C344.613 95.2326 345.301 95.6071 346.02 95.8465C346.739 96.0853 347.435 96.2047 348.105 96.2047C349.608 96.2047 351.013 95.695 352.325 94.6756C353.635 93.6575 354.81 92.4066 355.849 90.926C356.887 89.4448 357.743 87.8848 358.413 86.2442C359.085 84.6043 359.532 83.1473 359.756 81.8728C359.98 81.3952 359.939 80.894 359.636 80.3686C359.332 79.8431 358.933 79.4534 358.437 79.1977Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M444.738 105.571C444.467 106.653 444.043 107.57 443.467 108.318C442.892 109.066 442.173 109.456 441.31 109.489C440.767 109.52 440.351 109.233 440.063 108.629C439.776 108.023 439.576 107.243 439.464 106.288C439.352 105.332 439.304 104.265 439.32 103.087C439.336 101.909 439.384 100.746 439.464 99.5996C439.543 98.4536 439.64 97.3857 439.752 96.3991C439.863 95.4112 439.951 94.6482 440.015 94.1064C441.102 94.2336 442.006 94.7027 442.724 95.5154C443.443 96.3275 443.995 97.2906 444.378 98.4057C444.762 99.5202 444.985 100.723 445.05 102.012C445.113 103.302 445.009 104.488 444.738 105.571ZM427.382 105.571C427.111 106.653 426.687 107.57 426.112 108.318C425.537 109.066 
424.817 109.456 423.954 109.489C423.411 109.52 422.996 109.233 422.708 108.629C422.42 108.023 422.22 107.243 422.109 106.288C421.996 105.332 421.948 104.265 421.965 103.087C421.98 101.909 422.028 100.746 422.109 99.5996C422.188 98.4536 422.284 97.3857 422.396 96.3991C422.508 95.4112 422.595 94.6482 422.66 94.1064C423.746 94.2336 424.65 94.7027 425.368 95.5154C426.088 96.3275 426.639 97.2906 427.023 98.4057C427.407 99.5202 427.63 100.723 427.694 102.012C427.757 103.302 427.653 104.488 427.382 105.571ZM409.572 78.4375C409.539 79.2011 409.467 79.8781 409.355 80.4672C409.243 81.0575 409.092 81.4308 408.9 81.5902C408.548 81.3987 408.116 80.906 407.605 80.109C407.094 79.3133 406.695 78.4127 406.406 77.4096C406.119 76.4066 406.03 75.42 406.143 74.4479C406.254 73.477 406.758 72.7212 407.653 72.1788C408.004 71.9879 408.308 72.0594 408.564 72.394C408.82 72.7285 409.027 73.2139 409.188 73.8509C409.347 74.4885 409.458 75.2206 409.523 76.0485C409.587 76.8769 409.603 77.6727 409.572 78.4375ZM405.328 87.9677C404.832 88.4925 404.28 88.9464 403.674 89.3289C403.066 89.7113 402.443 89.9979 401.804 90.1889C401.164 90.3804 400.589 90.4276 400.078 90.3319C398.64 90.0458 397.537 89.424 396.77 88.4689C396.003 87.5137 395.515 86.3913 395.308 85.1017C395.1 83.8114 395.123 82.4338 395.38 80.969C395.635 79.5042 396.066 78.143 396.674 76.8848C397.281 75.6266 398.017 74.5436 398.879 73.6364C399.742 72.7285 400.685 72.1637 401.708 71.94C401.324 73.5642 401.197 75.2448 401.324 76.98C401.452 78.7157 401.868 80.3478 402.571 81.8762C403.018 82.8011 403.554 83.6441 404.177 84.4083C404.801 85.1732 405.56 85.8259 406.455 86.3671C406.199 86.9089 405.823 87.4422 405.328 87.9677ZM458.378 78.9151C458.474 78.183 458.617 77.4096 458.81 76.5975C459.001 75.786 459.241 74.9976 459.528 74.2333C459.816 73.4685 460.152 72.8079 460.536 72.2509C460.92 71.694 461.326 71.2952 461.758 71.0564C462.19 70.8176 462.629 70.8413 463.076 71.1279C463.556 71.4152 463.851 72.02 463.963 72.943C464.075 73.8673 463.963 74.8539 
463.628 75.9054C463.292 76.9563 462.693 77.9436 461.83 78.8666C460.968 79.7914 459.8 80.3957 458.33 80.6823C458.266 80.2369 458.282 79.6478 458.378 78.9151ZM477.7 78.9151C477.796 78.183 477.939 77.4096 478.131 76.5975C478.323 75.786 478.563 74.9976 478.851 74.2333C479.138 73.4685 479.473 72.8079 479.857 72.2509C480.241 71.694 480.649 71.2952 481.08 71.0564C481.512 70.8176 481.951 70.8413 482.398 71.1279C482.878 71.4152 483.173 72.02 483.285 72.943C483.397 73.8673 483.285 74.8539 482.95 75.9054C482.614 76.9563 482.015 77.9436 481.152 78.8666C480.289 79.7914 479.122 80.3957 477.652 80.6823C477.588 80.2369 477.604 79.6478 477.7 78.9151ZM495.655 81.7096C495.287 81.312 494.84 81.0332 494.313 80.8732C493.785 80.7144 493.282 80.6987 492.802 80.826C492.323 80.9532 492.018 81.2878 491.891 81.829C491.635 82.8484 491.228 83.8914 490.669 84.9574C490.109 86.0253 489.422 87.0362 488.607 87.9913C487.792 88.9464 486.873 89.7913 485.851 90.5234C484.827 91.2561 483.757 91.7816 482.639 92.0991C481.519 92.4506 480.592 92.49 479.857 92.2191C479.122 91.9488 478.539 91.487 478.107 90.8343C477.676 90.181 477.365 89.3931 477.172 88.4689C476.981 87.5459 476.868 86.5907 476.837 85.6029C478.659 85.7307 480.281 85.4047 481.703 84.6235C483.125 83.8435 484.332 82.8077 485.324 81.5181C486.314 80.229 487.065 78.7799 487.576 77.1715C488.087 75.563 488.375 73.963 488.44 72.3703C488.471 70.8734 488.247 69.6073 487.768 68.5722C487.289 67.5377 486.642 66.7328 485.827 66.1601C485.011 65.5862 484.077 65.2522 483.021 65.1565C481.967 65.0607 480.896 65.205 479.809 65.5862C478.498 66.0328 477.388 66.7571 476.478 67.7601C475.567 68.7637 474.807 69.9267 474.2 71.2473C473.592 72.5697 473.113 73.9939 472.761 75.523C472.409 77.0515 472.154 78.5569 471.995 80.0375C471.839 81.4744 471.755 82.8496 471.736 84.1659C471.615 84.4283 471.486 84.692 471.347 84.9574C470.787 86.0253 470.1 87.0362 469.285 87.9913C468.471 88.9464 467.551 89.7913 466.529 90.5234C465.506 91.2561 464.435 91.7816 463.317 92.0991C462.197 92.4506 
461.271 92.49 460.536 92.2191C459.8 91.9488 459.217 91.487 458.786 90.8343C458.355 90.181 458.043 89.3931 457.851 88.4689C457.659 87.5459 457.547 86.5907 457.515 85.6029C459.337 85.7307 460.959 85.4047 462.382 84.6235C463.803 83.8435 465.01 82.8077 466.001 81.5181C466.992 80.229 467.743 78.7799 468.254 77.1715C468.765 75.563 469.054 73.963 469.117 72.3703C469.149 70.8734 468.926 69.6073 468.447 68.5722C467.967 67.5377 467.319 66.7328 466.504 66.1601C465.689 65.5862 464.755 65.2522 463.7 65.1565C462.645 65.0607 461.574 65.205 460.488 65.5862C459.176 66.0328 458.066 66.7571 457.156 67.7601C456.245 68.7637 455.485 69.9267 454.878 71.2473C454.271 72.5697 453.792 73.9939 453.44 75.523C453.088 77.0515 452.832 78.5569 452.673 80.0375C452.582 80.8726 452.522 81.6823 452.477 82.4774C452.168 82.7393 451.867 83.0029 451.546 83.2617C450.444 84.1538 449.284 84.9574 448.07 85.6744C446.855 86.3913 445.592 86.9804 444.283 87.4422C442.971 87.904 441.629 88.1828 440.255 88.278L443.228 56.5578C443.42 55.8887 443.324 55.3003 442.94 54.7906C442.557 54.2809 442.061 53.9306 441.454 53.7397C440.847 53.5482 440.199 53.5645 439.512 53.787C438.824 54.0106 438.258 54.5203 437.81 55.3154C437.586 56.5263 437.354 58.182 437.115 60.2838C436.875 62.3856 436.635 64.6789 436.396 67.1631C436.156 69.6473 435.916 72.2109 435.677 74.8539C435.437 77.4981 435.229 79.966 435.053 82.2587C435.045 82.3605 435.039 82.4526 435.031 82.5532C434.751 82.7896 434.48 83.0277 434.19 83.2617C433.088 84.1538 431.928 84.9574 430.714 85.6744C429.499 86.3913 428.237 86.9804 426.927 87.4422C425.616 87.904 424.273 88.1828 422.899 88.278L425.872 56.5578C426.064 55.8887 425.968 55.3003 425.585 54.7906C425.201 54.2809 424.705 53.9306 424.098 53.7397C423.491 53.5482 422.843 53.5645 422.156 53.787C421.469 54.0106 420.902 54.5203 420.454 55.3154C420.23 56.5263 419.999 58.182 419.76 60.2838C419.519 62.3856 419.28 64.6789 419.04 67.1631C418.8 69.6473 418.561 72.2109 418.321 74.8539C418.082 77.4981 417.873 79.966 417.698 
82.2587C417.694 82.3047 417.691 82.3465 417.687 82.3926C417.185 82.6247 416.638 82.8284 416.043 82.9993C415.436 83.175 414.749 83.2786 413.982 83.3102C414.11 82.7362 414.213 82.0993 414.293 81.3987C414.373 80.6987 414.438 79.966 414.486 79.2011C414.534 78.4375 414.549 77.6727 414.534 76.9084C414.517 76.1436 414.477 75.4436 414.414 74.806C414.253 73.4376 413.958 72.1394 413.527 70.9128C413.095 69.6873 412.512 68.6607 411.777 67.8316C411.041 67.0037 410.123 66.4462 409.019 66.1601C407.917 65.8734 406.63 65.9686 405.161 66.4462C402.986 66.1601 401.029 66.3595 399.287 67.0437C397.545 67.7292 396.034 68.7237 394.756 70.0291C393.478 71.3358 392.431 72.8715 391.616 74.6394C390.801 76.4066 390.257 78.2224 389.986 80.0848C389.871 80.8744 389.815 81.6605 389.798 82.4447C389.303 83.4544 388.761 84.3368 388.164 85.0774C387.317 86.1283 386.438 86.9883 385.527 87.6568C384.616 88.3258 383.713 88.8355 382.819 89.1858C381.923 89.5367 381.124 89.7755 380.421 89.9022C379.59 90.0616 378.791 90.0779 378.024 89.9501C377.257 89.8234 376.553 89.4567 375.915 88.8513C375.403 88.4058 375.011 87.6889 374.74 86.7016C374.468 85.7144 374.309 84.5926 374.261 83.3338C374.213 82.0756 374.261 80.7617 374.404 79.3926C374.548 78.0236 374.795 76.7254 375.147 75.4994C375.499 74.2733 375.945 73.1746 376.49 72.2024C377.032 71.2322 377.672 70.5388 378.408 70.1249C378.822 70.1891 379.079 70.4352 379.175 70.8649C379.271 71.2952 379.294 71.8049 379.246 72.394C379.199 72.9836 379.127 73.5885 379.031 74.2091C378.935 74.8303 378.887 75.3485 378.887 75.7618C379.047 76.6218 379.358 77.2909 379.822 77.7684C380.285 78.246 380.805 78.5254 381.38 78.6042C381.955 78.6842 382.522 78.549 383.083 78.1981C383.641 77.8484 384.096 77.2909 384.449 76.526C384.48 76.5581 384.528 76.5739 384.592 76.5739L385.264 70.5073C385.455 69.6788 385.327 68.9467 384.88 68.3098C384.432 67.6728 383.841 67.3062 383.106 67.211C382.179 65.8734 380.924 65.165 379.342 65.085C377.76 65.0056 376.138 65.5231 374.476 66.6377C373.453 67.371 372.55 
68.3813 371.767 69.671C370.983 70.9613 370.345 72.394 369.85 73.9703C369.353 75.5466 369.002 77.2115 368.795 78.963C368.587 80.7144 368.547 82.4187 368.674 84.0738C368.802 85.7307 369.098 87.2913 369.562 88.7555C370.025 90.221 370.672 91.447 371.504 92.4337C372.207 93.2937 373.005 93.9233 373.9 94.3215C374.795 94.7197 375.73 94.9658 376.705 95.0615C377.68 95.1567 378.647 95.1167 379.606 94.9421C380.565 94.7676 381.476 94.5209 382.339 94.2015C383.457 93.7882 384.609 93.2621 385.791 92.6252C386.973 91.9888 388.108 91.224 389.195 90.3319C389.767 89.8628 390.317 89.3513 390.849 88.8028C391.091 89.4016 391.362 89.981 391.688 90.5234C392.551 91.9561 393.717 93.1191 395.188 94.0106C396.657 94.9021 398.464 95.3312 400.605 95.3003C402.907 95.2682 405.032 94.6876 406.982 93.5567C408.932 92.427 410.53 90.7616 411.777 88.5646C413.644 88.5646 415.481 88.258 417.287 87.6489C417.272 87.8416 417.256 88.0446 417.242 88.2307C417.115 89.9186 417.05 91.0646 417.05 91.67C417.019 92.7209 416.947 94.0185 416.835 95.5627C416.723 97.1075 416.651 98.7318 416.619 100.435C416.588 102.139 416.651 103.859 416.811 105.595C416.971 107.33 417.306 108.907 417.818 110.325C418.328 111.741 419.055 112.944 419.999 113.932C420.941 114.918 422.18 115.508 423.715 115.699C425.345 115.921 426.751 115.635 427.934 114.839C429.116 114.042 430.075 112.952 430.811 111.567C431.546 110.181 432.064 108.581 432.369 106.766C432.672 104.95 432.76 103.127 432.633 101.295C432.504 99.4639 432.168 97.7366 431.625 96.113C431.082 94.4882 430.33 93.1506 429.372 92.0991C429.948 91.9409 430.634 91.6385 431.434 91.1919C432.232 90.7464 433.055 90.2446 433.903 89.687C434.111 89.5501 434.316 89.4058 434.524 89.2652C434.446 90.3937 434.406 91.1985 434.406 91.67C434.375 92.7209 434.303 94.0185 434.19 95.5627C434.079 97.1075 434.007 98.7318 433.975 100.435C433.943 102.139 434.007 103.859 434.167 105.595C434.326 107.33 434.662 108.907 435.173 110.325C435.684 111.741 436.412 112.944 437.354 113.932C438.297 114.918 439.536 115.508 
441.071 115.699C442.7 115.921 444.106 115.635 445.289 114.839C446.472 114.042 447.431 112.952 448.166 111.567C448.901 110.181 449.42 108.581 449.724 106.766C450.028 104.95 450.115 103.127 449.988 101.295C449.86 99.4639 449.524 97.7366 448.982 96.113C448.437 94.4882 447.687 93.1506 446.727 92.0991C447.303 91.9409 447.99 91.6385 448.789 91.1919C449.588 90.7464 450.411 90.2446 451.259 89.687C451.699 89.3974 452.136 89.0986 452.573 88.7913C452.737 90.3488 453.091 91.7149 453.655 92.864C454.343 94.2658 455.277 95.3482 456.46 96.113C457.642 96.8766 459.033 97.299 460.632 97.3784C462.23 97.4572 463.971 97.1633 465.858 96.4942C467.264 95.9851 468.486 95.3482 469.525 94.5839C470.563 93.8191 471.498 92.8876 472.33 91.7894C472.378 91.7258 472.423 91.6567 472.47 91.5925C472.618 92.0385 472.782 92.467 472.977 92.864C473.665 94.2658 474.6 95.3482 475.782 96.113C476.964 96.8766 478.355 97.299 479.953 97.3784C481.551 97.4572 483.293 97.1633 485.179 96.4942C486.586 95.9851 487.808 95.3482 488.847 94.5839C489.885 93.8191 490.82 92.8876 491.652 91.7894C492.483 90.6901 493.241 89.424 493.929 87.9913C494.616 86.558 495.311 84.9186 496.015 83.0708C496.142 82.5617 496.022 82.1078 495.655 81.7096Z' fill='%230D0C23'/%3E%3C/svg%3E%0A");border-radius:6px;box-shadow:0px 2px 3px rgba(0,0,0,0.1)}:root[data-color="dark"] .btn-buymeacoffee,:root[data-color="night"] .btn-buymeacoffee{box-shadow:0px 2px 3px rgba(255,255,255,0.1)}.btn-close{background:var(--background-fg);border:1px dotted var(--border-color);border-radius:4px;cursor:pointer}.dropdown{position:relative}.dropdown-btn{display:flex;flex-direction:row;box-shadow:var(--box-shadow);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap}.dropdown-btn 
.icon-select{opacity:.4}.dropdown-menu{display:none;position:absolute;right:0;top:34px;min-width:100px;max-height:240px;overflow-x:auto;background:var(--background);color:var(--color3);box-shadow:var(--box-shadow2);z-index:1;border-radius:6px;padding:3px}.dropdown-menu.show{display:block}.dropdown-menu button,.dropdown-menu a{width:100%;display:flex;gap:2px;padding:6px;align-items:center;justify-content:center;cursor:pointer}.dropdown-menu button:hover,.dropdown-menu a:hover{background:var(--background-fg)}.chroma{font-size:.9em;color:var(--chroma-base05);background-color:var(--chroma-base00);border-radius:6px;padding:16px 24px;overflow-x:auto}.chroma .x{color:var(--chroma-base05)}.chroma .err{color:var(--chroma-base08)}.chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}.chroma .lntable{border-spacing:0;padding:0;margin:0;border:0;width:auto;overflow:auto;display:block}.chroma .hl{display:block;width:100%;background-color:var(--chroma-base02)}.chroma .lnt{margin-right:0.4em;padding:0 0.4em 0 0.4em}.chroma .ln{margin-right:0.4em;padding:0 0.4em 0 0.4em;border-right:1px solid var(--chroma-base0A)}.chroma .line{display:flex}.chroma .k{color:var(--chroma-base0E)}.chroma .kc{color:var(--chroma-base0E)}.chroma .kd{color:var(--chroma-base0E)}.chroma .kn{color:var(--chroma-base0E)}.chroma .kp{color:var(--chroma-base0D)}.chroma .kr{color:var(--chroma-base0E)}.chroma .kt{color:var(--chroma-base0E)}.chroma .n{color:var(--chroma-base05)}.chroma .na{color:var(--chroma-base05)}.chroma .nb{color:var(--chroma-base0D)}.chroma .bp{color:var(--chroma-base0D)}.chroma .nc{color:var(--chroma-base0A)}.chroma .no{color:var(--chroma-base09)}.chroma .nd{color:var(--chroma-base09)}.chroma .ni{color:var(--chroma-base0A)}.chroma .ne{color:var(--chroma-base0A)}.chroma .nf{color:var(--chroma-base05)}.chroma .fm{color:var(--chroma-base05)}.chroma .nl{color:var(--chroma-base08)}.chroma .nn{color:var(--chroma-base0A)}.chroma .nx{color:var(--chroma-base0D)}.chroma 
.py{color:var(--chroma-base08)}.chroma .nt{color:var(--chroma-base0D)}.chroma .nv{color:var(--chroma-base0D)}.chroma .vc{color:var(--chroma-base0D)}.chroma .vg{color:var(--chroma-base0D)}.chroma .vi{color:var(--chroma-base08)}.chroma .vm{color:var(--chroma-base0D)}.chroma .l{color:var(--chroma-base0B)}.chroma .ld{color:var(--chroma-base0B)}.chroma .s{color:var(--chroma-base0B)}.chroma .sa{color:var(--chroma-base0B)}.chroma .sb{color:var(--chroma-base0B)}.chroma .sc{color:var(--chroma-base0B)}.chroma .dl{color:var(--chroma-base0F)}.chroma .sd{color:var(--chroma-base03)}.chroma .s2{color:var(--chroma-base0B)}.chroma .se{color:var(--chroma-base0C)}.chroma .sh{color:var(--chroma-base0B)}.chroma .si{color:var(--chroma-base0F)}.chroma .sx{color:var(--chroma-base0B)}.chroma .sr{color:var(--chroma-base0C)}.chroma .s1{color:var(--chroma-base0B)}.chroma .ss{color:var(--chroma-base0B)}.chroma .m{color:var(--chroma-base09)}.chroma .mb{color:var(--chroma-base09)}.chroma .mf{color:var(--chroma-base09)}.chroma .mh{color:var(--chroma-base09)}.chroma .mi{color:var(--chroma-base09)}.chroma .il{color:var(--chroma-base09)}.chroma .mo{color:var(--chroma-base09)}.chroma .o{color:var(--chroma-base05)}.chroma .ow{color:var(--chroma-base05)}.chroma .p{color:var(--chroma-base05)}.chroma .c{color:var(--chroma-base03)}.chroma .ch{color:var(--chroma-base03)}.chroma .cm{color:var(--chroma-base03)}.chroma .c1{color:var(--chroma-base03)}.chroma .cs{color:var(--chroma-base03)}.chroma .cp{color:var(--chroma-base0F)}.chroma .cpf{color:var(--chroma-base0B)}.chroma .g{color:var(--chroma-base05)}.chroma .gd{color:var(--chroma-base08)}.chroma .ge{color:var(--chroma-base05);font-style:italic}.chroma .gr{color:var(--chroma-base05)}.chroma .gh{color:var(--chroma-base0D)}.chroma .gi{color:var(--chroma-base0B)}.chroma .go{color:var(--chroma-base05)}.chroma .gp{color:var(--chroma-base05)}.chroma .gs{color:var(--chroma-base05);font-weight:bold}.chroma .gu{color:var(--chroma-base0D)}.chroma 
.gt{color:var(--chroma-base05)}.chroma .gl{color:var(--chroma-base05);text-decoration:underline}.chroma .w{color:var(--chroma-base00)}html{font-family:var(--font-family);background:var(--background);color:var(--color);scroll-behavior:smooth;scroll-padding:2em} - -/*# sourceMappingURL=base.css.map */ \ No newline at end of file diff --git a/docs/scss/base.css.map b/docs/scss/base.css.map deleted file mode 100644 index be8928e..0000000 --- a/docs/scss/base.css.map +++ /dev/null @@ -1,35 +0,0 @@ -{ - "version": 3, - "file": "base.css", - "sourceRoot": "D:/project/gitlab/llm/external/ant_group/codefuse-ai.github.io", - "sources": [ - "themes/docura/assets/scss/base.scss", - "themes/docura/assets/scss/reset.scss", - "themes/docura/assets/scss/variables.scss", - "themes/docura/assets/scss/layout.scss", - "themes/docura/assets/scss/component/site-header.scss", - "themes/docura/assets/scss/component/site-footer.scss", - "themes/docura/assets/scss/component/article.scss", - "themes/docura/assets/scss/component/sidebar.scss", - "themes/docura/assets/scss/component/toc.scss", - "themes/docura/assets/scss/component/_button.scss", - "themes/docura/assets/scss/component/_dropdown.scss", - "themes/docura/assets/scss/component/_chroma.scss" - ], - "sourcesContent": [ - "/*!\n * Docura (https://docura.github.io/)\n * Copyright 2022-2023 Dumindu Madunuwan\n * Licensed under the MIT License.\n */\n\n@import \"reset\";\n@import \"variables\";\n@import \"layout\";\n\n@import \"component/site-header\";\n@import \"component/site-footer\";\n@import \"component/article\";\n@import \"component/sidebar\";\n@import \"component/toc\";\n\n@import \"component/button\";\n@import \"component/dropdown\";\n@import \"component/chroma\";\n\nhtml {\n font-family: var(--font-family);\n background: var(--background);\n color: var(--color);\n scroll-behavior: smooth;\n scroll-padding: 2em;\n}\n", - "/* https://github.com/elad2412/the-new-css-reset v1.11 */\n/* custom styles for: pre, code 
*/\n\n*:where(:not(html, iframe, canvas, img, svg, video, audio, pre, code):not(svg *, symbol *)) {\n all: unset;\n display: revert;\n}\n\n*,\n*::before,\n*::after {\n box-sizing: border-box;\n}\n\nhtml {\n -moz-text-size-adjust: none;\n -webkit-text-size-adjust: none;\n text-size-adjust: none;\n}\n\na, button {\n cursor: revert;\n}\n\nol, ul, menu {\n list-style: none;\n}\n\nimg {\n max-inline-size: 100%;\n max-block-size: 100%;\n}\n\ntable {\n border-collapse: collapse;\n}\n\ninput, textarea {\n -webkit-user-select: auto;\n}\n\ntextarea {\n white-space: revert;\n}\n\nmeter {\n -webkit-appearance: revert;\n appearance: revert;\n}\n\n:where(pre) {\n all: revert;\n box-sizing: border-box;\n}\n\n::placeholder {\n color: unset;\n}\n\n::marker {\n content: initial;\n}\n\n:where([hidden]) {\n display: none;\n}\n\n:where([contenteditable]:not([contenteditable=\"false\"])) {\n -moz-user-modify: read-write;\n -webkit-user-modify: read-write;\n overflow-wrap: break-word;\n -webkit-line-break: after-white-space;\n -webkit-user-select: auto;\n}\n\n:where([draggable=\"true\"]) {\n -webkit-user-drag: element;\n}\n\n:where(dialog:modal) {\n all: revert;\n box-sizing: border-box;\n}\n\npre, code {\n margin: 0;\n}", - ":root {\n --site-header-height: 46px;\n --site-footer-height: 46px;\n}\n\n@media (min-width: 1025px) and (max-width: 1280px),\n(min-width: 1024px) and (max-width: 1280px) and (orientation: portrait) {\n :root {\n --site-header-height: 60px;\n --site-footer-height: 60px;\n }\n}\n\n@media (min-width: 1281px) {\n :root {\n --site-header-height: 80px;\n --site-footer-height: 80px;\n }\n}", - "body {\n font-family: var(--font-family);\n background: var(--background);\n color: var(--color);\n display: flex;\n flex-direction: column;\n min-height: 100svh;\n}\n\n#site-header {\n display: grid;\n grid-template-columns: 2fr 1fr;\n grid-template-rows: repeat(3, var(--site-header-height));\n}\n\n#site-header-menu, #site-header-search {\n grid-column: 1 / 3;\n}\n\n#site-footer 
{\n display: grid;\n grid-template-columns: 1fr 1fr;\n grid-template-rows: repeat(3, var(--site-footer-height));\n}\n\n#site-footer-copyright, #site-footer-love {\n grid-column: 1 / 3;\n}\n\n#site-main-content-wrapper {\n display: flex;\n flex: 1;\n}\n\n#sidebar, #toc, #article-nav, #sidebar .btn-close, #toc .btn-close {\n display: none;\n}\n\nmain {\n flex: 1;\n display: flex;\n overflow: auto;\n}\n\n#article {\n flex: 1;\n width: 100vw;\n}\n\n#sidebar {\n width: 85%;\n left: -85%;\n}\n\n#toc {\n width: 85%;\n right: -85%;\n}\n\n/* Small Tablet */\n@media (min-width: 768px) and (max-width: 1023px) {\n #site-header {\n grid-template-columns: repeat(6, 1fr);\n grid-template-rows: repeat(2, var(--site-header-height));\n }\n\n #site-header-brand {\n grid-column: 1 / 6;\n }\n\n #site-header-controls {\n grid-column: 6 / 7;\n }\n\n #site-header-menu {\n grid-column: 1 / 5;\n }\n\n #site-header-search {\n grid-column: 5 / 7;\n }\n\n #site-footer {\n grid-template-columns: repeat(4, 1fr);\n grid-template-rows: repeat(2, var(--site-footer-height));\n }\n\n #site-footer-copyright {\n grid-column: 1 / 3;\n }\n\n #site-footer-social {\n grid-column: 3 / 4;\n }\n\n #site-footer-fund {\n grid-column: 4 / 5;\n }\n\n #site-footer-love {\n grid-column: 1 / 5;\n }\n\n #sidebar {\n width: 50%;\n left: -50%;\n }\n\n #toc {\n width: 50%;\n right: -50%;\n }\n}\n\n/* From Large Tablet */\n@media (min-width: 1024px) {\n #site-header {\n grid-template-columns: repeat(6, 1fr);\n grid-template-rows: var(--site-header-height);\n }\n\n #site-header-brand {\n grid-column: 1 / 2;\n }\n\n #site-header-menu {\n grid-column: 2 / 5;\n grid-row: 1;\n }\n\n #site-header-search {\n grid-column: 5 / 6;\n grid-row: 1;\n }\n\n #site-header-controls {\n grid-column: 6 / 7;\n }\n\n #site-footer {\n grid-template-columns: repeat(5, 1fr);\n grid-template-rows: var(--site-footer-height);\n }\n\n #site-footer-copyright {\n grid-column: 1 / 3;\n }\n\n #site-footer-love {\n grid-column: 3 / 4;\n grid-row: 1;\n 
}\n\n #site-footer-social {\n grid-column: 4 / 5;\n }\n\n #site-footer-fund {\n grid-column: 5 / 6;\n }\n\n #article-nav-toc-btn {\n display: none;\n }\n}\n\n/* Large Tablet */\n@media (min-width: 1024px) and (max-width: 1279px) {\n #sidebar {\n width: 33%;\n left: -33%;\n }\n\n #article {\n width: 75vw;\n }\n\n #toc {\n width: 25%;\n display: flex;\n flex-direction: column;\n }\n\n #toc .sticky {\n position: fixed;\n right: 0;\n width: 25%;\n }\n}\n\n/* From Desktop */\n@media (min-width: 1280px) {\n #sidebar {\n width: 20%;\n display: flex;\n flex-direction: column;\n }\n\n #article {\n width: 60vw;\n }\n\n #toc {\n width: 25%;\n display: flex;\n flex-direction: column;\n }\n\n #sidebar .sticky {\n position: fixed;\n left: 0;\n width: 20%;\n }\n\n #toc .sticky {\n position: fixed;\n right: 0;\n width: 20%;\n }\n}\n\n/* Upto Large Tablet */\n@media (max-width: 1023px) {\n #toc {\n position: fixed;\n top: 0;\n height: 100%;\n transition: .3s;\n z-index: 300;\n overflow-x: auto;\n background: var(--background);\n box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1);\n }\n\n :root[data-color=\"dark\"] #toc, :root[data-color=\"night\"] #toc {\n box-shadow: 0 4px 30px rgba(255, 255, 255, 0.1);\n }\n\n .offcanvas-toc-on #toc {\n animation: slide-in-right .3s forwards;\n display: flex;\n flex-direction: column;\n padding-left: 16px;\n z-index: 10;\n cursor: default;\n }\n\n .offcanvas-toc-on:before {\n content: \"\";\n position: fixed;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n z-index: 5;\n }\n\n .offcanvas-toc-on #toc .btn-close {\n display: block;\n position: absolute;\n top: 10px;\n left: 10px;\n }\n\n #article-nav-toc-btn {\n display: flex;\n box-shadow: var(--box-shadow2);\n border-radius: 6px;\n padding: 6px;\n cursor: pointer;\n white-space: nowrap;\n gap: 6px;\n color: var(--color2);\n }\n}\n\n/* Upto Desktop */\n@media (max-width: 1279px) {\n #sidebar {\n position: fixed;\n top: 0;\n height: 100%;\n transition: .3s;\n z-index: 200;\n overflow-x: auto;\n 
background: var(--background);\n box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1);\n }\n\n :root[data-color=\"dark\"] #sidebar, :root[data-color=\"night\"] #sidebar {\n box-shadow: 0 4px 30px rgba(255, 255, 255, 0.1);\n }\n\n .offcanvas-sidebar-on #sidebar {\n animation: slide-in-left .3s forwards;\n display: flex;\n flex-direction: column;\n z-index: 10;\n cursor: default;\n }\n\n .offcanvas-sidebar-on:before {\n content: \"\";\n position: fixed;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n z-index: 5;\n }\n\n .offcanvas-sidebar-on #sidebar .btn-close {\n display: block;\n position: absolute;\n top: 10px;\n right: 10px;\n }\n\n #article-nav {\n display: flex;\n gap: 12px;\n overflow: auto;\n justify-content: space-between;\n height: var(--site-header-height);\n align-items: center;\n padding: 0 2px;\n }\n\n #article-nav-menu-btn {\n display: flex;\n box-shadow: var(--box-shadow2);\n border-radius: 6px;\n padding: 6px;\n cursor: pointer;\n white-space: nowrap;\n gap: 6px;\n color: var(--color2);\n }\n}\n\nbody.offcanvas-sidebar-on, body.offcanvas-toc-on {\n cursor: pointer;\n overflow: hidden;\n}\n\n.offcanvas-sidebar-on:before, .offcanvas-toc-on:before {\n background: rgba(255, 255, 255, 0.1);\n backdrop-filter: blur(var(--blur));\n -webkit-backdrop-filter: blur(var(--blur));\n}\n\n@keyframes slide-in-left {\n from {\n transform: translateX(0);\n }\n to {\n transform: translateX(100%);\n }\n}\n\n@keyframes slide-in-right {\n from {\n transform: translateX(0);\n }\n to {\n transform: translateX(-100%);\n }\n}", - "#site-header-brand {\n display: flex;\n align-items: center;\n font-family: var(--font-family-brand);\n font-size: 1.4em;\n color: var(--color2);\n}\n\n#site-header-brand a {\n padding: 12px;\n}\n\n#site-header-menu {\n padding: 0 12px;\n display: flex;\n align-items: center;\n color: var(--color3);\n}\n\n#site-header-menu nav {\n width: 100%;\n overflow: auto;\n}\n\n#site-header-menu ul {\n display: flex;\n height: 100%;\n align-items: center;\n gap: 
12px;\n}\n\n#site-header-menu a {\n display: flex;\n padding: 12px 6px;\n gap: 3px;\n white-space: nowrap;\n}\n\n#site-header-menu a:focus, #site-header-menu a:hover, #site-header-menu a.active {\n border-bottom: 3px solid;\n}\n\n#site-header-controls {\n display: flex;\n align-items: center;\n padding-right: 12px;\n justify-content: flex-end;\n gap: 12px\n}\n\n#site-header-search {\n display: flex;\n align-items: flex-end;\n}\n\n/* From Small Tablet */\n@media (min-width: 768px) {\n #site-header-search {\n align-items: center;\n }\n}", - "#site-footer-social {\n display: flex;\n gap: 12px;\n justify-content: flex-start;\n padding-left: 12px;\n align-items: center;\n}\n\n#site-footer-fund {\n display: flex;\n gap: 12px;\n overflow: auto;\n justify-content: flex-end;\n padding-right: 12px;\n align-items: center;\n}\n\n#site-footer-copyright, #site-footer-love {\n display: flex;\n align-items: center;\n justify-content: center;\n color: var(--color3)\n}\n\n#site-footer-copyright a {\n display: flex;\n align-items: center;\n}\n\n/* From Small Tablet */\n@media (min-width: 768px) {\n #site-footer-copyright {\n justify-content: flex-start;\n padding-left: 12px;\n }\n\n #site-footer-social {\n justify-content: flex-end;\n padding-right: 12px;\n }\n}\n", - "#article {\n padding: 8px 16px;\n}\n\n#article-header {\n font-size: 3em;\n font-weight: 400;\n margin-bottom: 1em;\n color: var(--color2)\n}\n\n#article-content h1,\n#article-content h2,\n#article-content h3,\n#article-content h4,\n#article-content h5,\n#article-content h6 {\n line-height: 1em;\n font-weight: 400;\n margin: 2.6em 0 .1em;\n color: var(--color2)\n}\n\n#article-content h1 {\n font-size: 1.8em\n}\n\n#article-content h2 {\n font-size: 1.5em\n}\n\n#article-content h3 {\n font-size: 1.3em\n}\n\n#article-content h4 {\n font-size: 1.1em\n}\n\n#article-content .highlight,\n#article-content blockquote,\n#article-content dl,\n#article-content iframe,\n#article-content ol,\n#article-content p,\n#article-content 
table,\n#article-content ul {\n margin-top: 1em;\n line-height: 1.8rem;\n letter-spacing: -.1px;\n}\n\n#article-content blockquote p {\n margin: 1em 0\n}\n\n#article-content blockquote dl,\n#article-content blockquote ol,\n#article-content blockquote ul {\n margin: 0 1em 1em 1em\n}\n\n#article-content a {\n color: var(--color-anchor);\n text-decoration: none\n}\n\n#article-content a:hover {\n color: var(--color-hover);\n text-decoration: underline\n}\n\n@media print {\n #article-content a {\n color: #355265;\n text-decoration: underline\n }\n\n #article-content a:after {\n content: \" (\" attr(href) \")\";\n font-size: 80%\n }\n}\n\n#article-content strong, #article-content b, #article-content table th {\n font-weight: 600\n}\n\n#article-content em {\n font-style: italic\n}\n\n#article-content dl,\n#article-content ol,\n#article-content ul {\n margin-left: 20px\n}\n\n#article-content dl dl,\n#article-content dl ol,\n#article-content dl ul,\n#article-content ol dl,\n#article-content ol ol,\n#article-content ol ul,\n#article-content ul dl,\n#article-content ul ol,\n#article-content ul ul {\n margin-top: 0;\n margin-bottom: 0\n}\n\n#article-content ul {\n list-style: disc\n}\n\n#article-content ol {\n list-style: decimal\n}\n\n#article-content dl {\n list-style: square\n}\n\n#article-content li > ul {\n list-style: circle\n}\n\n#article-content li > ol {\n list-style: lower-alpha\n}\n\n#article-content li p {\n margin: 0\n}\n\n#article-content li .highlight,\n#article-content li blockquote,\n#article-content li iframe,\n#article-content li table {\n margin: 1em 0\n}\n\n#article-content img,\n#article-content video {\n max-width: 100%;\n border-radius: 4px\n}\n\n#article-content blockquote {\n padding: 8px 12px;\n position: relative;\n background: var(--background-fg);\n border-left: 4px solid var(--border-color);\n border-radius: 6px;\n}\n\n#article-content blockquote footer {\n margin: 1em 0;\n font-style: italic\n}\n\n#article-content blockquote footer cite:before 
{\n content: \"—\";\n padding: 0 .3em\n}\n\n#article-content blockquote footer cite a {\n color: var(--border-color);\n}\n\n#article-content code, #article-content pre {\n font-family: var(--font-family-code);\n}\n\n#article-content h1 code,\n#article-content h2 code,\n#article-content h3 code,\n#article-content h4 code,\n#article-content h5 code,\n#article-content h6 code,\n#article-content p code,\n#article-content blockquote code,\n#article-content ul code,\n#article-content ol code,\n#article-content dl code,\n#article-content table code {\n background: var(--chroma-base00);\n padding: 4px;\n border-radius: 4px;\n font-size: .9em;\n}\n\n#article-content pre:not(.chroma) {\n color: var(--chroma-base05);\n font-size: .9em;\n line-height: 1.8;\n letter-spacing: -.1px;\n background-color: var(--chroma-base00);\n border-radius: 6px;\n padding: 16px 24px;\n overflow-x: auto;\n margin-top: 1em;\n}\n\n#article-content blockquote code {\n background: var(--background-fg2);\n opacity: .8;\n}\n\n#article-content blockquote .chroma, #article-content blockquote pre:not(.chroma) {\n background: var(--background-fg2);\n margin-bottom: 1em;\n}\n\n#article-content blockquote .chroma code, #article-content blockquote pre:not(.chroma) code {\n padding: 0;\n}\n\n#article-content table {\n max-width: 100%;\n border: 1px solid var(--border-color)\n}\n\n#article-content table td,\n#article-content table th {\n padding: 5px 15px\n}\n\n#article-content table tr:nth-child(2n) {\n background: var(--background-fg)\n}\n\n#article-footer {\n display: grid;\n grid-template-columns: 1fr 1fr;\n padding-top: 20px;\n}\n\n#article-last-updated, #article-prev-link, #article-next-link {\n display: flex;\n align-items: center;\n padding: 12px 0;\n}\n\n#article-last-updated {\n grid-column: 1 / 3;\n justify-content: center;\n color: var(--color3);\n}\n\n#article-prev-link, #article-next-link {\n color: var(--color-anchor);\n}\n\n#article-prev-link:hover, #article-next-link:hover {\n color: 
var(--color-hover);\n font-weight: 600;\n font-size: 98%;\n}\n\n#article-next-link {\n justify-content: flex-end;\n}\n\n#article-prev-link .icon {\n padding-right: 6px;\n}\n\n#article-next-link .icon {\n padding-left: 6px;\n}\n\n@media (max-width: 767px) {\n #article-next-link[data-first-page=\"true\"] {\n grid-column: 2/ 3;\n }\n}\n\n@media (min-width: 768px) {\n #article {\n padding: 16px 24px;\n }\n\n #article-footer {\n display: grid;\n grid-template-columns: repeat(3, 1fr);\n }\n\n #article-prev-link {\n grid-column: 1/ 2;\n grid-row: 1;\n }\n\n #article-last-updated {\n grid-column: 2 / 3;\n }\n\n #article-next-link {\n grid-column: 3 / 4;\n }\n}\n\n@media (min-width: 1024px) {\n #article {\n padding: 24px 32px;\n }\n}\n\n@media (min-width: 1281px) {\n #article {\n padding: 32px 40px;\n }\n}\n\n@media (min-width: 1920px) {\n #article {\n padding: 40px 48px;\n }\n\n #article-content {\n width: 90%;\n }\n}\n\n@media (min-width: 2560px) {\n #article-content {\n width: 85%;\n }\n}\n\n@media (min-width: 3840px) {\n #article-content {\n width: 80%;\n }\n}\n", - "#sidebar {\n padding: 40px 0;\n}\n\n#sidebar .sticky {\n display: flex;\n flex-direction: column;\n padding: 0 20px;\n overflow: auto;\n}\n\n.sidebar-section, .sidebar-link {\n padding: 7px 0;\n}\n\n.sidebar-section {\n margin-top: 40px;\n font-weight: 600;\n color: var(--color2)\n}\n\n#sidebar .sidebar-section:first-child {\n margin-top: 0;\n}\n\n.sidebar-link {\n padding-left: 10px;\n color: var(--color3);\n border-left: 1px solid var(--border-color);\n margin-left: 4px;\n}\n\n.sidebar-link::before {\n content: '';\n display: inline-block;\n width: 6px;\n height: 6px;\n background: var(--background);\n box-shadow: var(--box-shadow);\n border-radius: 50%;\n position: relative;\n left: -13.5px;\n top: -3px;\n}\n\n.sidebar-link:hover {\n color: var(--color-hover);\n font-weight: 600;\n font-size: 98%;\n}\n\n.sidebar-link.current {\n color: var(--color-anchor);\n font-weight: 600;\n font-size: 
98%;\n}\n\n.sidebar-link.current::before, .sidebar-link:hover::before {\n background: var(--color-anchor);\n}\n", - "#toc {\n padding-top: 40px;\n padding-bottom: 40px;\n}\n\n#toc .sticky{\n overflow: auto;\n}\n\n#toc strong {\n font-weight: 600;\n padding: 7px 10px 7px 0;\n display: flex;\n gap: 3px;\n position: relative;\n left: -3px;\n color: var(--color2)\n}\n\n#toc ul {\n margin-left: .3em;\n border-left: 1px solid var(--border-color);\n}\n\n#toc ul ul {\n margin-left: 1em;\n}\n\n#toc ul a {\n display: inline-block;\n padding: 7px;\n color: var(--color3);\n}\n\n#toc ul a.active, #toc ul a:hover {\n color: var(--color-hover);\n}\n\n#toc ul a::before {\n content: '';\n display: inline-block;\n width: 6px;\n height: 6px;\n background: var(--background);\n box-shadow: var(--box-shadow);\n position: relative;\n left: -10.5px;\n top: -3px;\n}\n\n#toc ul a.active::before, #toc ul a:hover::before {\n background: var(--color-hover);\n}\n\n", - ".btn-github {\n display: flex;\n flex-direction: row;\n gap: 2px;\n font-size: .7em; /*11 px*/\n font-weight: 700;\n line-height: 1.8em;\n color: #576060;\n background: #f6f8fa;\n border: 1px solid #d5d7da;\n border-radius: 6px;\n padding: 2px 4px;\n}\n\n:root[data-color=\"dark\"] .btn-github, :root[data-color=\"night\"] .btn-github {\n color: #c9d1d9;\n background: #21262d;\n border: 1px solid #576060;\n}\n\n.btn-github .icon {\n transform: scale(.8); /* 18px */\n}\n\n.btn-buymeacoffee {\n width: 86px;\n height: 24px;\n background-image: url(\"data:image/svg+xml,%3Csvg width='85.5' height='24' viewBox='0 0 545 153' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0 24.48C0 10.9601 10.9601 0 24.48 0H520.2C533.72 0 544.68 10.9601 544.68 24.48V128.52C544.68 142.04 533.72 153 520.2 153H24.48C10.9601 153 0 142.04 0 128.52V24.48Z' fill='%23FFDD00'/%3E%3Cpath d='M109.522 50.3178L109.455 50.2783L109.299 50.2308C109.362 50.2836 109.44 50.3142 109.522 50.3178Z' fill='%230D0C22'/%3E%3Cpath d='M110.507 57.3134L110.432 
57.3344L110.507 57.3134Z' fill='%230D0C22'/%3E%3Cpath d='M109.549 50.3062C109.54 50.3051 109.532 50.3031 109.524 50.3003C109.523 50.3058 109.523 50.3113 109.524 50.3168C109.533 50.3156 109.541 50.3119 109.549 50.3062Z' fill='%230D0C22'/%3E%3Cpath d='M109.523 50.3205H109.536V50.3127L109.523 50.3205Z' fill='%230D0C22'/%3E%3Cpath d='M110.447 57.3006L110.56 57.2361L110.602 57.2123L110.64 57.1715C110.569 57.2025 110.503 57.2462 110.447 57.3006Z' fill='%230D0C22'/%3E%3Cpath d='M109.715 50.4713L109.604 50.3659L109.529 50.3251C109.57 50.3963 109.636 50.4488 109.715 50.4713Z' fill='%230D0C22'/%3E%3Cpath d='M81.8801 118.353C81.7916 118.391 81.7142 118.451 81.6548 118.527L81.7246 118.482C81.772 118.439 81.8392 118.387 81.8801 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M98.0456 115.173C98.0456 115.073 97.9968 115.091 98.0087 115.447C98.0087 115.418 98.0206 115.389 98.0258 115.361C98.0324 115.298 98.0377 115.236 98.0456 115.173Z' fill='%230D0C22'/%3E%3Cpath d='M96.3761 118.353C96.2877 118.391 96.2103 118.451 96.1509 118.527L96.2207 118.482C96.2681 118.439 96.3353 118.387 96.3761 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M70.4886 119.11C70.4215 119.052 70.3393 119.013 70.2515 118.999C70.3226 119.034 70.3937 119.068 70.4412 119.094L70.4886 119.11Z' fill='%230D0C22'/%3E%3Cpath d='M67.9304 116.657C67.92 116.553 67.8881 116.453 67.8369 116.362C67.8732 116.456 67.9035 116.553 67.9278 116.652L67.9304 116.657Z' fill='%230D0C22'/%3E%3Cpath d='M85.1368 72.7737C81.6195 74.2794 77.628 75.9866 72.4549 75.9866C70.2908 75.9823 68.1373 75.6854 66.0527 75.104L69.6306 111.838C69.7572 113.373 70.4567 114.805 71.59 115.848C72.7233 116.892 74.2076 117.471 75.7482 117.47C75.7482 117.47 80.8212 117.734 82.514 117.734C84.3358 117.734 89.7988 117.47 89.7988 117.47C91.3391 117.47 92.8231 116.891 93.9562 115.848C95.0892 114.804 95.7885 113.373 95.9151 111.838L99.7472 71.2456C98.0347 70.6607 96.3064 70.2721 94.358 70.2721C90.9883 70.2708 88.2733 71.4313 85.1368 72.7737Z' fill='white'/%3E%3Cpath d='M54.9844 
57.1021L55.045 57.1587L55.0845 57.1824C55.0541 57.1522 55.0205 57.1252 54.9844 57.1021Z' fill='%230D0C22'/%3E%3Cpath d='M116.299 53.7119L115.761 50.9943C115.277 48.5559 114.18 46.2519 111.677 45.3706C110.875 45.0887 109.964 44.9675 109.349 44.384C108.734 43.8004 108.552 42.8941 108.41 42.0536C108.147 40.511 107.899 38.9671 107.629 37.4272C107.396 36.1033 107.211 34.616 106.604 33.4015C105.814 31.7706 104.174 30.8169 102.543 30.1859C101.707 29.8739 100.854 29.61 99.9884 29.3955C95.9139 28.3205 91.63 27.9253 87.4382 27.7001C82.407 27.4225 77.3623 27.5061 72.343 27.9504C68.6071 28.2902 64.6723 28.7013 61.1221 29.9935C59.8245 30.4665 58.4875 31.0342 57.5008 32.0367C56.2902 33.2684 55.895 35.1733 56.7789 36.7092C57.4073 37.8 58.4717 38.5706 59.6006 39.0804C61.0711 39.7373 62.6068 40.2371 64.1822 40.5716C68.5689 41.5412 73.1124 41.9219 77.5939 42.0839C82.561 42.2844 87.5362 42.1219 92.4796 41.5978C93.7021 41.4635 94.9224 41.3023 96.1405 41.1144C97.575 40.8944 98.4958 39.0185 98.073 37.7117C97.5671 36.1494 96.2077 35.5434 94.6703 35.7792C94.4438 35.8148 94.2185 35.8477 93.9919 35.8807L93.8286 35.9044C93.3078 35.9702 92.787 36.0317 92.2662 36.0888C91.1904 36.2047 90.112 36.2996 89.0309 36.3733C86.6097 36.5419 84.1818 36.6197 81.7553 36.6236C79.371 36.6236 76.9853 36.5564 74.6062 36.3997C73.5207 36.3285 72.4379 36.2381 71.3577 36.1283C70.8663 36.0769 70.3763 36.0229 69.8862 35.9623L69.4199 35.903L69.3185 35.8886L68.835 35.8187C67.847 35.6699 66.859 35.4986 65.8816 35.2918C65.783 35.2699 65.6947 35.2151 65.6315 35.1363C65.5683 35.0575 65.5338 34.9594 65.5338 34.8584C65.5338 34.7574 65.5683 34.6594 65.6315 34.5806C65.6947 34.5018 65.783 34.4469 65.8816 34.425H65.9C66.7471 34.2445 67.6007 34.0904 68.4569 33.956C68.7424 33.9113 69.0287 33.8673 69.3158 33.8243H69.3237C69.8599 33.7887 70.3987 33.6926 70.9322 33.6293C75.574 33.1465 80.2434 32.9819 84.9077 33.1367C87.1721 33.2025 89.4353 33.3356 91.6892 33.5648C92.174 33.6149 92.6562 33.6676 93.1383 33.7268C93.3227 33.7492 93.5085 
33.7756 93.6942 33.798L94.0683 33.852C95.1591 34.0144 96.2441 34.2116 97.3234 34.4435C98.9227 34.7912 100.976 34.9045 101.688 36.6566C101.914 37.2125 102.017 37.8303 102.142 38.4139L102.302 39.1581C102.306 39.1715 102.309 39.1852 102.311 39.199C102.688 40.9554 103.065 42.7118 103.442 44.4683C103.47 44.598 103.471 44.7321 103.444 44.8621C103.418 44.9921 103.365 45.1153 103.289 45.2239C103.213 45.3326 103.115 45.4244 103.002 45.4936C102.889 45.5628 102.762 45.6079 102.631 45.6262H102.62L102.39 45.6578L102.162 45.6881C101.44 45.7821 100.717 45.8699 99.9936 45.9516C98.5683 46.114 97.1408 46.2546 95.711 46.3731C92.87 46.6094 90.0233 46.7644 87.1708 46.8381C85.7174 46.8768 84.2644 46.8948 82.8118 46.8921C77.0301 46.8876 71.2534 46.5516 65.5101 45.8857C64.8883 45.8119 64.2666 45.7329 63.6448 45.6525C64.1269 45.7145 63.2944 45.6051 63.1258 45.5814C62.7306 45.5261 62.3354 45.4686 61.9402 45.4088C60.6136 45.2099 59.295 44.9649 57.9711 44.7502C56.3705 44.4867 54.8398 44.6185 53.3921 45.4088C52.2037 46.0591 51.2419 47.0564 50.6349 48.2674C50.0105 49.5584 49.8248 50.964 49.5455 52.3511C49.2662 53.7383 48.8315 55.2308 48.9962 56.6548C49.3505 59.7281 51.4991 62.2258 54.5895 62.7843C57.4968 63.3112 60.42 63.7381 63.351 64.1016C74.8648 65.5118 86.4968 65.6805 98.0466 64.6049C98.9872 64.517 99.9265 64.4213 100.864 64.3177C101.157 64.2855 101.454 64.3192 101.732 64.4165C102.01 64.5137 102.263 64.6719 102.472 64.8795C102.681 65.0872 102.842 65.339 102.941 65.6165C103.04 65.894 103.076 66.1902 103.046 66.4834L102.753 69.3261C102.164 75.0705 101.575 80.8145 100.986 86.558C100.371 92.5896 99.7521 98.6208 99.1295 104.651C98.9538 106.35 98.7782 108.048 98.6025 109.746C98.4339 111.417 98.4102 113.142 98.0927 114.794C97.5922 117.391 95.8335 118.987 93.2674 119.57C90.9164 120.105 88.5148 120.386 86.1038 120.408C83.431 120.422 80.7594 120.304 78.0866 120.318C75.2333 120.334 71.7384 120.071 69.5358 117.947C67.6007 116.082 67.3333 113.161 67.0698 110.636C66.7185 107.293 66.3703 103.95 66.0252 
100.607L64.0887 82.0212L62.8359 69.9953C62.8149 69.7964 62.7938 69.6001 62.774 69.3999C62.6239 67.9654 61.6082 66.5611 60.0077 66.6335C58.6376 66.6941 57.0806 67.8586 57.2413 69.3999L58.17 78.3155L60.0906 96.7581C60.6378 101.997 61.1836 107.236 61.7281 112.476C61.8335 113.48 61.9323 114.487 62.0429 115.49C62.6449 120.976 66.834 123.932 72.0216 124.764C75.0515 125.252 78.1551 125.352 81.2297 125.402C85.1711 125.465 89.1521 125.617 93.029 124.903C98.7738 123.849 103.084 120.013 103.699 114.062C103.875 112.345 104.051 110.626 104.226 108.908C104.81 103.224 105.393 97.5397 105.976 91.855L107.88 73.2807L108.754 64.7682C108.797 64.3461 108.976 63.9492 109.262 63.6363C109.549 63.3234 109.929 63.111 110.345 63.0307C111.988 62.7105 113.558 62.1639 114.727 60.9137C116.587 58.9232 116.957 56.3281 116.299 53.7119ZM54.5052 55.5483C54.5302 55.5364 54.4841 55.7511 54.4644 55.8513C54.4604 55.6998 54.4683 55.5654 54.5052 55.5483ZM54.6646 56.7813C54.6778 56.7721 54.7173 56.8248 54.7581 56.888C54.6962 56.83 54.6567 56.7866 54.6633 56.7813H54.6646ZM54.8214 56.9881C54.878 57.0843 54.9083 57.1449 54.8214 56.9881V56.9881ZM55.1362 57.2437H55.1441C55.1441 57.2529 55.1586 57.2621 55.1639 57.2713C55.1551 57.2612 55.1454 57.2519 55.1349 57.2437H55.1362ZM110.269 56.8616C109.679 57.4228 108.789 57.6837 107.911 57.8141C98.0572 59.2763 88.06 60.0166 78.0984 59.6899C70.9691 59.4462 63.9148 58.6545 56.8566 57.6573C56.165 57.5598 55.4155 57.4334 54.9399 56.9236C54.0441 55.9619 54.4841 54.0254 54.7173 52.8636C54.9307 51.7992 55.3391 50.3804 56.605 50.2289C58.581 49.9971 60.8758 50.8309 62.8307 51.1273C65.1843 51.4865 67.5467 51.7741 69.9179 51.9902C80.0375 52.9123 90.3271 52.7687 100.402 51.4198C102.238 51.173 104.068 50.8863 105.891 50.5596C107.516 50.2684 109.316 49.7218 110.298 51.404C110.971 52.55 111.06 54.0834 110.956 55.3783C110.924 55.9425 110.678 56.4732 110.267 56.8616H110.269Z' fill='%230D0C22'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M170.036 84.2397C169.461 85.3378 168.67 
86.2942 167.663 87.1057C166.656 87.9178 165.482 88.579 164.139 89.0881C162.797 89.5984 161.446 89.9408 160.088 90.1153C158.729 90.2905 157.41 90.2753 156.133 90.0674C154.854 89.8608 153.766 89.439 152.872 88.8014L153.88 78.3397C154.806 78.0216 155.972 77.6949 157.379 77.3604C158.785 77.0264 160.231 76.787 161.718 76.644C163.205 76.5004 164.61 76.5173 165.937 76.6919C167.263 76.867 168.31 77.2888 169.077 77.9579C169.493 78.3397 169.845 78.7537 170.132 79.1997C170.42 79.6458 170.595 80.1076 170.66 80.5852C170.819 81.9227 170.612 83.1409 170.036 84.2397ZM155.413 61.9545C156.084 61.5406 156.892 61.1739 157.834 60.8551C158.777 60.5376 159.744 60.3139 160.735 60.1867C161.725 60.06 162.692 60.043 163.636 60.1388C164.578 60.2345 165.41 60.497 166.129 60.9267C166.848 61.357 167.383 61.9782 167.735 62.7897C168.086 63.6024 168.182 64.6296 168.022 65.8714C167.895 66.8587 167.502 67.695 166.848 68.3793C166.193 69.0647 165.393 69.6374 164.451 70.0993C163.508 70.5617 162.509 70.9277 161.455 71.1974C160.399 71.4689 159.384 71.6683 158.41 71.795C157.435 71.9229 156.588 72.0029 155.869 72.0338C155.15 72.0659 154.678 72.0816 154.454 72.0816L155.413 61.9545ZM175.214 77.4798C174.703 76.3658 174.016 75.3864 173.153 74.5416C172.29 73.698 171.266 73.0853 170.084 72.7029C170.595 72.2889 171.099 71.6362 171.595 70.7441C172.09 69.8532 172.513 68.8811 172.865 67.8302C173.216 66.7787 173.457 65.7205 173.584 64.6533C173.711 63.5866 173.663 62.6709 173.441 61.906C172.896 59.9958 172.042 58.4988 170.875 57.4158C169.708 56.3334 168.35 55.5849 166.8 55.1704C165.249 54.7577 163.54 54.6692 161.67 54.908C159.8 55.1467 157.89 55.6164 155.941 56.317C155.941 56.1582 155.957 55.991 155.989 55.8158C156.02 55.6413 156.036 55.4576 156.036 55.2661C156.036 54.7886 155.797 54.3752 155.317 54.0243C154.838 53.674 154.287 53.4674 153.664 53.4031C153.04 53.3401 152.433 53.4746 151.841 53.8092C151.25 54.1437 150.842 54.7577 150.619 55.6479C150.363 58.5146 150.107 61.4927 149.852 64.5812C149.596 67.6708 149.324 
70.792 149.037 73.9453C148.749 77.0979 148.461 80.227 148.174 83.3318C147.886 86.4372 147.598 89.4226 147.311 92.2886C147.407 93.1486 147.646 93.8177 148.03 94.2953C148.413 94.7734 148.861 95.0601 149.372 95.1553C149.883 95.251 150.419 95.1625 150.978 94.8922C151.537 94.6225 152.025 94.1516 152.441 93.4832C153.719 94.1838 155.158 94.6377 156.756 94.845C158.354 95.0516 159.975 95.0516 161.623 94.845C163.268 94.6377 164.89 94.248 166.488 93.6741C168.086 93.1013 169.541 92.3844 170.851 91.525C172.162 90.665 173.264 89.685 174.16 88.5869C175.054 87.4875 175.646 86.3014 175.933 85.0281C176.221 83.7221 176.301 82.4167 176.173 81.1106C176.045 79.8052 175.725 78.5955 175.214 77.4798Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M221.989 102.702C221.814 103.753 221.565 104.86 221.246 106.023C220.926 107.184 220.551 108.244 220.12 109.2C219.688 110.155 219.209 110.926 218.682 111.516C218.154 112.105 217.586 112.352 216.979 112.257C216.5 112.192 216.196 111.89 216.069 111.349C215.94 110.807 215.94 110.138 216.069 109.343C216.196 108.546 216.443 107.646 216.811 106.643C217.179 105.64 217.627 104.644 218.154 103.658C218.682 102.67 219.281 101.723 219.952 100.815C220.623 99.9082 221.326 99.1512 222.061 98.5464C222.221 98.7373 222.293 99.2149 222.277 99.9797C222.26 100.744 222.165 101.652 221.989 102.702ZM238.243 81.9697C237.811 81.4921 237.284 81.2218 236.66 81.1576C236.037 81.0939 235.405 81.4442 234.767 82.2085C234.351 82.9727 233.823 83.7054 233.184 84.406C232.545 85.1072 231.882 85.7436 231.195 86.3169C230.507 86.8896 229.852 87.3841 229.229 87.7975C228.606 88.212 228.118 88.5144 227.767 88.7053C227.639 87.6866 227.566 86.5878 227.551 85.409C227.534 84.2308 227.559 83.0369 227.623 81.8266C227.718 80.1067 227.918 78.3715 228.222 76.6194C228.526 74.868 228.965 73.148 229.541 71.4595C229.541 70.5686 229.332 69.8438 228.917 69.2862C228.501 68.7293 227.998 68.3784 227.407 68.2353C226.815 68.0923 226.209 68.1717 225.585 68.4741C224.962 68.7771 224.427 
69.3268 223.979 70.122C223.596 71.1735 223.156 72.3516 222.661 73.6571C222.165 74.9631 221.606 76.2928 220.983 77.6461C220.359 79.0006 219.664 80.3139 218.897 81.5873C218.13 82.8618 217.291 83.9927 216.38 84.9793C215.469 85.9666 214.478 86.7393 213.408 87.2963C212.336 87.8538 211.179 88.1005 209.932 88.0369C209.356 87.8775 208.94 87.4478 208.685 86.7466C208.429 86.0466 208.277 85.1702 208.23 84.1193C208.182 83.0684 208.23 81.9139 208.373 80.6557C208.517 79.3982 208.709 78.1479 208.949 76.9061C209.188 75.6637 209.452 74.4855 209.739 73.371C210.027 72.2565 210.298 71.3165 210.554 70.5523C210.938 69.6292 210.938 68.8559 210.554 68.2353C210.171 67.6141 209.644 67.2008 208.973 66.9929C208.302 66.7863 207.598 66.7947 206.863 67.0172C206.128 67.2402 205.6 67.7335 205.281 68.4977C204.737 69.8044 204.241 71.2686 203.794 72.8928C203.347 74.5171 202.987 76.1976 202.716 77.9328C202.444 79.6691 202.291 81.3891 202.26 83.0927C202.258 83.2036 202.263 83.309 202.263 83.4193C201.566 85.2708 200.902 86.6702 200.271 87.6066C199.456 88.8174 198.536 89.3429 197.514 89.1829C197.065 88.992 196.771 88.5465 196.627 87.8453C196.482 87.1453 196.435 86.2854 196.482 85.2654C196.531 84.2472 196.651 83.0927 196.842 81.8024C197.035 80.5127 197.273 79.1752 197.561 77.7897C197.849 76.4037 198.153 75.0116 198.472 73.6098C198.792 72.2086 199.079 70.8868 199.336 69.6444C199.304 68.5299 198.976 67.6784 198.352 67.0887C197.73 66.5002 196.858 66.2693 195.74 66.396C194.973 66.7147 194.405 67.1293 194.038 67.6384C193.67 68.1474 193.374 68.8008 193.151 69.5965C193.022 70.0111 192.831 70.8389 192.575 72.0813C192.319 73.3225 191.992 74.7486 191.592 76.3564C191.193 77.9655 190.721 79.6449 190.178 81.3963C189.635 83.1478 189.027 84.7333 188.357 86.1496C187.685 87.5666 186.95 88.7053 186.151 89.5653C185.352 90.4247 184.489 90.7756 183.562 90.6162C183.05 90.5205 182.723 89.995 182.579 89.0399C182.435 88.0841 182.412 86.9066 182.507 85.5048C182.603 84.1036 182.795 82.5666 183.082 80.8951C183.37 79.223 183.665 
77.6388 183.969 76.1413C184.273 74.6449 184.553 73.3225 184.809 72.1765C185.064 71.0298 185.24 70.2656 185.336 69.8838C185.336 68.9602 185.127 68.2202 184.713 67.662C184.297 67.1056 183.794 66.7547 183.202 66.6111C182.61 66.4681 182.003 66.5475 181.381 66.8499C180.757 67.1529 180.222 67.7026 179.774 68.4977C179.614 69.3577 179.406 70.3535 179.151 71.4838C178.895 72.614 178.648 73.7765 178.408 74.971C178.168 76.1655 177.944 77.3358 177.737 78.4824C177.529 79.6291 177.377 80.6321 177.281 81.4921C177.217 82.1606 177.145 82.9812 177.066 83.9521C176.985 84.9242 176.945 85.9508 176.945 87.0332C176.945 88.1169 177.025 89.1914 177.186 90.258C177.345 91.3253 177.633 92.3047 178.048 93.1956C178.463 94.0877 179.047 94.8198 179.799 95.3931C180.549 95.9664 181.5 96.2846 182.651 96.3489C183.833 96.4119 184.864 96.3252 185.744 96.0858C186.622 95.847 187.421 95.4725 188.141 94.9628C188.86 94.4543 189.515 93.8489 190.107 93.1477C190.697 92.4477 191.281 91.6835 191.856 90.855C192.4 92.0659 193.103 93.0047 193.966 93.6737C194.829 94.3422 195.74 94.741 196.699 94.8677C197.657 94.9943 198.633 94.8604 199.624 94.4616C200.614 94.064 201.509 93.3871 202.308 92.4313C202.835 91.8453 203.331 91.1792 203.797 90.4429C203.995 90.7877 204.205 91.1204 204.442 91.4277C205.225 92.4477 206.288 93.1477 207.631 93.5301C209.069 93.9125 210.474 93.9768 211.849 93.7216C213.223 93.4671 214.534 93.0047 215.78 92.3362C217.027 91.6671 218.185 90.8635 219.257 89.9235C220.327 88.9841 221.262 88.0053 222.061 86.9854C222.029 87.7181 222.013 88.4114 222.013 89.0635C222.013 89.7168 221.997 90.4247 221.966 91.1895C220.367 92.3047 218.857 93.6422 217.435 95.2022C216.012 96.7622 214.765 98.4264 213.695 100.194C212.624 101.961 211.785 103.753 211.179 105.568C210.571 107.384 210.275 109.08 210.291 110.657C210.307 112.233 210.682 113.61 211.418 114.788C212.152 115.967 213.351 116.81 215.013 117.32C216.74 117.862 218.257 117.877 219.569 117.368C220.879 116.858 222.021 116.014 222.996 114.836C223.971 113.658 224.77 
112.233 225.394 110.561C226.017 108.889 226.512 107.145 226.88 105.33C227.247 103.515 227.479 101.73 227.575 99.9797C227.671 98.2276 227.671 96.6664 227.575 95.2974C230.324 94.1513 232.577 92.7022 234.335 90.9501C236.093 89.1999 237.547 87.352 238.698 85.409C239.049 84.9314 239.169 84.3581 239.058 83.6896C238.945 83.0206 238.674 82.4472 238.243 81.9697Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M298.724 78.9135C298.82 78.1814 298.964 77.4087 299.155 76.5966C299.347 75.7845 299.587 74.996 299.875 74.2318C300.162 73.4676 300.498 72.807 300.882 72.2494C301.265 71.6924 301.673 71.2943 302.104 71.0549C302.536 70.8167 302.974 70.8403 303.423 71.1264C303.902 71.4137 304.197 72.0185 304.31 72.9415C304.421 73.8663 304.31 74.853 303.974 75.9039C303.638 76.9554 303.039 77.942 302.176 78.8657C301.313 79.7899 300.146 80.3941 298.676 80.6808C298.612 80.236 298.628 79.6463 298.724 78.9135ZM315.336 80.8717C314.809 80.7135 314.306 80.6972 313.826 80.8244C313.347 80.9517 313.043 81.2862 312.916 81.8281C312.659 82.8468 312.251 83.8898 311.692 84.9565C311.133 86.0238 310.446 87.0346 309.632 87.9904C308.817 88.9455 307.897 89.7898 306.875 90.5219C305.851 91.2546 304.781 91.78 303.662 92.0982C302.543 92.4491 301.616 92.4885 300.882 92.2176C300.146 91.9479 299.563 91.4855 299.132 90.8328C298.7 90.1801 298.388 89.3916 298.197 88.468C298.005 87.5443 297.893 86.5892 297.861 85.6013C299.683 85.7292 301.305 85.4032 302.728 84.622C304.149 83.8426 305.356 82.8068 306.347 81.5171C307.337 80.2275 308.089 78.7784 308.6 77.1699C309.111 75.5621 309.399 73.9615 309.463 72.3688C309.495 70.8718 309.272 69.6064 308.792 68.5713C308.313 67.5367 307.665 66.7313 306.85 66.1586C306.036 65.5853 305.1 65.2507 304.046 65.1556C302.992 65.0598 301.92 65.2034 300.833 65.5853C299.522 66.0313 298.412 66.7555 297.501 67.7592C296.59 68.7622 295.831 69.9252 295.224 71.2464C294.617 72.5682 294.137 73.993 293.786 75.5215C293.434 77.0505 293.178 78.5554 293.019 80.0366C292.875 81.3656 292.798 
82.6365 292.771 83.8632C292.702 84.0189 292.636 84.1686 292.563 84.3353C292.067 85.4668 291.491 86.5734 290.837 87.6558C290.182 88.7389 289.454 89.6467 288.656 90.3788C287.857 91.1116 287.026 91.3661 286.163 91.1431C285.651 91.0164 285.372 90.4261 285.324 89.3758C285.276 88.3243 285.331 87.0189 285.491 85.4583C285.651 83.8983 285.835 82.2093 286.043 80.3941C286.25 78.579 286.354 76.8439 286.354 75.1875C286.354 73.7542 286.082 72.3773 285.539 71.0549C284.995 69.7343 284.252 68.6349 283.31 67.7592C282.367 66.8828 281.272 66.3016 280.026 66.0156C278.779 65.7283 277.437 65.9198 275.999 66.5883C274.56 67.2574 273.417 68.1967 272.571 69.407C271.723 70.6179 270.948 71.8912 270.245 73.2288C269.989 72.2094 269.614 71.2628 269.118 70.3864C268.623 69.5107 268.016 68.7464 267.297 68.0931C266.577 67.441 265.769 66.9313 264.876 66.5646C263.981 66.1992 263.037 66.0156 262.046 66.0156C261.088 66.0156 260.201 66.1992 259.386 66.5646C258.571 66.9313 257.828 67.4004 257.156 67.9737C256.485 68.5476 255.878 69.1919 255.334 69.9088C254.791 70.6252 254.311 71.3343 253.896 72.0343C253.831 71.2064 253.76 70.4822 253.681 69.8603C253.6 69.2398 253.456 68.7143 253.249 68.2846C253.041 67.8543 252.746 67.5283 252.362 67.3052C251.978 67.0828 251.435 66.9707 250.732 66.9707C250.38 66.9707 250.028 67.0422 249.677 67.1852C249.325 67.3289 249.013 67.5283 248.742 67.7828C248.47 68.0386 248.263 68.3482 248.119 68.7143C247.975 69.0804 247.936 69.5028 247.999 69.9803C248.031 70.3312 248.119 70.7525 248.263 71.2464C248.406 71.7403 248.542 72.3858 248.67 73.1809C248.798 73.9773 248.902 74.9409 248.982 76.0712C249.062 77.2021 249.085 78.5875 249.054 80.2275C249.021 81.8681 248.902 83.7862 248.694 85.9837C248.486 88.1813 248.158 90.7291 247.711 93.6267C247.647 94.2957 247.903 94.8376 248.479 95.2515C249.054 95.6648 249.709 95.9036 250.444 95.9678C251.179 96.0315 251.875 95.9036 252.53 95.586C253.185 95.2666 253.561 94.7097 253.656 93.9139C253.752 92.417 253.936 90.8249 254.208 89.1364C254.479 87.4492 
254.815 85.7771 255.215 84.1207C255.614 82.465 256.069 80.8887 256.581 79.3911C257.092 77.8942 257.66 76.573 258.283 75.4263C258.907 74.2797 259.554 73.3645 260.225 72.6797C260.896 71.9949 261.599 71.6524 262.335 71.6524C263.229 71.6524 263.924 72.0579 264.42 72.87C264.915 73.6827 265.266 74.7263 265.475 75.999C265.682 77.2736 265.778 78.6675 265.763 80.1796C265.746 81.6923 265.682 83.1492 265.571 84.5504C265.459 85.9522 265.331 87.2019 265.187 88.3007C265.043 89.3995 264.939 90.1564 264.876 90.5697C264.876 91.3025 265.155 91.8831 265.714 92.3134C266.273 92.743 266.896 92.9982 267.584 93.0776C268.272 93.1576 268.918 93.0297 269.526 92.6952C270.133 92.3606 270.485 91.7964 270.581 90.9994C270.9 88.7067 271.34 86.4062 271.899 84.0971C272.458 81.7881 273.098 79.7184 273.817 77.8869C274.536 76.0554 275.335 74.5585 276.214 73.3961C277.093 72.2343 278.028 71.6524 279.019 71.6524C279.53 71.6524 279.922 72.0033 280.193 72.7033C280.465 73.4039 280.601 74.3591 280.601 75.5694C280.601 76.4615 280.529 77.3772 280.386 78.3166C280.241 79.256 280.074 80.2275 279.882 81.2305C279.69 82.2341 279.522 83.2608 279.378 84.3117C279.235 85.3632 279.163 86.4613 279.163 87.608C279.163 88.4043 279.243 89.3279 279.403 90.3788C279.562 91.4291 279.865 92.4255 280.313 93.3642C280.761 94.3042 281.376 95.1 282.16 95.7527C282.943 96.4054 283.941 96.7321 285.155 96.7321C286.978 96.7321 288.591 96.3418 289.998 95.5618C291.404 94.7818 292.611 93.763 293.618 92.5049C293.67 92.4388 293.718 92.3685 293.769 92.3031C293.846 92.4891 293.914 92.6861 294.001 92.863C294.688 94.2642 295.623 95.3466 296.806 96.1115C297.988 96.8757 299.379 97.2975 300.978 97.3775C302.575 97.4563 304.317 97.1618 306.204 96.4933C307.609 95.9836 308.832 95.3466 309.871 94.5824C310.909 93.8182 311.844 92.8867 312.675 91.7879C313.507 90.6891 314.265 89.4231 314.953 87.9904C315.641 86.5565 316.335 84.9171 317.038 83.0692C317.166 82.5608 317.046 82.1068 316.679 81.7081C316.311 81.3105 315.864 81.0317 315.336 80.8717Z' 
fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M341.393 75.5432C341.233 76.4832 341.018 77.5189 340.746 78.6486C340.474 79.7795 340.131 80.9498 339.715 82.1601C339.3 83.3703 338.788 84.4612 338.181 85.4321C337.574 86.4042 336.878 87.1757 336.096 87.7491C335.312 88.3224 334.41 88.5612 333.387 88.4654C332.875 88.4024 332.483 88.0521 332.212 87.4145C331.94 86.7782 331.797 85.9655 331.78 84.9782C331.764 83.9915 331.852 82.9085 332.044 81.7298C332.236 80.5522 332.531 79.3971 332.932 78.2662C333.331 77.1365 333.818 76.0929 334.393 75.1371C334.969 74.182 335.632 73.4414 336.383 72.916C337.134 72.3905 337.958 72.1445 338.852 72.1754C339.747 72.2075 340.706 72.6529 341.729 73.5129C341.664 73.9275 341.553 74.6044 341.393 75.5432ZM358.437 79.1977C357.941 78.9431 357.43 78.888 356.903 79.031C356.376 79.174 356 79.6601 355.777 80.488C355.649 81.3801 355.361 82.4304 354.914 83.6406C354.466 84.8509 353.914 85.9982 353.26 87.08C352.604 88.163 351.853 89.063 351.006 89.7793C350.159 90.4963 349.256 90.823 348.298 90.7581C347.498 90.6951 346.938 90.289 346.62 89.5406C346.299 88.7921 346.132 87.8533 346.116 86.7218C346.099 85.5921 346.212 84.3182 346.451 82.9007C346.691 81.4837 346.979 80.0746 347.314 78.6722C347.65 77.2716 347.994 75.9256 348.346 74.6359C348.697 73.3463 348.984 72.2554 349.209 71.3639C349.464 70.5675 349.384 69.8912 348.969 69.333C348.553 68.7766 348.034 68.3778 347.411 68.1391C346.787 67.9003 346.155 67.8366 345.516 67.9481C344.877 68.0597 344.462 68.4021 344.27 68.9748C342.384 67.3506 340.57 66.4748 338.829 66.3476C337.086 66.2203 335.48 66.6027 334.01 67.4942C332.539 68.3857 331.237 69.6754 330.103 71.3639C328.968 73.0523 328.049 74.8911 327.345 76.8814C326.642 78.8716 326.203 80.9025 326.027 82.9722C325.851 85.0424 325.987 86.9297 326.435 88.6333C326.883 90.3369 327.673 91.7308 328.808 92.8126C329.942 93.8956 331.485 94.4375 333.435 94.4375C334.298 94.4375 335.129 94.2623 335.928 93.912C336.726 93.5611 337.462 93.1472 338.133 
92.6696C338.804 92.192 339.395 91.6902 339.908 91.1648C340.418 90.6393 340.818 90.2018 341.106 89.8509C341.329 90.9975 341.697 91.9696 342.209 92.7654C342.719 93.5611 343.303 94.215 343.958 94.7235C344.613 95.2326 345.301 95.6071 346.02 95.8465C346.739 96.0853 347.435 96.2047 348.105 96.2047C349.608 96.2047 351.013 95.695 352.325 94.6756C353.635 93.6575 354.81 92.4066 355.849 90.926C356.887 89.4448 357.743 87.8848 358.413 86.2442C359.085 84.6043 359.532 83.1473 359.756 81.8728C359.98 81.3952 359.939 80.894 359.636 80.3686C359.332 79.8431 358.933 79.4534 358.437 79.1977Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M444.738 105.571C444.467 106.653 444.043 107.57 443.467 108.318C442.892 109.066 442.173 109.456 441.31 109.489C440.767 109.52 440.351 109.233 440.063 108.629C439.776 108.023 439.576 107.243 439.464 106.288C439.352 105.332 439.304 104.265 439.32 103.087C439.336 101.909 439.384 100.746 439.464 99.5996C439.543 98.4536 439.64 97.3857 439.752 96.3991C439.863 95.4112 439.951 94.6482 440.015 94.1064C441.102 94.2336 442.006 94.7027 442.724 95.5154C443.443 96.3275 443.995 97.2906 444.378 98.4057C444.762 99.5202 444.985 100.723 445.05 102.012C445.113 103.302 445.009 104.488 444.738 105.571ZM427.382 105.571C427.111 106.653 426.687 107.57 426.112 108.318C425.537 109.066 424.817 109.456 423.954 109.489C423.411 109.52 422.996 109.233 422.708 108.629C422.42 108.023 422.22 107.243 422.109 106.288C421.996 105.332 421.948 104.265 421.965 103.087C421.98 101.909 422.028 100.746 422.109 99.5996C422.188 98.4536 422.284 97.3857 422.396 96.3991C422.508 95.4112 422.595 94.6482 422.66 94.1064C423.746 94.2336 424.65 94.7027 425.368 95.5154C426.088 96.3275 426.639 97.2906 427.023 98.4057C427.407 99.5202 427.63 100.723 427.694 102.012C427.757 103.302 427.653 104.488 427.382 105.571ZM409.572 78.4375C409.539 79.2011 409.467 79.8781 409.355 80.4672C409.243 81.0575 409.092 81.4308 408.9 81.5902C408.548 81.3987 408.116 80.906 407.605 80.109C407.094 79.3133 
406.695 78.4127 406.406 77.4096C406.119 76.4066 406.03 75.42 406.143 74.4479C406.254 73.477 406.758 72.7212 407.653 72.1788C408.004 71.9879 408.308 72.0594 408.564 72.394C408.82 72.7285 409.027 73.2139 409.188 73.8509C409.347 74.4885 409.458 75.2206 409.523 76.0485C409.587 76.8769 409.603 77.6727 409.572 78.4375ZM405.328 87.9677C404.832 88.4925 404.28 88.9464 403.674 89.3289C403.066 89.7113 402.443 89.9979 401.804 90.1889C401.164 90.3804 400.589 90.4276 400.078 90.3319C398.64 90.0458 397.537 89.424 396.77 88.4689C396.003 87.5137 395.515 86.3913 395.308 85.1017C395.1 83.8114 395.123 82.4338 395.38 80.969C395.635 79.5042 396.066 78.143 396.674 76.8848C397.281 75.6266 398.017 74.5436 398.879 73.6364C399.742 72.7285 400.685 72.1637 401.708 71.94C401.324 73.5642 401.197 75.2448 401.324 76.98C401.452 78.7157 401.868 80.3478 402.571 81.8762C403.018 82.8011 403.554 83.6441 404.177 84.4083C404.801 85.1732 405.56 85.8259 406.455 86.3671C406.199 86.9089 405.823 87.4422 405.328 87.9677ZM458.378 78.9151C458.474 78.183 458.617 77.4096 458.81 76.5975C459.001 75.786 459.241 74.9976 459.528 74.2333C459.816 73.4685 460.152 72.8079 460.536 72.2509C460.92 71.694 461.326 71.2952 461.758 71.0564C462.19 70.8176 462.629 70.8413 463.076 71.1279C463.556 71.4152 463.851 72.02 463.963 72.943C464.075 73.8673 463.963 74.8539 463.628 75.9054C463.292 76.9563 462.693 77.9436 461.83 78.8666C460.968 79.7914 459.8 80.3957 458.33 80.6823C458.266 80.2369 458.282 79.6478 458.378 78.9151ZM477.7 78.9151C477.796 78.183 477.939 77.4096 478.131 76.5975C478.323 75.786 478.563 74.9976 478.851 74.2333C479.138 73.4685 479.473 72.8079 479.857 72.2509C480.241 71.694 480.649 71.2952 481.08 71.0564C481.512 70.8176 481.951 70.8413 482.398 71.1279C482.878 71.4152 483.173 72.02 483.285 72.943C483.397 73.8673 483.285 74.8539 482.95 75.9054C482.614 76.9563 482.015 77.9436 481.152 78.8666C480.289 79.7914 479.122 80.3957 477.652 80.6823C477.588 80.2369 477.604 79.6478 477.7 78.9151ZM495.655 81.7096C495.287 81.312 494.84 
81.0332 494.313 80.8732C493.785 80.7144 493.282 80.6987 492.802 80.826C492.323 80.9532 492.018 81.2878 491.891 81.829C491.635 82.8484 491.228 83.8914 490.669 84.9574C490.109 86.0253 489.422 87.0362 488.607 87.9913C487.792 88.9464 486.873 89.7913 485.851 90.5234C484.827 91.2561 483.757 91.7816 482.639 92.0991C481.519 92.4506 480.592 92.49 479.857 92.2191C479.122 91.9488 478.539 91.487 478.107 90.8343C477.676 90.181 477.365 89.3931 477.172 88.4689C476.981 87.5459 476.868 86.5907 476.837 85.6029C478.659 85.7307 480.281 85.4047 481.703 84.6235C483.125 83.8435 484.332 82.8077 485.324 81.5181C486.314 80.229 487.065 78.7799 487.576 77.1715C488.087 75.563 488.375 73.963 488.44 72.3703C488.471 70.8734 488.247 69.6073 487.768 68.5722C487.289 67.5377 486.642 66.7328 485.827 66.1601C485.011 65.5862 484.077 65.2522 483.021 65.1565C481.967 65.0607 480.896 65.205 479.809 65.5862C478.498 66.0328 477.388 66.7571 476.478 67.7601C475.567 68.7637 474.807 69.9267 474.2 71.2473C473.592 72.5697 473.113 73.9939 472.761 75.523C472.409 77.0515 472.154 78.5569 471.995 80.0375C471.839 81.4744 471.755 82.8496 471.736 84.1659C471.615 84.4283 471.486 84.692 471.347 84.9574C470.787 86.0253 470.1 87.0362 469.285 87.9913C468.471 88.9464 467.551 89.7913 466.529 90.5234C465.506 91.2561 464.435 91.7816 463.317 92.0991C462.197 92.4506 461.271 92.49 460.536 92.2191C459.8 91.9488 459.217 91.487 458.786 90.8343C458.355 90.181 458.043 89.3931 457.851 88.4689C457.659 87.5459 457.547 86.5907 457.515 85.6029C459.337 85.7307 460.959 85.4047 462.382 84.6235C463.803 83.8435 465.01 82.8077 466.001 81.5181C466.992 80.229 467.743 78.7799 468.254 77.1715C468.765 75.563 469.054 73.963 469.117 72.3703C469.149 70.8734 468.926 69.6073 468.447 68.5722C467.967 67.5377 467.319 66.7328 466.504 66.1601C465.689 65.5862 464.755 65.2522 463.7 65.1565C462.645 65.0607 461.574 65.205 460.488 65.5862C459.176 66.0328 458.066 66.7571 457.156 67.7601C456.245 68.7637 455.485 69.9267 454.878 71.2473C454.271 72.5697 453.792 73.9939 
453.44 75.523C453.088 77.0515 452.832 78.5569 452.673 80.0375C452.582 80.8726 452.522 81.6823 452.477 82.4774C452.168 82.7393 451.867 83.0029 451.546 83.2617C450.444 84.1538 449.284 84.9574 448.07 85.6744C446.855 86.3913 445.592 86.9804 444.283 87.4422C442.971 87.904 441.629 88.1828 440.255 88.278L443.228 56.5578C443.42 55.8887 443.324 55.3003 442.94 54.7906C442.557 54.2809 442.061 53.9306 441.454 53.7397C440.847 53.5482 440.199 53.5645 439.512 53.787C438.824 54.0106 438.258 54.5203 437.81 55.3154C437.586 56.5263 437.354 58.182 437.115 60.2838C436.875 62.3856 436.635 64.6789 436.396 67.1631C436.156 69.6473 435.916 72.2109 435.677 74.8539C435.437 77.4981 435.229 79.966 435.053 82.2587C435.045 82.3605 435.039 82.4526 435.031 82.5532C434.751 82.7896 434.48 83.0277 434.19 83.2617C433.088 84.1538 431.928 84.9574 430.714 85.6744C429.499 86.3913 428.237 86.9804 426.927 87.4422C425.616 87.904 424.273 88.1828 422.899 88.278L425.872 56.5578C426.064 55.8887 425.968 55.3003 425.585 54.7906C425.201 54.2809 424.705 53.9306 424.098 53.7397C423.491 53.5482 422.843 53.5645 422.156 53.787C421.469 54.0106 420.902 54.5203 420.454 55.3154C420.23 56.5263 419.999 58.182 419.76 60.2838C419.519 62.3856 419.28 64.6789 419.04 67.1631C418.8 69.6473 418.561 72.2109 418.321 74.8539C418.082 77.4981 417.873 79.966 417.698 82.2587C417.694 82.3047 417.691 82.3465 417.687 82.3926C417.185 82.6247 416.638 82.8284 416.043 82.9993C415.436 83.175 414.749 83.2786 413.982 83.3102C414.11 82.7362 414.213 82.0993 414.293 81.3987C414.373 80.6987 414.438 79.966 414.486 79.2011C414.534 78.4375 414.549 77.6727 414.534 76.9084C414.517 76.1436 414.477 75.4436 414.414 74.806C414.253 73.4376 413.958 72.1394 413.527 70.9128C413.095 69.6873 412.512 68.6607 411.777 67.8316C411.041 67.0037 410.123 66.4462 409.019 66.1601C407.917 65.8734 406.63 65.9686 405.161 66.4462C402.986 66.1601 401.029 66.3595 399.287 67.0437C397.545 67.7292 396.034 68.7237 394.756 70.0291C393.478 71.3358 392.431 72.8715 391.616 74.6394C390.801 
76.4066 390.257 78.2224 389.986 80.0848C389.871 80.8744 389.815 81.6605 389.798 82.4447C389.303 83.4544 388.761 84.3368 388.164 85.0774C387.317 86.1283 386.438 86.9883 385.527 87.6568C384.616 88.3258 383.713 88.8355 382.819 89.1858C381.923 89.5367 381.124 89.7755 380.421 89.9022C379.59 90.0616 378.791 90.0779 378.024 89.9501C377.257 89.8234 376.553 89.4567 375.915 88.8513C375.403 88.4058 375.011 87.6889 374.74 86.7016C374.468 85.7144 374.309 84.5926 374.261 83.3338C374.213 82.0756 374.261 80.7617 374.404 79.3926C374.548 78.0236 374.795 76.7254 375.147 75.4994C375.499 74.2733 375.945 73.1746 376.49 72.2024C377.032 71.2322 377.672 70.5388 378.408 70.1249C378.822 70.1891 379.079 70.4352 379.175 70.8649C379.271 71.2952 379.294 71.8049 379.246 72.394C379.199 72.9836 379.127 73.5885 379.031 74.2091C378.935 74.8303 378.887 75.3485 378.887 75.7618C379.047 76.6218 379.358 77.2909 379.822 77.7684C380.285 78.246 380.805 78.5254 381.38 78.6042C381.955 78.6842 382.522 78.549 383.083 78.1981C383.641 77.8484 384.096 77.2909 384.449 76.526C384.48 76.5581 384.528 76.5739 384.592 76.5739L385.264 70.5073C385.455 69.6788 385.327 68.9467 384.88 68.3098C384.432 67.6728 383.841 67.3062 383.106 67.211C382.179 65.8734 380.924 65.165 379.342 65.085C377.76 65.0056 376.138 65.5231 374.476 66.6377C373.453 67.371 372.55 68.3813 371.767 69.671C370.983 70.9613 370.345 72.394 369.85 73.9703C369.353 75.5466 369.002 77.2115 368.795 78.963C368.587 80.7144 368.547 82.4187 368.674 84.0738C368.802 85.7307 369.098 87.2913 369.562 88.7555C370.025 90.221 370.672 91.447 371.504 92.4337C372.207 93.2937 373.005 93.9233 373.9 94.3215C374.795 94.7197 375.73 94.9658 376.705 95.0615C377.68 95.1567 378.647 95.1167 379.606 94.9421C380.565 94.7676 381.476 94.5209 382.339 94.2015C383.457 93.7882 384.609 93.2621 385.791 92.6252C386.973 91.9888 388.108 91.224 389.195 90.3319C389.767 89.8628 390.317 89.3513 390.849 88.8028C391.091 89.4016 391.362 89.981 391.688 90.5234C392.551 91.9561 393.717 93.1191 395.188 
94.0106C396.657 94.9021 398.464 95.3312 400.605 95.3003C402.907 95.2682 405.032 94.6876 406.982 93.5567C408.932 92.427 410.53 90.7616 411.777 88.5646C413.644 88.5646 415.481 88.258 417.287 87.6489C417.272 87.8416 417.256 88.0446 417.242 88.2307C417.115 89.9186 417.05 91.0646 417.05 91.67C417.019 92.7209 416.947 94.0185 416.835 95.5627C416.723 97.1075 416.651 98.7318 416.619 100.435C416.588 102.139 416.651 103.859 416.811 105.595C416.971 107.33 417.306 108.907 417.818 110.325C418.328 111.741 419.055 112.944 419.999 113.932C420.941 114.918 422.18 115.508 423.715 115.699C425.345 115.921 426.751 115.635 427.934 114.839C429.116 114.042 430.075 112.952 430.811 111.567C431.546 110.181 432.064 108.581 432.369 106.766C432.672 104.95 432.76 103.127 432.633 101.295C432.504 99.4639 432.168 97.7366 431.625 96.113C431.082 94.4882 430.33 93.1506 429.372 92.0991C429.948 91.9409 430.634 91.6385 431.434 91.1919C432.232 90.7464 433.055 90.2446 433.903 89.687C434.111 89.5501 434.316 89.4058 434.524 89.2652C434.446 90.3937 434.406 91.1985 434.406 91.67C434.375 92.7209 434.303 94.0185 434.19 95.5627C434.079 97.1075 434.007 98.7318 433.975 100.435C433.943 102.139 434.007 103.859 434.167 105.595C434.326 107.33 434.662 108.907 435.173 110.325C435.684 111.741 436.412 112.944 437.354 113.932C438.297 114.918 439.536 115.508 441.071 115.699C442.7 115.921 444.106 115.635 445.289 114.839C446.472 114.042 447.431 112.952 448.166 111.567C448.901 110.181 449.42 108.581 449.724 106.766C450.028 104.95 450.115 103.127 449.988 101.295C449.86 99.4639 449.524 97.7366 448.982 96.113C448.437 94.4882 447.687 93.1506 446.727 92.0991C447.303 91.9409 447.99 91.6385 448.789 91.1919C449.588 90.7464 450.411 90.2446 451.259 89.687C451.699 89.3974 452.136 89.0986 452.573 88.7913C452.737 90.3488 453.091 91.7149 453.655 92.864C454.343 94.2658 455.277 95.3482 456.46 96.113C457.642 96.8766 459.033 97.299 460.632 97.3784C462.23 97.4572 463.971 97.1633 465.858 96.4942C467.264 95.9851 468.486 95.3482 469.525 
94.5839C470.563 93.8191 471.498 92.8876 472.33 91.7894C472.378 91.7258 472.423 91.6567 472.47 91.5925C472.618 92.0385 472.782 92.467 472.977 92.864C473.665 94.2658 474.6 95.3482 475.782 96.113C476.964 96.8766 478.355 97.299 479.953 97.3784C481.551 97.4572 483.293 97.1633 485.179 96.4942C486.586 95.9851 487.808 95.3482 488.847 94.5839C489.885 93.8191 490.82 92.8876 491.652 91.7894C492.483 90.6901 493.241 89.424 493.929 87.9913C494.616 86.558 495.311 84.9186 496.015 83.0708C496.142 82.5617 496.022 82.1078 495.655 81.7096Z' fill='%230D0C23'/%3E%3C/svg%3E%0A\");\n border-radius: 6px;\n box-shadow: 0px 2px 3px rgba(0, 0, 0, 0.1);\n}\n\n:root[data-color=\"dark\"] .btn-buymeacoffee, :root[data-color=\"night\"] .btn-buymeacoffee {\n box-shadow: 0px 2px 3px rgba(255, 255, 255, 0.1);\n}\n\n.btn-close {\n background: var(--background-fg);\n border: 1px dotted var(--border-color);\n border-radius: 4px;\n cursor: pointer;\n}\n", - ".dropdown {\n position: relative;\n}\n\n.dropdown-btn {\n display: flex;\n flex-direction: row;\n box-shadow: var(--box-shadow);\n border-radius: 6px;\n padding: 6px;\n cursor: pointer;\n white-space: nowrap;\n}\n\n.dropdown-btn .icon-select {\n opacity: .4;\n}\n\n.dropdown-menu {\n display: none;\n position: absolute;\n right: 0;\n top: 34px;\n min-width: 100px;\n max-height: 240px;\n overflow-x: auto;\n background: var(--background);\n color: var(--color3);\n box-shadow: var(--box-shadow2);\n z-index: 1;\n border-radius: 6px;\n padding: 3px;\n}\n\n.dropdown-menu.show {\n display: block;\n}\n\n.dropdown-menu button, .dropdown-menu a {\n width: 100%;\n display: flex;\n gap: 2px;\n padding: 6px;\n align-items: center;\n justify-content: center;\n cursor: pointer;\n}\n\n.dropdown-menu button:hover, .dropdown-menu a:hover {\n background: var(--background-fg);\n}\n", - "/* Background */ .chroma { font-size: .9em; color: var(--chroma-base05); background-color: var(--chroma-base00); border-radius: 6px; padding: 16px 24px; overflow-x: auto; }\n/* Other */ 
.chroma .x { color: var(--chroma-base05) }\n/* Error */ .chroma .err { color: var(--chroma-base08) }\n/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; }\n/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; width: auto; overflow: auto; display: block; }\n/* LineHighlight */ .chroma .hl { display: block; width: 100%; background-color: var(--chroma-base02) }\n/* LineNumbersTable */ .chroma .lnt { margin-right: 0.4em; padding: 0 0.4em 0 0.4em; }\n/* LineNumbers */ .chroma .ln { margin-right: 0.4em; padding: 0 0.4em 0 0.4em; border-right: 1px solid var(--chroma-base0A); }\n/* Line */ .chroma .line { display: flex; }\n/* Keyword */ .chroma .k { color: var(--chroma-base0E) }\n/* KeywordConstant */ .chroma .kc { color: var(--chroma-base0E) }\n/* KeywordDeclaration */ .chroma .kd { color: var(--chroma-base0E) }\n/* KeywordNamespace */ .chroma .kn { color: var(--chroma-base0E) }\n/* KeywordPseudo */ .chroma .kp { color: var(--chroma-base0D) }\n/* KeywordReserved */ .chroma .kr { color: var(--chroma-base0E) }\n/* KeywordType */ .chroma .kt { color: var(--chroma-base0E) }\n/* Name */ .chroma .n { color: var(--chroma-base05) }\n/* NameAttribute */ .chroma .na { color: var(--chroma-base05) }\n/* NameBuiltin */ .chroma .nb { color: var(--chroma-base0D) }\n/* NameBuiltinPseudo */ .chroma .bp { color: var(--chroma-base0D) }\n/* NameClass */ .chroma .nc { color: var(--chroma-base0A) }\n/* NameConstant */ .chroma .no { color: var(--chroma-base09) }\n/* NameDecorator */ .chroma .nd { color: var(--chroma-base09) }\n/* NameEntity */ .chroma .ni { color: var(--chroma-base0A) }\n/* NameException */ .chroma .ne { color: var(--chroma-base0A) }\n/* NameFunction */ .chroma .nf { color: var(--chroma-base05) }\n/* NameFunctionMagic */ .chroma .fm { color: var(--chroma-base05) }\n/* NameLabel */ .chroma .nl { color: var(--chroma-base08) }\n/* NameNamespace */ .chroma .nn { color: var(--chroma-base0A) }\n/* NameOther */ 
.chroma .nx { color: var(--chroma-base0D) }\n/* NameProperty */ .chroma .py { color: var(--chroma-base08) }\n/* NameTag */ .chroma .nt { color: var(--chroma-base0D) }\n/* NameVariable */ .chroma .nv { color: var(--chroma-base0D) }\n/* NameVariableClass */ .chroma .vc { color: var(--chroma-base0D) }\n/* NameVariableGlobal */ .chroma .vg { color: var(--chroma-base0D) }\n/* NameVariableInstance */ .chroma .vi { color: var(--chroma-base08) }\n/* NameVariableMagic */ .chroma .vm { color: var(--chroma-base0D) }\n/* Literal */ .chroma .l { color: var(--chroma-base0B) }\n/* LiteralDate */ .chroma .ld { color: var(--chroma-base0B) }\n/* LiteralString */ .chroma .s { color: var(--chroma-base0B) }\n/* LiteralStringAffix */ .chroma .sa { color: var(--chroma-base0B) }\n/* LiteralStringBacktick */ .chroma .sb { color: var(--chroma-base0B) }\n/* LiteralStringChar */ .chroma .sc { color: var(--chroma-base0B) }\n/* LiteralStringDelimiter */ .chroma .dl { color: var(--chroma-base0F) }\n/* LiteralStringDoc */ .chroma .sd { color: var(--chroma-base03) }\n/* LiteralStringDouble */ .chroma .s2 { color: var(--chroma-base0B) }\n/* LiteralStringEscape */ .chroma .se { color: var(--chroma-base0C) }\n/* LiteralStringHeredoc */ .chroma .sh { color: var(--chroma-base0B) }\n/* LiteralStringInterpol */ .chroma .si { color: var(--chroma-base0F) }\n/* LiteralStringOther */ .chroma .sx { color: var(--chroma-base0B) }\n/* LiteralStringRegex */ .chroma .sr { color: var(--chroma-base0C) }\n/* LiteralStringSingle */ .chroma .s1 { color: var(--chroma-base0B) }\n/* LiteralStringSymbol */ .chroma .ss { color: var(--chroma-base0B) }\n/* LiteralNumber */ .chroma .m { color: var(--chroma-base09) }\n/* LiteralNumberBin */ .chroma .mb { color: var(--chroma-base09) }\n/* LiteralNumberFloat */ .chroma .mf { color: var(--chroma-base09) }\n/* LiteralNumberHex */ .chroma .mh { color: var(--chroma-base09) }\n/* LiteralNumberInteger */ .chroma .mi { color: var(--chroma-base09) }\n/* LiteralNumberIntegerLong */ 
.chroma .il { color: var(--chroma-base09) }\n/* LiteralNumberOct */ .chroma .mo { color: var(--chroma-base09) }\n/* Operator */ .chroma .o { color: var(--chroma-base05) }\n/* OperatorWord */ .chroma .ow { color: var(--chroma-base05) }\n/* Punctuation */ .chroma .p { color: var(--chroma-base05) }\n/* Comment */ .chroma .c { color: var(--chroma-base03) }\n/* CommentHashbang */ .chroma .ch { color: var(--chroma-base03) }\n/* CommentMultiline */ .chroma .cm { color: var(--chroma-base03) }\n/* CommentSingle */ .chroma .c1 { color: var(--chroma-base03) }\n/* CommentSpecial */ .chroma .cs { color: var(--chroma-base03) }\n/* CommentPreproc */ .chroma .cp { color: var(--chroma-base0F) }\n/* CommentPreprocFile */ .chroma .cpf { color: var(--chroma-base0B) }\n/* Generic */ .chroma .g { color: var(--chroma-base05) }\n/* GenericDeleted */ .chroma .gd { color: var(--chroma-base08) }\n/* GenericEmph */ .chroma .ge { color: var(--chroma-base05); font-style: italic }\n/* GenericError */ .chroma .gr { color: var(--chroma-base05) }\n/* GenericHeading */ .chroma .gh { color: var(--chroma-base0D) }\n/* GenericInserted */ .chroma .gi { color: var(--chroma-base0B) }\n/* GenericOutput */ .chroma .go { color: var(--chroma-base05) }\n/* GenericPrompt */ .chroma .gp { color: var(--chroma-base05) }\n/* GenericStrong */ .chroma .gs { color: var(--chroma-base05); font-weight: bold }\n/* GenericSubheading */ .chroma .gu { color: var(--chroma-base0D) }\n/* GenericTraceback */ .chroma .gt { color: var(--chroma-base05) }\n/* GenericUnderline */ .chroma .gl { color: var(--chroma-base05); text-decoration: underline }\n/* TextWhitespace */ .chroma .w { color: var(--chroma-base00); }" - ], - "names": [], - "mappings": 
"AAAA;;;;GAIG,ACDH,AAAA,CAAC,CAAC,KAAM,CAAA,IAAI,AAAA,CAAC,AAAA,uDAAuD,AAAA,CAAC,AAAA,IAAI,AAAA,CAAC,AAAA,eAAe,AAAA,CAAC,CAAE,CAC1F,GAAG,CAAE,KAAK,CACV,OAAO,CAAE,MAAM,CAChB,AAED,AAAA,CAAC,CACD,CAAC,EAAE,MAAM,CACT,CAAC,EAAE,KAAK,AAAC,CACP,UAAU,CAAE,UAAU,CACvB,AAED,AAAA,IAAI,AAAC,CACH,qBAAqB,CAAE,IAAI,CAC3B,wBAAwB,CAAE,IAAI,CAC9B,gBAAgB,CAAE,IAAI,CACvB,AAED,AAAA,CAAC,CAAE,MAAM,AAAC,CACR,MAAM,CAAE,MAAM,CACf,AAED,AAAA,EAAE,CAAE,EAAE,CAAE,IAAI,AAAC,CACX,UAAU,CAAE,IAAI,CACjB,AAED,AAAA,GAAG,AAAC,CACF,eAAe,CAAE,IAAI,CACrB,cAAc,CAAE,IAAI,CACrB,AAED,AAAA,KAAK,AAAC,CACJ,eAAe,CAAE,QAAQ,CAC1B,AAED,AAAA,KAAK,CAAE,QAAQ,AAAC,CACd,mBAAmB,CAAE,IAAI,CAC1B,AAED,AAAA,QAAQ,AAAC,CACP,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,KAAK,AAAC,CACJ,kBAAkB,CAAE,MAAM,CAC1B,UAAU,CAAE,MAAM,CACnB,CAEA,AAAD,KAAO,CAAA,GAAG,CAAE,CACV,GAAG,CAAE,MAAM,CACX,UAAU,CAAE,UAAU,CACvB,EAEC,AAAF,WAAa,AAAC,CACZ,KAAK,CAAE,KAAK,CACb,EAEC,AAAF,MAAQ,AAAC,CACP,OAAO,CAAE,OAAO,CACjB,CAEA,AAAD,KAAO,CAAA,CAAC,AAAA,MAAM,AAAA,CAAC,CAAE,CACf,OAAO,CAAE,IAAI,CACd,CAEA,AAAD,KAAO,CAAA,CAAC,AAAA,eAAe,AAAA,CAAC,AAAA,IAAI,AAAA,CAAC,AAAA,CAAC,AAAA,gBAAgB,AAAA,OAAO,AAAA,CAAC,AAAA,CAAC,CAAE,CACvD,gBAAgB,CAAE,UAAU,CAC5B,mBAAmB,CAAE,UAAU,CAC/B,aAAa,CAAE,UAAU,CACzB,kBAAkB,CAAE,iBAAiB,CACrC,mBAAmB,CAAE,IAAI,CAC1B,CAEA,AAAD,KAAO,CAAA,CAAC,AAAA,UAAU,AAAA,MAAM,AAAA,CAAC,CAAE,CACzB,iBAAiB,CAAE,OAAO,CAC3B,CAEA,AAAD,KAAO,CAAA,YAAY,CAAE,CACnB,GAAG,CAAE,MAAM,CACX,UAAU,CAAE,UAAU,CACvB,AAED,AAAA,GAAG,CAAE,IAAI,AAAC,CACR,MAAM,CAAE,CAAC,CACV,CCtFA,AAAD,IAAK,AAAC,CACJ,oBAAoB,CAAA,KAAC,CACrB,oBAAoB,CAAA,KAAC,CACtB,AAED,MAAM,oHAEJ,EAAC,AAAD,IAAK,AAAC,CACJ,oBAAoB,CAAA,KAAC,CACrB,oBAAoB,CAAA,KAAC,CACtB,CAAA,AAGH,MAAM,oBACJ,EAAC,AAAD,IAAK,AAAC,CACJ,oBAAoB,CAAA,KAAC,CACrB,oBAAoB,CAAA,KAAC,CACtB,CAAA,ACjBH,AAAA,IAAI,AAAC,CACH,WAAW,CAAE,kBAAkB,CAC/B,UAAU,CAAE,iBAAiB,CAC7B,KAAK,CAAE,YAAY,CACnB,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,UAAU,CAAE,MAAM,CACnB,AAED,AAAA,YAAY,AAAC,CACX,OAAO,CAAE,IAAI,CACb,qBAAqB,CAAE,OAAO,CAC9B,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,iBAAiB,CAAE,mBAAmB,A
AAC,CACrC,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,YAAY,AAAC,CACX,OAAO,CAAE,IAAI,CACb,qBAAqB,CAAE,OAAO,CAC9B,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,sBAAsB,CAAE,iBAAiB,AAAC,CACxC,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,0BAA0B,AAAC,CACzB,OAAO,CAAE,IAAI,CACb,IAAI,CAAE,CAAC,CACR,AAED,AAAA,QAAQ,CAAE,IAAI,CAAE,YAAY,CAAE,QAAQ,CAAC,UAAU,CAAE,IAAI,CAAC,UAAU,AAAC,CACjE,OAAO,CAAE,IAAI,CACd,AAED,AAAA,IAAI,AAAC,CACH,IAAI,CAAE,CAAC,CACP,OAAO,CAAE,IAAI,CACb,QAAQ,CAAE,IAAI,CACf,AAED,AAAA,QAAQ,AAAC,CACP,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,KAAK,CACb,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,IAAI,CAAE,IAAI,CACX,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,KAAK,CAAE,IAAI,CACZ,AAGD,MAAM,2CACJ,CAAA,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,kBAAkB,AAAC,CACjB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,sBAAsB,AAAC,CACrB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,IAAI,CAAE,IAAI,CACX,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,KAAK,CAAE,IAAI,CACZ,CA/CA,AAmDH,MAAM,oBACJ,CAAA,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,yBAAyB,CAC9C,AAED,AAAA,kBAAkB,AAAC,CACjB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CAClB,QAAQ,CAAE,CAAC,CACZ,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CAClB,QAAQ,CAAE,CAAC,CACZ,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,yBAAyB,CAC9C,AAED,AAAA,sBAAsB,AAAC,CACrB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CAClB,QAAQ,CAAE,CAAC,CACZ,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,oBAAoB,AAAC,CACnB,OAAO,CAAE,IAAI,CACd,CA5CA,AA
gDH,MAAM,4CACJ,CAAA,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,IAAI,CAAE,IAAI,CACX,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACvB,AAED,AAAA,IAAI,CAAC,OAAO,AAAC,CACX,QAAQ,CAAE,KAAK,CACf,KAAK,CAAE,CAAC,CACR,KAAK,CAAE,GAAG,CACX,CAhBA,AAoBH,MAAM,oBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACvB,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACvB,AAED,AAAA,QAAQ,CAAC,OAAO,AAAC,CACf,QAAQ,CAAE,KAAK,CACf,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,GAAG,CACX,AAED,AAAA,IAAI,CAAC,OAAO,AAAC,CACX,QAAQ,CAAE,KAAK,CACf,KAAK,CAAE,CAAC,CACR,KAAK,CAAE,GAAG,CACX,CAtBA,AA0BH,MAAM,oBACJ,CAAA,AAAA,IAAI,AAAC,CACH,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,GAAG,CACf,OAAO,CAAE,GAAG,CACZ,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,iBAAiB,CAC7B,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,eAAkB,CAC1C,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,IAAI,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,IAAI,AAAC,CAC5D,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,qBAAwB,CAChD,AAED,AAAA,iBAAiB,CAAC,IAAI,AAAC,CACrB,SAAS,CAAE,2BAA2B,CACtC,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,YAAY,CAAE,IAAI,CAClB,OAAO,CAAE,EAAE,CACX,MAAM,CAAE,OAAO,CAChB,AAED,AAAA,iBAAiB,CAAC,MAAM,AAAC,CACvB,OAAO,CAAE,EAAE,CACX,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,OAAO,CAAE,CAAC,CACX,AAED,AAAA,iBAAiB,CAAC,IAAI,CAAC,UAAU,AAAC,CAChC,OAAO,CAAE,KAAK,CACd,QAAQ,CAAE,QAAQ,CAClB,GAAG,CAAE,IAAI,CACT,IAAI,CAAE,IAAI,CACX,AAED,AAAA,oBAAoB,AAAC,CACnB,OAAO,CAAE,IAAI,CACb,UAAU,CAAE,kBAAkB,CAC9B,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,MAAM,CACnB,GAAG,CAAE,GAAG,CACR,KAAK,CAAE,aAAa,CACrB,CAzCA,AA6CH,MAAM,oBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,GAAG,CACf,OAAO,CAAE,GAAG,CACZ,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,iBA
AiB,CAC7B,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,eAAkB,CAC1C,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,QAAQ,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,QAAQ,AAAC,CACpE,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,qBAAwB,CAChD,AAED,AAAA,qBAAqB,CAAC,QAAQ,AAAC,CAC7B,SAAS,CAAE,0BAA0B,CACrC,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,OAAO,CAAE,EAAE,CACX,MAAM,CAAE,OAAO,CAChB,AAED,AAAA,qBAAqB,CAAC,MAAM,AAAC,CAC3B,OAAO,CAAE,EAAE,CACX,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,OAAO,CAAE,CAAC,CACX,AAED,AAAA,qBAAqB,CAAC,QAAQ,CAAC,UAAU,AAAC,CACxC,OAAO,CAAE,KAAK,CACd,QAAQ,CAAE,QAAQ,CAClB,GAAG,CAAE,IAAI,CACT,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,YAAY,AAAC,CACX,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,IAAI,CACT,QAAQ,CAAE,IAAI,CACd,eAAe,CAAE,aAAa,CAC9B,MAAM,CAAE,yBAAyB,CACjC,WAAW,CAAE,MAAM,CACnB,OAAO,CAAE,KAAK,CACf,AAED,AAAA,qBAAqB,AAAC,CACpB,OAAO,CAAE,IAAI,CACb,UAAU,CAAE,kBAAkB,CAC9B,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,MAAM,CACnB,GAAG,CAAE,GAAG,CACR,KAAK,CAAE,aAAa,CACrB,CAlDA,AAqDH,AAAA,IAAI,AAAA,qBAAqB,CAAE,IAAI,AAAA,iBAAiB,AAAC,CAC/C,MAAM,CAAE,OAAO,CACf,QAAQ,CAAE,MAAM,CACjB,AAED,AAAA,qBAAqB,CAAC,MAAM,CAAE,iBAAiB,CAAC,MAAM,AAAC,CACrD,UAAU,CAAE,qBAAwB,CACpC,eAAe,CAAE,iBAAiB,CAClC,uBAAuB,CAAE,iBAAiB,CAC3C,AAED,UAAU,CAAV,aAAU,CACR,IAAI,CACF,SAAS,CAAE,aAAa,CAE1B,EAAE,CACA,SAAS,CAAE,gBAAgB,EAI/B,UAAU,CAAV,cAAU,CACR,IAAI,CACF,SAAS,CAAE,aAAa,CAE1B,EAAE,CACA,SAAS,CAAE,iBAAiB,EC7WhC,AAAA,kBAAkB,AAAC,CACjB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,WAAW,CAAE,wBAAwB,CACrC,SAAS,CAAE,KAAK,CAChB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,kBAAkB,CAAC,CAAC,AAAC,CACnB,OAAO,CAAE,IAAI,CACd,AAED,AAAA,iBAAiB,AAAC,CAChB,OAAO,CAAE,MAAM,CACf,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,iBAAiB,CAAC,GAAG,AAAC,CACpB,KAAK,CAAE,IAAI,CACX,QAAQ,CAAE,IAAI,CACf,AAED,AAAA,iBAAiB,CAAC,EAAE,AAAC,CACnB,OAAO,CAAE,IAAI,CACb,MAAM,CAAE,IAAI,CACZ,WAAW,CAAE,MAAM,CACnB,GAAG,CAAE,IAAI,CACV,AAED,AAAA,iBAAiB,CAAC,CAAC,AAAC,CAClB,OAAO,C
AAE,IAAI,CACb,OAAO,CAAE,QAAQ,CACjB,GAAG,CAAE,GAAG,CACR,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,iBAAiB,CAAC,CAAC,CAAC,KAAK,CAAE,iBAAiB,CAAC,CAAC,CAAC,KAAK,CAAE,iBAAiB,CAAC,CAAC,AAAA,OAAO,AAAC,CAC/E,aAAa,CAAE,SAAS,CACzB,AAED,AAAA,qBAAqB,AAAC,CACpB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,aAAa,CAAE,IAAI,CACnB,eAAe,CAAE,QAAQ,CACzB,GAAG,CAAE,IACP,CAAC,AAED,AAAA,mBAAmB,AAAC,CAClB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,QAAQ,CACtB,AAGD,MAAM,mBACJ,CAAA,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,MAAM,CACpB,CAAA,AC3DH,AAAA,mBAAmB,AAAC,CAClB,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,IAAI,CACT,eAAe,CAAE,UAAU,CAC3B,YAAY,CAAE,IAAI,CAClB,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,iBAAiB,AAAC,CAChB,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,IAAI,CACT,QAAQ,CAAE,IAAI,CACd,eAAe,CAAE,QAAQ,CACzB,aAAa,CAAE,IAAI,CACnB,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,sBAAsB,CAAE,iBAAiB,AAAC,CACxC,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,eAAe,CAAE,MAAM,CACvB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,sBAAsB,CAAC,CAAC,AAAC,CACvB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACpB,AAGD,MAAM,mBACJ,CAAA,AAAA,sBAAsB,AAAC,CACrB,eAAe,CAAE,UAAU,CAC3B,YAAY,CAAE,IAAI,CACnB,AAED,AAAA,mBAAmB,AAAC,CAClB,eAAe,CAAE,QAAQ,CACzB,aAAa,CAAE,IAAI,CACpB,CALA,AClCH,AAAA,QAAQ,AAAC,CACP,OAAO,CAAE,QAAQ,CAClB,AAED,AAAA,eAAe,AAAC,CACd,SAAS,CAAE,GAAG,CACd,WAAW,CAAE,GAAG,CAChB,aAAa,CAAE,GAAG,CAClB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,EAAE,AAAC,CAClB,WAAW,CAAE,GAAG,CAChB,WAAW,CAAE,GAAG,CAChB,MAAM,CAAE,YAAY,CACpB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,SAAS,CAAE,KACb,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,SAAS,CAAE,KACb,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,SAAS,CAAE,KACb,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,SAAS,CAAE,KACb,CAAC,AAED,AAAA,gBAAgB,CAAC,UAAU,CAC3B,gBAAgB,CAAC,UAAU,CAC3B,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,MAAM,CACvB,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,CAAC,CAClB,gBAAgB,CAAC,KAAK,CACtB,gBAAgB,CAAC,EAAE,AAAC,CAClB,UAAU,CAAE,GAAG,CACf,WAAW,CAAE,MAAM,CACnB,cAAc,CAAE,K
AAK,CACtB,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,CAAC,AAAC,CAC5B,MAAM,CAAE,KACV,CAAC,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,EAAE,CAC9B,gBAAgB,CAAC,UAAU,CAAC,EAAE,CAC9B,gBAAgB,CAAC,UAAU,CAAC,EAAE,AAAC,CAC7B,MAAM,CAAE,aACV,CAAC,AAED,AAAA,gBAAgB,CAAC,CAAC,AAAC,CACjB,KAAK,CAAE,mBAAmB,CAC1B,eAAe,CAAE,IACnB,CAAC,AAED,AAAA,gBAAgB,CAAC,CAAC,CAAC,KAAK,AAAC,CACvB,KAAK,CAAE,kBAAkB,CACzB,eAAe,CAAE,SACnB,CAAC,AAED,MAAM,MACJ,CAAA,AAAA,gBAAgB,CAAC,CAAC,AAAC,CACjB,KAAK,CAAE,OAAO,CACd,eAAe,CAAE,SACnB,CAAC,AAED,AAAA,gBAAgB,CAAC,CAAC,CAAC,KAAK,AAAC,CACvB,OAAO,CAAE,IAAI,CAAC,UAAU,CAAC,GAAG,CAC5B,SAAS,CAAE,GACb,CAAC,CALA,AAQH,AAAA,gBAAgB,CAAC,MAAM,CAAE,gBAAgB,CAAC,CAAC,CAAE,gBAAgB,CAAC,KAAK,CAAC,EAAE,AAAC,CACrE,WAAW,CAAE,GACf,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,UAAU,CAAE,MACd,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,EAAE,CACnB,gBAAgB,CAAC,EAAE,AAAC,CAClB,WAAW,CAAE,IACf,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,CACtB,gBAAgB,CAAC,EAAE,CAAC,EAAE,AAAC,CACrB,UAAU,CAAE,CAAC,CACb,aAAa,CAAE,CACjB,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,UAAU,CAAE,IACd,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,UAAU,CAAE,OACd,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,AAAC,CAClB,UAAU,CAAE,MACd,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,CAAG,EAAE,AAAC,CACvB,UAAU,CAAE,MACd,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,CAAG,EAAE,AAAC,CACvB,UAAU,CAAE,WACd,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,CAAC,CAAC,AAAC,CACpB,MAAM,CAAE,CACV,CAAC,AAED,AAAA,gBAAgB,CAAC,EAAE,CAAC,UAAU,CAC9B,gBAAgB,CAAC,EAAE,CAAC,UAAU,CAC9B,gBAAgB,CAAC,EAAE,CAAC,MAAM,CAC1B,gBAAgB,CAAC,EAAE,CAAC,KAAK,AAAC,CACxB,MAAM,CAAE,KACV,CAAC,AAED,AAAA,gBAAgB,CAAC,GAAG,CACpB,gBAAgB,CAAC,KAAK,AAAC,CACrB,SAAS,CAAE,IAAI,CACf,aAAa,CAAE,GACjB,CAAC,AAED,AAAA,gBAAgB,CAAC,UAAU,AAAC,CAC1B,OAAO,CAAE,QAAQ,CACjB,QAAQ,CAAE,QAAQ,CAClB,UAAU,CAAE,oBAAoB,CAChC,WAAW,CAAE,GAAG,CAAC,KAAK,CAAC
,mBAAmB,CAC1C,aAAa,CAAE,GAAG,CACnB,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,MAAM,AAAC,CACjC,MAAM,CAAE,KAAK,CACb,UAAU,CAAE,MACd,CAAC,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,MAAM,CAAC,IAAI,CAAC,MAAM,AAAC,CAC7C,OAAO,CAAE,GAAG,CACZ,OAAO,CAAE,MACX,CAAC,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,AAAC,CACxC,KAAK,CAAE,mBAAmB,CAC3B,AAED,AAAA,gBAAgB,CAAC,IAAI,CAAE,gBAAgB,CAAC,GAAG,AAAC,CAC1C,WAAW,CAAE,uBAAuB,CACrC,AAED,AAAA,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,CAAC,CAAC,IAAI,CACvB,gBAAgB,CAAC,UAAU,CAAC,IAAI,CAChC,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,EAAE,CAAC,IAAI,CACxB,gBAAgB,CAAC,KAAK,CAAC,IAAI,AAAC,CAC1B,UAAU,CAAE,oBAAoB,CAChC,OAAO,CAAE,GAAG,CACZ,aAAa,CAAE,GAAG,CAClB,SAAS,CAAE,IAAI,CAChB,AAED,AAAA,gBAAgB,CAAC,GAAG,CAAA,GAAK,CAAA,OAAO,CAAE,CAChC,KAAK,CAAE,oBAAoB,CAC3B,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,GAAG,CAChB,cAAc,CAAE,KAAK,CACrB,gBAAgB,CAAE,oBAAoB,CACtC,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,SAAS,CAClB,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,GAAG,CAChB,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,IAAI,AAAC,CAC/B,UAAU,CAAE,qBAAqB,CACjC,OAAO,CAAE,EAAE,CACZ,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,OAAO,CAAE,gBAAgB,CAAC,UAAU,CAAC,GAAG,CAAA,GAAK,CAAA,OAAO,CAAE,CAChF,UAAU,CAAE,qBAAqB,CACjC,aAAa,CAAE,GAAG,CACnB,AAED,AAAA,gBAAgB,CAAC,UAAU,CAAC,OAAO,CAAC,IAAI,CAAE,gBAAgB,CAAC,UAAU,CAAC,GAAG,CAAA,GAAK,CAAA,OAAO,EAAE,IAAI,AAAC,CAC1F,OAAO,CAAE,CAAC,CACX,AAED,AAAA,gBAAgB,CAAC,KAAK,AAAC,CACrB,SAAS,CAAE,IAAI,CACf,MAAM,CAAE,GAAG,CAAC,KAAK,CAAC,mBAAmB,CACtC,AAED,AAAA,gBAAgB,CAAC,KAAK,CAAC,EAAE,CACzB,gBAAgB,CAAC,KAAK,CAAC,EAAE,AAAC,CACxB,OAAO,CAAE,QACX,CAAC,AAED,AAAA,gBAAgB,CAAC,KAAK,CAAC,EAAE,CAAC,SAAU,CAAA,EAAE,CAAE,CACtC,UAAU,CAAE,oBAAoB,CACjC,AAED,AAAA,eAAe,AAAC,CACd,OAAO,CAAE,IAAI,CACb,qBAAqB,CAAE,OAAO,CAC9B,WAAW,CAAE,IAAI,CAClB,AAED,AAAA,qBAAqB,CAAE,kBAAkB,CAAE,kBAAkB,AAAC,CAC5D,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CAC
nB,OAAO,CAAE,MAAM,CAChB,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,KAAK,CAClB,eAAe,CAAE,MAAM,CACvB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,kBAAkB,CAAE,kBAAkB,AAAC,CACrC,KAAK,CAAE,mBAAmB,CAC3B,AAED,AAAA,kBAAkB,CAAC,KAAK,CAAE,kBAAkB,CAAC,KAAK,AAAC,CACjD,KAAK,CAAE,kBAAkB,CACzB,WAAW,CAAE,GAAG,CAChB,SAAS,CAAE,GAAG,CACf,AAED,AAAA,kBAAkB,AAAC,CACjB,eAAe,CAAE,QAAQ,CAC1B,AAED,AAAA,kBAAkB,CAAC,KAAK,AAAC,CACvB,aAAa,CAAE,GAAG,CACnB,AAED,AAAA,kBAAkB,CAAC,KAAK,AAAC,CACvB,YAAY,CAAE,GAAG,CAClB,AAED,MAAM,mBACJ,CAAA,AAAA,kBAAkB,CAAA,AAAA,eAAC,CAAgB,MAAM,AAAtB,CAAwB,CACzC,WAAW,CAAE,IAAI,CAClB,CAAA,AAGH,MAAM,mBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,OAAO,CAAE,SAAS,CACnB,AAED,AAAA,eAAe,AAAC,CACd,OAAO,CAAE,IAAI,CACb,qBAAqB,CAAE,cAAc,CACtC,AAED,AAAA,kBAAkB,AAAC,CACjB,WAAW,CAAE,IAAI,CACjB,QAAQ,CAAE,CAAC,CACZ,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,kBAAkB,AAAC,CACjB,WAAW,CAAE,KAAK,CACnB,CAlBA,AAqBH,MAAM,oBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,OAAO,CAAE,SAAS,CACnB,CAAA,AAGH,MAAM,oBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,OAAO,CAAE,SAAS,CACnB,CAAA,AAGH,MAAM,oBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,OAAO,CAAE,SAAS,CACnB,AAED,AAAA,gBAAgB,AAAC,CACf,KAAK,CAAE,GAAG,CACX,CAJA,AAOH,MAAM,oBACJ,CAAA,AAAA,gBAAgB,AAAC,CACf,KAAK,CAAE,GAAG,CACX,CAAA,AAGH,MAAM,oBACJ,CAAA,AAAA,gBAAgB,AAAC,CACf,KAAK,CAAE,GAAG,CACX,CAAA,AC7UH,AAAA,QAAQ,AAAC,CACP,OAAO,CAAE,MAAM,CAChB,AAED,AAAA,QAAQ,CAAC,OAAO,AAAC,CACf,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,OAAO,CAAE,MAAM,CACf,QAAQ,CAAE,IAAI,CACf,AAED,AAAA,gBAAgB,CAAE,aAAa,AAAC,CAC9B,OAAO,CAAE,KAAK,CACf,AAED,AAAA,gBAAgB,AAAC,CACf,UAAU,CAAE,IAAI,CAChB,WAAW,CAAE,GAAG,CAChB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,QAAQ,CAAC,gBAAgB,CAAC,WAAW,AAAC,CACpC,UAAU,CAAE,CAAC,CACd,AAED,AAAA,aAAa,AAAC,CACZ,YAAY,CAAE,IAAI,CAClB,KAAK,CAAE,aAAa,CACpB,WAAW,CAAE,GAAG,CAAC,KAAK,CAAC,mBAAmB,CAC1C,WAAW,CAAE,GAAG,CACjB,AAED,AAAA,aAAa,EAAE,MAAM,AAAC,CACpB,OAAO,CAAE,EAAE,CACX,OAAO,CAAE,YAAY,CACrB,KAAK,CAAE,GAAG,CACV,MAAM,CAAE,GAAG,CACX,UAAU,CAAE,iBAAiB,CAC7B,UAAU,CAAE,iBAAiB,CAC7B,aAAa,CAAE,GAAG,CAClB,QAAQ,CAAE,QAAQ,CAClB,IAAI,CAAE,OAAO,CACb,GAAG,
CAAE,IAAI,CACV,AAED,AAAA,aAAa,CAAC,KAAK,AAAC,CAClB,KAAK,CAAE,kBAAkB,CACzB,WAAW,CAAE,GAAG,CAChB,SAAS,CAAE,GAAG,CACf,AAED,AAAA,aAAa,AAAA,QAAQ,AAAC,CACpB,KAAK,CAAE,mBAAmB,CAC1B,WAAW,CAAE,GAAG,CAChB,SAAS,CAAE,GAAG,CACf,AAED,AAAA,aAAa,AAAA,QAAQ,EAAE,MAAM,CAAE,aAAa,CAAC,KAAK,EAAE,MAAM,AAAC,CACzD,UAAU,CAAE,mBAAmB,CAChC,AC3DD,AAAA,IAAI,AAAC,CACH,WAAW,CAAE,IAAI,CACjB,cAAc,CAAE,IAAI,CACrB,AAED,AAAA,IAAI,CAAC,OAAO,AAAA,CACV,QAAQ,CAAE,IAAI,CACf,AAED,AAAA,IAAI,CAAC,MAAM,AAAC,CACV,WAAW,CAAE,GAAG,CAChB,OAAO,CAAE,cAAc,CACvB,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,GAAG,CACR,QAAQ,CAAE,QAAQ,CAClB,IAAI,CAAE,IAAI,CACV,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,IAAI,CAAC,EAAE,AAAC,CACN,WAAW,CAAE,IAAI,CACjB,WAAW,CAAE,GAAG,CAAC,KAAK,CAAC,mBAAmB,CAC3C,AAED,AAAA,IAAI,CAAC,EAAE,CAAC,EAAE,AAAC,CACT,WAAW,CAAE,GAAG,CACjB,AAED,AAAA,IAAI,CAAC,EAAE,CAAC,CAAC,AAAC,CACR,OAAO,CAAE,YAAY,CACrB,OAAO,CAAE,GAAG,CACZ,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,IAAI,CAAC,EAAE,CAAC,CAAC,AAAA,OAAO,CAAE,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,AAAC,CAChC,KAAK,CAAE,kBAAkB,CAC1B,AAED,AAAA,IAAI,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,AAAC,CAChB,OAAO,CAAE,EAAE,CACX,OAAO,CAAE,YAAY,CACrB,KAAK,CAAE,GAAG,CACV,MAAM,CAAE,GAAG,CACX,UAAU,CAAE,iBAAiB,CAC7B,UAAU,CAAE,iBAAiB,CAC7B,QAAQ,CAAE,QAAQ,CAClB,IAAI,CAAE,OAAO,CACb,GAAG,CAAE,IAAI,CACV,AAED,AAAA,IAAI,CAAC,EAAE,CAAC,CAAC,AAAA,OAAO,EAAE,MAAM,CAAE,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,EAAE,MAAM,AAAC,CAChD,UAAU,CAAE,kBAAkB,CAC/B,ACpDD,AAAA,WAAW,AAAC,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,GAAG,CAAE,GAAG,CACR,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,GAAG,CAChB,WAAW,CAAE,KAAK,CAClB,KAAK,CAAE,OAAO,CACd,UAAU,CAAE,OAAO,CACnB,MAAM,CAAE,iBAAiB,CACzB,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,OAAO,CACjB,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,WAAW,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,WAAW,AAAC,CAC1E,KAAK,CAAE,OAAO,CACd,UAAU,CAAE,OAAO,CACnB,MAAM,CAAE,iBAAiB,CAC1B,AAED,AAAA,WAAW,CAAC,KAAK,AAAC,CAChB,SAAS,CAAE,UAAS,CACrB,AAED,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,gBAAgB,CAAE,qkoCAAqkoC,CACvloC,aAAa,CAAE,GAAG,
CAClB,UAAU,CAAE,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,eAAkB,CAC3C,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,iBAAiB,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,iBAAiB,AAAC,CACtF,UAAU,CAAE,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,qBAAwB,CACjD,AAED,AAAA,UAAU,AAAC,CACT,UAAU,CAAE,oBAAoB,CAChC,MAAM,CAAE,GAAG,CAAC,MAAM,CAAC,mBAAmB,CACtC,aAAa,CAAE,GAAG,CAClB,MAAM,CAAE,OAAO,CAChB,ACzCD,AAAA,SAAS,AAAC,CACR,QAAQ,CAAE,QAAQ,CACnB,AAED,AAAA,aAAa,AAAC,CACZ,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,UAAU,CAAE,iBAAiB,CAC7B,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,aAAa,CAAC,YAAY,AAAC,CACzB,OAAO,CAAE,EAAE,CACZ,AAED,AAAA,cAAc,AAAC,CACb,OAAO,CAAE,IAAI,CACb,QAAQ,CAAE,QAAQ,CAClB,KAAK,CAAE,CAAC,CACR,GAAG,CAAE,IAAI,CACT,SAAS,CAAE,KAAK,CAChB,UAAU,CAAE,KAAK,CACjB,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,iBAAiB,CAC7B,KAAK,CAAE,aAAa,CACpB,UAAU,CAAE,kBAAkB,CAC9B,OAAO,CAAE,CAAC,CACV,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACb,AAED,AAAA,cAAc,AAAA,KAAK,AAAC,CAClB,OAAO,CAAE,KAAK,CACf,AAED,AAAA,cAAc,CAAC,MAAM,CAAE,cAAc,CAAC,CAAC,AAAC,CACtC,KAAK,CAAE,IAAI,CACX,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,GAAG,CACR,OAAO,CAAE,GAAG,CACZ,WAAW,CAAE,MAAM,CACnB,eAAe,CAAE,MAAM,CACvB,MAAM,CAAE,OAAO,CAChB,AAED,AAAA,cAAc,CAAC,MAAM,CAAC,KAAK,CAAE,cAAc,CAAC,CAAC,CAAC,KAAK,AAAC,CAClD,UAAU,CAAE,oBAAoB,CACjC,AClDgB,AAAA,OAAO,AAAC,CAAE,SAAS,CAAE,IAAI,CAAE,KAAK,CAAE,oBAAoB,CAAE,gBAAgB,CAAE,oBAAoB,CAAE,aAAa,CAAE,GAAG,CAAE,OAAO,CAAE,SAAS,CAAE,UAAU,CAAE,IAAI,CAAI,AACjK,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,IAAI,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACvC,AAAA,OAAO,CAAC,KAAK,AAAC,CAAE,cAAc,CAAE,GAAG,CAAE,OAAO,CAAE,CAAC,CAAE,MAAM,CAAE,CAAC,CAAE,MAAM,CAAE,CAAC,CAAI,AAC3E,AAAA,OAAO,CAAC,QAAQ,AAAC,CAAE,cAAc,CAAE,CAAC,CAAE,OAAO,CAAE,CAAC,CAAE,MAAM,CAAE,CAAC,CAAE,MAAM,CAAE,CAAC,CAAE,KAAK,CAAE,IAAI,CAAE,QAAQ,CAAE,IAAI,CAAE,OAAO,CAAE,KAAK,CAAI,AACnH,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,OAAO,CAAE,KAAK,CAAE,KAAK,CAAE,IAAI,CAAE,gBAAgB,CAAE,oBAAoB,CAAG,AACjF,AAAA,OAAO,CAAC,IAAI,AAAC,CAAE,YAAY,CAAE,
KAAK,CAAE,OAAO,CAAE,eAAe,CAAI,AACrE,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,YAAY,CAAE,KAAK,CAAE,OAAO,CAAE,eAAe,CAAE,YAAY,CAAE,GAAG,CAAC,KAAK,CAAC,oBAAoB,CAAI,AACpH,AAAA,OAAO,CAAC,KAAK,AAAC,CAAE,OAAO,CAAE,IAAI,CAAI,AAC9B,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACnC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACzC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC9C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC/C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC1C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAChD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACnD,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAClC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC9C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACtC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACpD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACzC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC/C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACzC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC7C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACvC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACpD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACxC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAChD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACzC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACjD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACvC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACvC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC1C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC/C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACtD,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACvC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC1C,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAA
G,AACtC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACzC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAChD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACvC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAClD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACzC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC5C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC/C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC5C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC5C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAClD,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACxC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC1C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC9C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACxC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACxC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACpD,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACpD,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACvC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC7C,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC/C,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACnC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC/C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC5C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACxC,AAAA,OAAO,CAAC,IAAI,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACxD,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AACpC,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC/C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAE,UAAU,CAAE,MAAO,CAAE,AAC/D,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC1C,
AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC3C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC9C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC5C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC5C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAE,WAAW,CAAE,IAAK,CAAE,AAC3D,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC7C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAG,AAC5C,AAAA,OAAO,CAAC,GAAG,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAE,eAAe,CAAE,SAAU,CAAE,AAC1E,AAAA,OAAO,CAAC,EAAE,AAAC,CAAE,KAAK,CAAE,oBAAoB,CAAI,AX9DjE,AAAA,IAAI,AAAC,CACH,WAAW,CAAE,kBAAkB,CAC/B,UAAU,CAAE,iBAAiB,CAC7B,KAAK,CAAE,YAAY,CACnB,eAAe,CAAE,MAAM,CACvB,cAAc,CAAE,GAAG,CACpB" -} \ No newline at end of file diff --git a/docs/scss/component/docsearch.css b/docs/scss/component/docsearch.css deleted file mode 100644 index 7b45a98..0000000 --- a/docs/scss/component/docsearch.css +++ /dev/null @@ -1,7 +0,0 @@ -/*! @docsearch/css 3.2.0 | MIT License | © Algolia, Inc. and contributors | https://docsearch.algolia.com | https://cdn.jsdelivr.net/npm/@docsearch/css@3 */:root{--docsearch-primary-color: #5468ff;--docsearch-spacing: 12px;--docsearch-icon-stroke-width: 1.4;--docsearch-highlight-color: var(--docsearch-primary-color);--docsearch-muted-color: #969faf;--docsearch-container-background: rgba(255, 255, 255, 0.1);--docsearch-logo-color: #5468ff;--docsearch-modal-width: 560px;--docsearch-modal-height: 600px;--docsearch-modal-shadow: inset 1px 1px 0 0 hsla(0, 0%, 100%, 0.5), 0 3px 8px 0 #555a64;--docsearch-searchbox-height: 56px;--docsearch-searchbox-focus-background: #fff;--docsearch-searchbox-shadow: inset 0 0 0 2px var(--docsearch-primary-color);--docsearch-hit-height: 56px;--docsearch-hit-color: #444950;--docsearch-hit-active-color: #fff;--docsearch-hit-background: #fff;--docsearch-hit-shadow: 0 1px 3px 0 #d4d9e1;--docsearch-footer-height: 44px;--docsearch-footer-shadow: 0 -1px 0 0 #e0e3e8, 0 -3px 6px 0 rgba(69, 98, 155, 0.12) 
-}:root[data-color="dark"]{--docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309;--docsearch-searchbox-focus-background: #000;--docsearch-hit-color: #bec3c9;--docsearch-hit-shadow: none;--docsearch-hit-background: #090a11;--docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2);--docsearch-muted-color: #7f8497 -}:root[data-color="night"]{--docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309;--docsearch-searchbox-focus-background: #000;--docsearch-hit-color: #bec3c9;--docsearch-hit-shadow: none;--docsearch-hit-background: #090a11;--docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2);--docsearch-muted-color: #7f8497 -}.DocSearch-Button{width:100%;line-height:1.6em;align-items:center;box-shadow:var(--box-shadow);border-radius:24px;color:var(--color);cursor:pointer;display:flex;justify-content:space-between;margin:0 12px;padding:3px 6px;user-select:none}.DocSearch-Button:active,.DocSearch-Button:focus,.DocSearch-Button:hover{background:var(--docsearch-searchbox-focus-background);box-shadow:var(--docsearch-searchbox-shadow);color:var(--color);outline:none}.DocSearch-Button-Container{align-items:center;display:flex}.DocSearch-Search-Icon{stroke-width:1.6}.DocSearch-Button-Placeholder{font-size:1rem;padding:0 12px 0 6px;color:var(--color3)}.DocSearch-Button-Keys{display:flex;min-width:calc(40px + .8em)}.DocSearch-Button-Key{align-items:center;border-radius:3px;color:var(--docsearch-muted-color);display:flex;height:18px;justify-content:center;margin-right:.4em;position:relative;border:1px solid var(--border-color);width:20px}@media (min-width: 1278px){.DocSearch-Button{width:80%;margin:0}}@media (min-width: 2558px){.DocSearch-Button{width:60%}}@media (min-width: 3838px){.DocSearch-Button{width:40%}}.DocSearch--active{overflow:hidden !important}.DocSearch-Container,.DocSearch-Container 
*{box-sizing:border-box}.DocSearch-Container{background-color:var(--docsearch-container-background);height:100vh;left:0;position:fixed;top:0;width:100vw;z-index:200;backdrop-filter:blur(var(--blur));-webkit-backdrop-filter:blur(var(--blur))}.DocSearch-Container a{text-decoration:none}.DocSearch-Link{appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;font:inherit;margin:0;padding:0}.DocSearch-Modal{background:var(--background);border-radius:6px;box-shadow:var(--docsearch-modal-shadow);flex-direction:column;margin:60px auto auto;max-width:var(--docsearch-modal-width);position:relative}.DocSearch-SearchBar{display:flex;padding:var(--docsearch-spacing) var(--docsearch-spacing) 0}.DocSearch-Form{align-items:center;background:var(--docsearch-searchbox-focus-background);border-radius:4px;box-shadow:var(--docsearch-searchbox-shadow);display:flex;height:var(--docsearch-searchbox-height);margin:0;padding:0 var(--docsearch-spacing);position:relative;width:100%}.DocSearch-Input{appearance:none;background:transparent;border:0;color:var(--docsearch-text-color);flex:1;font:inherit;font-size:1.2em;height:100%;outline:none;padding:0 0 0 8px;width:80%}.DocSearch-Input::placeholder{color:var(--docsearch-muted-color);opacity:1}.DocSearch-Input::-webkit-search-cancel-button,.DocSearch-Input::-webkit-search-decoration,.DocSearch-Input::-webkit-search-results-button,.DocSearch-Input::-webkit-search-results-decoration{display:none}.DocSearch-LoadingIndicator,.DocSearch-MagnifierLabel,.DocSearch-Reset{margin:0;padding:0}.DocSearch-MagnifierLabel,.DocSearch-Reset{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}.DocSearch-Container--Stalled .DocSearch-MagnifierLabel,.DocSearch-LoadingIndicator{display:none}.DocSearch-Container--Stalled .DocSearch-LoadingIndicator{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}@media screen and (prefers-reduced-motion: 
reduce){.DocSearch-Reset{animation:none;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;right:0;stroke-width:var(--docsearch-icon-stroke-width)}}.DocSearch-Reset{animation:fade-in .1s ease-in forwards;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;padding:2px;right:0;stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Reset[hidden]{display:none}.DocSearch-Reset:focus{outline:none}.DocSearch-Reset:hover{color:var(--docsearch-highlight-color)}.DocSearch-LoadingIndicator svg,.DocSearch-MagnifierLabel svg{height:24px;width:24px}.DocSearch-Cancel{display:none}.DocSearch-Dropdown{max-height:calc(var(--docsearch-modal-height) - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height));min-height:var(--docsearch-spacing);overflow-y:auto;overflow-y:overlay;padding:0 var(--docsearch-spacing);scrollbar-color:var(--docsearch-muted-color) var(--docsearch-modal-background);scrollbar-width:thin}.DocSearch-Dropdown::-webkit-scrollbar{width:12px}.DocSearch-Dropdown::-webkit-scrollbar-track{background:transparent}.DocSearch-Dropdown::-webkit-scrollbar-thumb{background-color:var(--docsearch-muted-color);border:3px solid var(--docsearch-modal-background);border-radius:20px}.DocSearch-Dropdown ul{list-style:none;margin:0;padding:0}.DocSearch-Label{font-size:.75em;line-height:1.6em}.DocSearch-Help,.DocSearch-Label{color:var(--docsearch-muted-color)}.DocSearch-Help{font-size:.9em;margin:0;user-select:none}.DocSearch-Title{font-size:1.2em}.DocSearch-Logo a{display:flex}.DocSearch-Logo svg{color:var(--docsearch-logo-color);margin-left:8px}.DocSearch-Hits:last-of-type{margin-bottom:24px}.DocSearch-Hits 
mark{background:none;color:var(--docsearch-highlight-color)}.DocSearch-HitsFooter{color:var(--docsearch-muted-color);display:flex;font-size:.85em;justify-content:center;margin-bottom:var(--docsearch-spacing);padding:var(--docsearch-spacing)}.DocSearch-HitsFooter a{border-bottom:1px solid;color:inherit}.DocSearch-Hit{border-radius:4px;display:flex;padding-bottom:4px;position:relative}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit--deleting{transition:none}}.DocSearch-Hit--deleting{opacity:0;transition:all .25s linear}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit--favoriting{transition:none}}.DocSearch-Hit--favoriting{transform:scale(0);transform-origin:top center;transition:all .25s linear;transition-delay:.25s}.DocSearch-Hit a{background:var(--docsearch-hit-background);border-radius:4px;box-shadow:var(--docsearch-hit-shadow);display:block;padding-left:var(--docsearch-spacing);width:100%}.DocSearch-Hit-source{background:var(--docsearch-modal-background);color:var(--docsearch-highlight-color);font-size:.85em;font-weight:600;line-height:32px;margin:0 -4px;padding:8px 4px 0;position:sticky;top:0;z-index:10}.DocSearch-Hit-Tree{color:var(--docsearch-muted-color);height:var(--docsearch-hit-height);opacity:.5;stroke-width:var(--docsearch-icon-stroke-width);width:24px}.DocSearch-Hit[aria-selected=true] a{background-color:var(--docsearch-highlight-color)}.DocSearch-Hit[aria-selected=true] mark{text-decoration:underline}.DocSearch-Hit-Container{align-items:center;color:var(--docsearch-hit-color);display:flex;flex-direction:row;height:var(--docsearch-hit-height);padding:0 var(--docsearch-spacing) 0 0}.DocSearch-Hit-icon{height:20px;width:20px}.DocSearch-Hit-action,.DocSearch-Hit-icon{color:var(--docsearch-muted-color);stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Hit-action{align-items:center;display:flex;height:22px;width:22px}.DocSearch-Hit-action 
svg{display:block;height:18px;width:18px}.DocSearch-Hit-action+.DocSearch-Hit-action{margin-left:6px}.DocSearch-Hit-action-button{appearance:none;background:none;border:0;border-radius:50%;color:inherit;cursor:pointer;padding:2px}svg.DocSearch-Hit-Select-Icon{display:none}.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Select-Icon{display:block}.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:rgba(0,0,0,0.2);transition:background-color .1s ease-in}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{transition:none}}.DocSearch-Hit-action-button:focus path,.DocSearch-Hit-action-button:hover path{fill:#fff}.DocSearch-Hit-content-wrapper{display:flex;flex:1 1 auto;flex-direction:column;font-weight:500;justify-content:center;line-height:1.2em;margin:0 8px;overflow-x:hidden;position:relative;text-overflow:ellipsis;white-space:nowrap;width:80%}.DocSearch-Hit-title{font-size:.9em}.DocSearch-Hit-path{color:var(--docsearch-muted-color);font-size:.75em}.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-action,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-icon,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-path,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-text,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-title,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Tree,.DocSearch-Hit[aria-selected=true] mark{color:var(--docsearch-hit-active-color) !important}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:rgba(0,0,0,0.2);transition:none}}.DocSearch-ErrorScreen,.DocSearch-NoResults,.DocSearch-StartScreen{font-size:.9em;margin:0 auto;padding:36px 0;text-align:center;width:80%}.DocSearch-Screen-Icon{color:var(--docsearch-muted-color);padding-bottom:12px}.DocSearch-NoResults-Prefill-List{display:inline-block;padding-bottom:24px;text-align:left}.DocSearch-NoResults-Prefill-List 
ul{display:inline-block;padding:8px 0 0}.DocSearch-NoResults-Prefill-List li{list-style-position:inside;list-style-type:"» "}.DocSearch-Prefill{appearance:none;background:none;border:0;border-radius:1em;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;font-size:1em;font-weight:700;padding:0}.DocSearch-Prefill:focus,.DocSearch-Prefill:hover{outline:none;text-decoration:underline}.DocSearch-Footer{align-items:center;border-radius:0 0 8px 8px;box-shadow:var(--docsearch-footer-shadow);display:flex;flex-direction:row-reverse;flex-shrink:0;height:var(--docsearch-footer-height);justify-content:space-between;padding:0 var(--docsearch-spacing);position:relative;user-select:none;width:100%;z-index:300}.DocSearch-Commands{color:var(--docsearch-muted-color);display:flex;list-style:none;margin:0;padding:0}.DocSearch-Commands li{align-items:center;display:flex}.DocSearch-Commands li:not(:last-of-type){margin-right:.8em}.DocSearch-Commands-Key{align-items:center;border-radius:2px;display:flex;height:18px;justify-content:center;margin-right:.4em;padding:0 0 1px;color:var(--docsearch-muted-color);border:1px solid var(--border-color);width:20px}@media (max-width: 768px){:root{--docsearch-spacing: 10px;--docsearch-footer-height: 40px - }.DocSearch-Dropdown{height:100%}.DocSearch-Container{height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh) * 100);position:absolute}.DocSearch-Footer{border-radius:0;bottom:0;position:absolute}.DocSearch-Hit-content-wrapper{display:flex;position:relative;width:80%}.DocSearch-Modal{border-radius:0;box-shadow:none;height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh) * 100);margin:0;max-width:100%;width:100%}.DocSearch-Dropdown{max-height:calc(var(--docsearch-vh, 1vh) * 100 - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - 
var(--docsearch-footer-height))}.DocSearch-Cancel{appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;flex:none;font:inherit;font-size:1em;font-weight:500;margin-left:var(--docsearch-spacing);outline:none;overflow:hidden;padding:0;user-select:none;white-space:nowrap}.DocSearch-Commands,.DocSearch-Hit-Tree{display:none}}@keyframes fade-in{0%{opacity:0}to{opacity:1}} - -/*# sourceMappingURL=docsearch.css.map */ \ No newline at end of file diff --git a/docs/scss/component/docsearch.css.map b/docs/scss/component/docsearch.css.map deleted file mode 100644 index 92eaadd..0000000 --- a/docs/scss/component/docsearch.css.map +++ /dev/null @@ -1,13 +0,0 @@ -{ - "version": 3, - "file": "docsearch.css", - "sourceRoot": "D:/project/gitlab/llm/external/ant_group/codefuse-ai.github.io", - "sources": [ - "themes/docura/assets/scss/component/docsearch.scss" - ], - "sourcesContent": [ - "/*! @docsearch/css 3.2.0 | MIT License | © Algolia, Inc. 
and contributors | https://docsearch.algolia.com | https://cdn.jsdelivr.net/npm/@docsearch/css@3 */\n:root {\n --docsearch-primary-color: #5468ff;\n --docsearch-spacing: 12px;\n --docsearch-icon-stroke-width: 1.4;\n --docsearch-highlight-color: var(--docsearch-primary-color);\n --docsearch-muted-color: #969faf;\n --docsearch-container-background: rgba(255, 255, 255, 0.1);\n --docsearch-logo-color: #5468ff;\n --docsearch-modal-width: 560px;\n --docsearch-modal-height: 600px;\n --docsearch-modal-shadow: inset 1px 1px 0 0 hsla(0, 0%, 100%, 0.5), 0 3px 8px 0 #555a64;\n --docsearch-searchbox-height: 56px;\n --docsearch-searchbox-focus-background: #fff;\n --docsearch-searchbox-shadow: inset 0 0 0 2px var(--docsearch-primary-color);\n --docsearch-hit-height: 56px;\n --docsearch-hit-color: #444950;\n --docsearch-hit-active-color: #fff;\n --docsearch-hit-background: #fff;\n --docsearch-hit-shadow: 0 1px 3px 0 #d4d9e1;\n --docsearch-footer-height: 44px;\n --docsearch-footer-shadow: 0 -1px 0 0 #e0e3e8, 0 -3px 6px 0 rgba(69, 98, 155, 0.12)\n}\n\n:root[data-color=\"dark\"] {\n --docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309;\n --docsearch-searchbox-focus-background: #000;\n --docsearch-hit-color: #bec3c9;\n --docsearch-hit-shadow: none;\n --docsearch-hit-background: #090a11;\n --docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2);\n --docsearch-muted-color: #7f8497\n}\n\n:root[data-color=\"night\"] {\n --docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309;\n --docsearch-searchbox-focus-background: #000;\n --docsearch-hit-color: #bec3c9;\n --docsearch-hit-shadow: none;\n --docsearch-hit-background: #090a11;\n --docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2);\n --docsearch-muted-color: #7f8497\n}\n\n.DocSearch-Button {\n width: 100%;\n line-height: 1.6em;\n align-items: center;\n box-shadow: var(--box-shadow);\n border-radius: 24px;\n color: 
var(--color);\n cursor: pointer;\n display: flex;\n justify-content: space-between;\n margin: 0 12px;\n padding: 3px 6px;\n user-select: none;\n}\n\n.DocSearch-Button:active, .DocSearch-Button:focus, .DocSearch-Button:hover {\n background: var(--docsearch-searchbox-focus-background);\n box-shadow: var(--docsearch-searchbox-shadow);\n color: var(--color);\n outline: none\n}\n\n.DocSearch-Button-Container {\n align-items: center;\n display: flex\n}\n\n.DocSearch-Search-Icon {\n stroke-width: 1.6\n}\n\n.DocSearch-Button-Placeholder {\n font-size: 1rem;\n padding: 0 12px 0 6px;\n color: var(--color3)\n}\n\n.DocSearch-Button-Keys {\n display: flex;\n min-width: calc(40px + .8em)\n}\n\n.DocSearch-Button-Key {\n align-items: center;\n border-radius: 3px;\n color: var(--docsearch-muted-color);\n display: flex;\n height: 18px;\n justify-content: center;\n margin-right: .4em;\n position: relative;\n border: 1px solid var(--border-color);\n width: 20px\n}\n\n@media (min-width: 1278px) {\n .DocSearch-Button {\n width: 80%;\n margin: 0;\n }\n}\n\n@media (min-width: 2558px) {\n .DocSearch-Button {\n width: 60%;\n }\n}\n\n@media (min-width: 3838px) {\n .DocSearch-Button {\n width: 40%;\n }\n}\n\n.DocSearch--active {\n overflow: hidden !important\n}\n\n.DocSearch-Container, .DocSearch-Container * {\n box-sizing: border-box\n}\n\n.DocSearch-Container {\n background-color: var(--docsearch-container-background);\n height: 100vh;\n left: 0;\n position: fixed;\n top: 0;\n width: 100vw;\n z-index: 200;\n backdrop-filter: blur(var(--blur));\n -webkit-backdrop-filter: blur(var(--blur));\n}\n\n.DocSearch-Container a {\n text-decoration: none\n}\n\n.DocSearch-Link {\n appearance: none;\n background: none;\n border: 0;\n color: var(--docsearch-highlight-color);\n cursor: pointer;\n font: inherit;\n margin: 0;\n padding: 0\n}\n\n.DocSearch-Modal {\n background: var(--background);\n border-radius: 6px;\n box-shadow: var(--docsearch-modal-shadow);\n flex-direction: column;\n margin: 60px auto 
auto;\n max-width: var(--docsearch-modal-width);\n position: relative\n}\n\n.DocSearch-SearchBar {\n display: flex;\n padding: var(--docsearch-spacing) var(--docsearch-spacing) 0\n}\n\n.DocSearch-Form {\n align-items: center;\n background: var(--docsearch-searchbox-focus-background);\n border-radius: 4px;\n box-shadow: var(--docsearch-searchbox-shadow);\n display: flex;\n height: var(--docsearch-searchbox-height);\n margin: 0;\n padding: 0 var(--docsearch-spacing);\n position: relative;\n width: 100%\n}\n\n.DocSearch-Input {\n appearance: none;\n background: transparent;\n border: 0;\n color: var(--docsearch-text-color);\n flex: 1;\n font: inherit;\n font-size: 1.2em;\n height: 100%;\n outline: none;\n padding: 0 0 0 8px;\n width: 80%\n}\n\n.DocSearch-Input::placeholder {\n color: var(--docsearch-muted-color);\n opacity: 1\n}\n\n.DocSearch-Input::-webkit-search-cancel-button, .DocSearch-Input::-webkit-search-decoration, .DocSearch-Input::-webkit-search-results-button, .DocSearch-Input::-webkit-search-results-decoration {\n display: none\n}\n\n.DocSearch-LoadingIndicator, .DocSearch-MagnifierLabel, .DocSearch-Reset {\n margin: 0;\n padding: 0\n}\n\n.DocSearch-MagnifierLabel, .DocSearch-Reset {\n align-items: center;\n color: var(--docsearch-highlight-color);\n display: flex;\n justify-content: center\n}\n\n.DocSearch-Container--Stalled .DocSearch-MagnifierLabel, .DocSearch-LoadingIndicator {\n display: none\n}\n\n.DocSearch-Container--Stalled .DocSearch-LoadingIndicator {\n align-items: center;\n color: var(--docsearch-highlight-color);\n display: flex;\n justify-content: center\n}\n\n@media screen and (prefers-reduced-motion: reduce) {\n .DocSearch-Reset {\n animation: none;\n appearance: none;\n background: none;\n border: 0;\n border-radius: 50%;\n color: var(--docsearch-icon-color);\n cursor: pointer;\n right: 0;\n stroke-width: var(--docsearch-icon-stroke-width)\n }\n}\n\n.DocSearch-Reset {\n animation: fade-in .1s ease-in forwards;\n appearance: none;\n 
background: none;\n border: 0;\n border-radius: 50%;\n color: var(--docsearch-icon-color);\n cursor: pointer;\n padding: 2px;\n right: 0;\n stroke-width: var(--docsearch-icon-stroke-width)\n}\n\n.DocSearch-Reset[hidden] {\n display: none\n}\n\n.DocSearch-Reset:focus {\n outline: none\n}\n\n.DocSearch-Reset:hover {\n color: var(--docsearch-highlight-color)\n}\n\n.DocSearch-LoadingIndicator svg, .DocSearch-MagnifierLabel svg {\n height: 24px;\n width: 24px\n}\n\n.DocSearch-Cancel {\n display: none\n}\n\n.DocSearch-Dropdown {\n max-height: calc(var(--docsearch-modal-height) - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height));\n min-height: var(--docsearch-spacing);\n overflow-y: auto;\n overflow-y: overlay;\n padding: 0 var(--docsearch-spacing);\n scrollbar-color: var(--docsearch-muted-color) var(--docsearch-modal-background);\n scrollbar-width: thin\n}\n\n.DocSearch-Dropdown::-webkit-scrollbar {\n width: 12px\n}\n\n.DocSearch-Dropdown::-webkit-scrollbar-track {\n background: transparent\n}\n\n.DocSearch-Dropdown::-webkit-scrollbar-thumb {\n background-color: var(--docsearch-muted-color);\n border: 3px solid var(--docsearch-modal-background);\n border-radius: 20px\n}\n\n.DocSearch-Dropdown ul {\n list-style: none;\n margin: 0;\n padding: 0\n}\n\n.DocSearch-Label {\n font-size: .75em;\n line-height: 1.6em\n}\n\n.DocSearch-Help, .DocSearch-Label {\n color: var(--docsearch-muted-color)\n}\n\n.DocSearch-Help {\n font-size: .9em;\n margin: 0;\n user-select: none\n}\n\n.DocSearch-Title {\n font-size: 1.2em\n}\n\n.DocSearch-Logo a {\n display: flex\n}\n\n.DocSearch-Logo svg {\n color: var(--docsearch-logo-color);\n margin-left: 8px\n}\n\n.DocSearch-Hits:last-of-type {\n margin-bottom: 24px\n}\n\n.DocSearch-Hits mark {\n background: none;\n color: var(--docsearch-highlight-color)\n}\n\n.DocSearch-HitsFooter {\n color: var(--docsearch-muted-color);\n display: flex;\n font-size: .85em;\n justify-content: center;\n margin-bottom: 
var(--docsearch-spacing);\n padding: var(--docsearch-spacing)\n}\n\n.DocSearch-HitsFooter a {\n border-bottom: 1px solid;\n color: inherit\n}\n\n.DocSearch-Hit {\n border-radius: 4px;\n display: flex;\n padding-bottom: 4px;\n position: relative\n}\n\n@media screen and (prefers-reduced-motion: reduce) {\n .DocSearch-Hit--deleting {\n transition: none\n }\n}\n\n.DocSearch-Hit--deleting {\n opacity: 0;\n transition: all .25s linear\n}\n\n@media screen and (prefers-reduced-motion: reduce) {\n .DocSearch-Hit--favoriting {\n transition: none\n }\n}\n\n.DocSearch-Hit--favoriting {\n transform: scale(0);\n transform-origin: top center;\n transition: all .25s linear;\n transition-delay: .25s\n}\n\n.DocSearch-Hit a {\n background: var(--docsearch-hit-background);\n border-radius: 4px;\n box-shadow: var(--docsearch-hit-shadow);\n display: block;\n padding-left: var(--docsearch-spacing);\n width: 100%\n}\n\n.DocSearch-Hit-source {\n background: var(--docsearch-modal-background);\n color: var(--docsearch-highlight-color);\n font-size: .85em;\n font-weight: 600;\n line-height: 32px;\n margin: 0 -4px;\n padding: 8px 4px 0;\n position: sticky;\n top: 0;\n z-index: 10\n}\n\n.DocSearch-Hit-Tree {\n color: var(--docsearch-muted-color);\n height: var(--docsearch-hit-height);\n opacity: .5;\n stroke-width: var(--docsearch-icon-stroke-width);\n width: 24px\n}\n\n.DocSearch-Hit[aria-selected=true] a {\n background-color: var(--docsearch-highlight-color)\n}\n\n.DocSearch-Hit[aria-selected=true] mark {\n text-decoration: underline\n}\n\n.DocSearch-Hit-Container {\n align-items: center;\n color: var(--docsearch-hit-color);\n display: flex;\n flex-direction: row;\n height: var(--docsearch-hit-height);\n padding: 0 var(--docsearch-spacing) 0 0\n}\n\n.DocSearch-Hit-icon {\n height: 20px;\n width: 20px\n}\n\n.DocSearch-Hit-action, .DocSearch-Hit-icon {\n color: var(--docsearch-muted-color);\n stroke-width: var(--docsearch-icon-stroke-width)\n}\n\n.DocSearch-Hit-action {\n align-items: center;\n 
display: flex;\n height: 22px;\n width: 22px\n}\n\n.DocSearch-Hit-action svg {\n display: block;\n height: 18px;\n width: 18px\n}\n\n.DocSearch-Hit-action + .DocSearch-Hit-action {\n margin-left: 6px\n}\n\n.DocSearch-Hit-action-button {\n appearance: none;\n background: none;\n border: 0;\n border-radius: 50%;\n color: inherit;\n cursor: pointer;\n padding: 2px\n}\n\nsvg.DocSearch-Hit-Select-Icon {\n display: none\n}\n\n.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Select-Icon {\n display: block\n}\n\n.DocSearch-Hit-action-button:focus, .DocSearch-Hit-action-button:hover {\n background: rgba(0, 0, 0, .2);\n transition: background-color .1s ease-in\n}\n\n@media screen and (prefers-reduced-motion: reduce) {\n .DocSearch-Hit-action-button:focus, .DocSearch-Hit-action-button:hover {\n transition: none\n }\n}\n\n.DocSearch-Hit-action-button:focus path, .DocSearch-Hit-action-button:hover path {\n fill: #fff\n}\n\n.DocSearch-Hit-content-wrapper {\n display: flex;\n flex: 1 1 auto;\n flex-direction: column;\n font-weight: 500;\n justify-content: center;\n line-height: 1.2em;\n margin: 0 8px;\n overflow-x: hidden;\n position: relative;\n text-overflow: ellipsis;\n white-space: nowrap;\n width: 80%\n}\n\n.DocSearch-Hit-title {\n font-size: .9em\n}\n\n.DocSearch-Hit-path {\n color: var(--docsearch-muted-color);\n font-size: .75em\n}\n\n.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-action, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-icon, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-path, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-text, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-title, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Tree, .DocSearch-Hit[aria-selected=true] mark {\n color: var(--docsearch-hit-active-color) !important\n}\n\n@media screen and (prefers-reduced-motion: reduce) {\n .DocSearch-Hit-action-button:focus, .DocSearch-Hit-action-button:hover {\n background: rgba(0, 0, 0, .2);\n transition: none\n 
}\n}\n\n.DocSearch-ErrorScreen, .DocSearch-NoResults, .DocSearch-StartScreen {\n font-size: .9em;\n margin: 0 auto;\n padding: 36px 0;\n text-align: center;\n width: 80%\n}\n\n.DocSearch-Screen-Icon {\n color: var(--docsearch-muted-color);\n padding-bottom: 12px\n}\n\n.DocSearch-NoResults-Prefill-List {\n display: inline-block;\n padding-bottom: 24px;\n text-align: left\n}\n\n.DocSearch-NoResults-Prefill-List ul {\n display: inline-block;\n padding: 8px 0 0\n}\n\n.DocSearch-NoResults-Prefill-List li {\n list-style-position: inside;\n list-style-type: \"» \"\n}\n\n.DocSearch-Prefill {\n appearance: none;\n background: none;\n border: 0;\n border-radius: 1em;\n color: var(--docsearch-highlight-color);\n cursor: pointer;\n display: inline-block;\n font-size: 1em;\n font-weight: 700;\n padding: 0\n}\n\n.DocSearch-Prefill:focus, .DocSearch-Prefill:hover {\n outline: none;\n text-decoration: underline\n}\n\n.DocSearch-Footer {\n align-items: center;\n border-radius: 0 0 8px 8px;\n box-shadow: var(--docsearch-footer-shadow);\n display: flex;\n flex-direction: row-reverse;\n flex-shrink: 0;\n height: var(--docsearch-footer-height);\n justify-content: space-between;\n padding: 0 var(--docsearch-spacing);\n position: relative;\n user-select: none;\n width: 100%;\n z-index: 300\n}\n\n.DocSearch-Commands {\n color: var(--docsearch-muted-color);\n display: flex;\n list-style: none;\n margin: 0;\n padding: 0\n}\n\n.DocSearch-Commands li {\n align-items: center;\n display: flex\n}\n\n.DocSearch-Commands li:not(:last-of-type) {\n margin-right: .8em\n}\n\n.DocSearch-Commands-Key {\n align-items: center;\n border-radius: 2px;\n display: flex;\n height: 18px;\n justify-content: center;\n margin-right: .4em;\n padding: 0 0 1px;\n color: var(--docsearch-muted-color);\n border: 1px solid var(--border-color);\n width: 20px\n}\n\n@media (max-width: 768px) {\n :root {\n --docsearch-spacing: 10px;\n --docsearch-footer-height: 40px\n }\n .DocSearch-Dropdown {\n height: 100%\n }\n 
.DocSearch-Container {\n height: 100vh;\n height: -webkit-fill-available;\n height: calc(var(--docsearch-vh, 1vh) * 100);\n position: absolute\n }\n .DocSearch-Footer {\n border-radius: 0;\n bottom: 0;\n position: absolute\n }\n .DocSearch-Hit-content-wrapper {\n display: flex;\n position: relative;\n width: 80%\n }\n .DocSearch-Modal {\n border-radius: 0;\n box-shadow: none;\n height: 100vh;\n height: -webkit-fill-available;\n height: calc(var(--docsearch-vh, 1vh) * 100);\n margin: 0;\n max-width: 100%;\n width: 100%\n }\n .DocSearch-Dropdown {\n max-height: calc(var(--docsearch-vh, 1vh) * 100 - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height))\n }\n .DocSearch-Cancel {\n appearance: none;\n background: none;\n border: 0;\n color: var(--docsearch-highlight-color);\n cursor: pointer;\n display: inline-block;\n flex: none;\n font: inherit;\n font-size: 1em;\n font-weight: 500;\n margin-left: var(--docsearch-spacing);\n outline: none;\n overflow: hidden;\n padding: 0;\n user-select: none;\n white-space: nowrap\n }\n .DocSearch-Commands, .DocSearch-Hit-Tree {\n display: none\n }\n}\n\n@keyframes fade-in {\n 0% {\n opacity: 0\n }\n to {\n opacity: 1\n }\n}" - ], - "names": [], - "mappings": 
"AAAA,4JAA4J,CAC3J,AAAD,IAAK,AAAC,CACJ,yBAAyB,CAAA,QAAC,CAC1B,mBAAmB,CAAA,KAAC,CACpB,6BAA6B,CAAA,IAAC,CAC9B,2BAA2B,CAAA,+BAAC,CAC5B,uBAAuB,CAAA,QAAC,CACxB,gCAAgC,CAAA,yBAAC,CACjC,sBAAsB,CAAA,QAAC,CACvB,uBAAuB,CAAA,MAAC,CACxB,wBAAwB,CAAA,MAAC,CACzB,wBAAwB,CAAA,8DAAC,CACzB,4BAA4B,CAAA,KAAC,CAC7B,sCAAsC,CAAA,KAAC,CACvC,4BAA4B,CAAA,+CAAC,CAC7B,sBAAsB,CAAA,KAAC,CACvB,qBAAqB,CAAA,QAAC,CACtB,4BAA4B,CAAA,KAAC,CAC7B,0BAA0B,CAAA,KAAC,CAC3B,sBAAsB,CAAA,oBAAC,CACvB,yBAAyB,CAAA,KAAC,CAC1B,yBAAyB,CAAA;AAAC,CAC3B,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,CAAmB,CACvB,wBAAwB,CAAA,+CAAC,CACzB,sCAAsC,CAAA,KAAC,CACvC,qBAAqB,CAAA,QAAC,CACtB,sBAAsB,CAAA,KAAC,CACvB,0BAA0B,CAAA,QAAC,CAC3B,yBAAyB,CAAA,wEAAC,CAC1B,uBAAuB,CAAA;AAAC,CACzB,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,CAAoB,CACxB,wBAAwB,CAAA,+CAAC,CACzB,sCAAsC,CAAA,KAAC,CACvC,qBAAqB,CAAA,QAAC,CACtB,sBAAsB,CAAA,KAAC,CACvB,0BAA0B,CAAA,QAAC,CAC3B,yBAAyB,CAAA,wEAAC,CAC1B,uBAAuB,CAAA;AAAC,CACzB,AAED,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,IAAI,CACX,WAAW,CAAE,KAAK,CAClB,WAAW,CAAE,MAAM,CACnB,UAAU,CAAE,iBAAiB,CAC7B,aAAa,CAAE,IAAI,CACnB,KAAK,CAAE,YAAY,CACnB,MAAM,CAAE,OAAO,CACf,OAAO,CAAE,IAAI,CACb,eAAe,CAAE,aAAa,CAC9B,MAAM,CAAE,MAAM,CACd,OAAO,CAAE,OAAO,CAChB,WAAW,CAAE,IAAI,CAClB,AAED,AAAA,iBAAiB,CAAC,MAAM,CAAE,iBAAiB,CAAC,KAAK,CAAE,iBAAiB,CAAC,KAAK,AAAC,CACzE,UAAU,CAAE,2CAA2C,CACvD,UAAU,CAAE,iCAAiC,CAC7C,KAAK,CAAE,YAAY,CACnB,OAAO,CAAE,IACX,CAAC,AAED,AAAA,2BAA2B,AAAC,CAC1B,WAAW,CAAE,MAAM,CACnB,OAAO,CAAE,IACX,CAAC,AAED,AAAA,sBAAsB,AAAC,CACrB,YAAY,CAAE,GAChB,CAAC,AAED,AAAA,6BAA6B,AAAC,CAC5B,SAAS,CAAE,IAAI,CACf,OAAO,CAAE,YAAY,CACrB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,sBAAsB,AAAC,CACrB,OAAO,CAAE,IAAI,CACb,SAAS,CAAE,iBAAiB,CAC7B,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,MAAM,CACnB,aAAa,CAAE,GAAG,CAClB,KAAK,CAAE,4BAA4B,CACnC,OAAO,CAAE,IAAI,CACb,MAAM,CAAE,IAAI,CACZ,eAAe,CAAE,MAAM,CACvB,YAAY,CAAE,IAAI,CAClB,QAAQ,CAAE,QAAQ,CAClB,MAAM,CAAE,GAAG,CAAC,KAAK,CAAC,mBAAmB,CACrC,KAAK,CAAE,IACT,CAAC,AAED,MAAM,oBACJ,CAAA,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,GAAG,CACV,MAAM,
CAAE,CAAC,CACV,CAAA,AAGH,MAAM,oBACJ,CAAA,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,GAAG,CACX,CAAA,AAGH,MAAM,oBACJ,CAAA,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,GAAG,CACX,CAAA,AAGH,AAAA,kBAAkB,AAAC,CACjB,QAAQ,CAAE,iBACZ,CAAC,AAED,AAAA,oBAAoB,CAAE,oBAAoB,CAAC,CAAC,AAAC,CAC3C,UAAU,CAAE,UACd,CAAC,AAED,AAAA,oBAAoB,AAAC,CACnB,gBAAgB,CAAE,qCAAqC,CACvD,MAAM,CAAE,KAAK,CACb,IAAI,CAAE,CAAC,CACP,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,KAAK,CAAE,KAAK,CACZ,OAAO,CAAE,GAAG,CACZ,eAAe,CAAE,iBAAiB,CAClC,uBAAuB,CAAE,iBAAiB,CAC3C,AAED,AAAA,oBAAoB,CAAC,CAAC,AAAC,CACrB,eAAe,CAAE,IACnB,CAAC,AAED,AAAA,eAAe,AAAC,CACd,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,KAAK,CAAE,gCAAgC,CACvC,MAAM,CAAE,OAAO,CACf,IAAI,CAAE,OAAO,CACb,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CACX,CAAC,AAED,AAAA,gBAAgB,AAAC,CACf,UAAU,CAAE,iBAAiB,CAC7B,aAAa,CAAE,GAAG,CAClB,UAAU,CAAE,6BAA6B,CACzC,cAAc,CAAE,MAAM,CACtB,MAAM,CAAE,cAAc,CACtB,SAAS,CAAE,4BAA4B,CACvC,QAAQ,CAAE,QACZ,CAAC,AAED,AAAA,oBAAoB,AAAC,CACnB,OAAO,CAAE,IAAI,CACb,OAAO,CAAE,wBAAwB,CAAC,wBAAwB,CAAC,CAAC,CAC7D,AAED,AAAA,eAAe,AAAC,CACd,WAAW,CAAE,MAAM,CACnB,UAAU,CAAE,2CAA2C,CACvD,aAAa,CAAE,GAAG,CAClB,UAAU,CAAE,iCAAiC,CAC7C,OAAO,CAAE,IAAI,CACb,MAAM,CAAE,iCAAiC,CACzC,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CAAC,CAAC,wBAAwB,CACnC,QAAQ,CAAE,QAAQ,CAClB,KAAK,CAAE,IACT,CAAC,AAED,AAAA,gBAAgB,AAAC,CACf,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,WAAW,CACvB,MAAM,CAAE,CAAC,CACT,KAAK,CAAE,2BAA2B,CAClC,IAAI,CAAE,CAAC,CACP,IAAI,CAAE,OAAO,CACb,SAAS,CAAE,KAAK,CAChB,MAAM,CAAE,IAAI,CACZ,OAAO,CAAE,IAAI,CACb,OAAO,CAAE,SAAS,CAClB,KAAK,CAAE,GACT,CAAC,AAED,AAAA,gBAAgB,EAAE,WAAW,AAAC,CAC5B,KAAK,CAAE,4BAA4B,CACnC,OAAO,CAAE,CACX,CAAC,AAED,AAAA,gBAAgB,EAAE,4BAA4B,CAAE,gBAAgB,EAAE,yBAAyB,CAAE,gBAAgB,EAAE,6BAA6B,CAAE,gBAAgB,EAAE,iCAAiC,AAAC,CAChM,OAAO,CAAE,IACX,CAAC,AAED,AAAA,2BAA2B,CAAE,yBAAyB,CAAE,gBAAgB,AAAC,CACvE,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CACX,CAAC,AAED,AAAA,yBAAyB,CAAE,gBAAgB,AAAC,CAC1C,WAAW,CAAE,MAAM,CACnB,KAAK,CAAE,gCAAgC,CACvC,OAAO,CAAE,IAAI,CACb,eAAe,CAAE,MACnB,CAAC,AAED,AAAA,6BAA6B,CAAC,yBAAyB,CAAE,2BAA2B,AAAC,C
ACnF,OAAO,CAAE,IACX,CAAC,AAED,AAAA,6BAA6B,CAAC,2BAA2B,AAAC,CACxD,WAAW,CAAE,MAAM,CACnB,KAAK,CAAE,gCAAgC,CACvC,OAAO,CAAE,IAAI,CACb,eAAe,CAAE,MACnB,CAAC,AAED,MAAM,4CACJ,CAAA,AAAA,gBAAgB,AAAC,CACf,SAAS,CAAE,IAAI,CACf,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,aAAa,CAAE,GAAG,CAClB,KAAK,CAAE,2BAA2B,CAClC,MAAM,CAAE,OAAO,CACf,KAAK,CAAE,CAAC,CACR,YAAY,CAAE,kCAAkC,CACjD,CAAA,AAGH,AAAA,gBAAgB,AAAC,CACf,SAAS,CAAE,4BAA4B,CACvC,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,aAAa,CAAE,GAAG,CAClB,KAAK,CAAE,2BAA2B,CAClC,MAAM,CAAE,OAAO,CACf,OAAO,CAAE,GAAG,CACZ,KAAK,CAAE,CAAC,CACR,YAAY,CAAE,kCAAkC,CACjD,AAED,AAAA,gBAAgB,CAAA,AAAA,MAAC,AAAA,CAAQ,CACvB,OAAO,CAAE,IACX,CAAC,AAED,AAAA,gBAAgB,CAAC,KAAK,AAAC,CACrB,OAAO,CAAE,IACX,CAAC,AAED,AAAA,gBAAgB,CAAC,KAAK,AAAC,CACrB,KAAK,CAAE,gCAAgC,CACxC,AAED,AAAA,2BAA2B,CAAC,GAAG,CAAE,yBAAyB,CAAC,GAAG,AAAC,CAC7D,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IACT,CAAC,AAED,AAAA,iBAAiB,AAAC,CAChB,OAAO,CAAE,IACX,CAAC,AAED,AAAA,mBAAmB,AAAC,CAClB,UAAU,CAAE,mIAAmI,CAC/I,UAAU,CAAE,wBAAwB,CACpC,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,OAAO,CACnB,OAAO,CAAE,CAAC,CAAC,wBAAwB,CACnC,eAAe,CAAE,4BAA4B,CAAC,iCAAiC,CAC/E,eAAe,CAAE,IACnB,CAAC,AAED,AAAA,mBAAmB,EAAE,iBAAiB,AAAC,CACrC,KAAK,CAAE,IACT,CAAC,AAED,AAAA,mBAAmB,EAAE,uBAAuB,AAAC,CAC3C,UAAU,CAAE,WACd,CAAC,AAED,AAAA,mBAAmB,EAAE,uBAAuB,AAAC,CAC3C,gBAAgB,CAAE,4BAA4B,CAC9C,MAAM,CAAE,GAAG,CAAC,KAAK,CAAC,iCAAiC,CACnD,aAAa,CAAE,IACjB,CAAC,AAED,AAAA,mBAAmB,CAAC,EAAE,AAAC,CACrB,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CACX,CAAC,AAED,AAAA,gBAAgB,AAAC,CACf,SAAS,CAAE,KAAK,CAChB,WAAW,CAAE,KACf,CAAC,AAED,AAAA,eAAe,CAAE,gBAAgB,AAAC,CAChC,KAAK,CAAE,4BAA4B,CACpC,AAED,AAAA,eAAe,AAAC,CACd,SAAS,CAAE,IAAI,CACf,MAAM,CAAE,CAAC,CACT,WAAW,CAAE,IACf,CAAC,AAED,AAAA,gBAAgB,AAAC,CACf,SAAS,CAAE,KACb,CAAC,AAED,AAAA,eAAe,CAAC,CAAC,AAAC,CAChB,OAAO,CAAE,IACX,CAAC,AAED,AAAA,eAAe,CAAC,GAAG,AAAC,CAClB,KAAK,CAAE,2BAA2B,CAClC,WAAW,CAAE,GACf,CAAC,AAED,AAAA,eAAe,CAAC,YAAY,AAAC,CAC3B,aAAa,CAAE,IACjB,CAAC,AAED,AAAA,eAAe,CAAC,IAAI,AAAC,CAC
nB,UAAU,CAAE,IAAI,CAChB,KAAK,CAAE,gCAAgC,CACxC,AAED,AAAA,qBAAqB,AAAC,CACpB,KAAK,CAAE,4BAA4B,CACnC,OAAO,CAAE,IAAI,CACb,SAAS,CAAE,KAAK,CAChB,eAAe,CAAE,MAAM,CACvB,aAAa,CAAE,wBAAwB,CACvC,OAAO,CAAE,wBAAwB,CAClC,AAED,AAAA,qBAAqB,CAAC,CAAC,AAAC,CACtB,aAAa,CAAE,SAAS,CACxB,KAAK,CAAE,OACT,CAAC,AAED,AAAA,cAAc,AAAC,CACb,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,QAAQ,CAAE,QACZ,CAAC,AAED,MAAM,4CACJ,CAAA,AAAA,wBAAwB,AAAC,CACvB,UAAU,CAAE,IACd,CAAC,CAAA,AAGH,AAAA,wBAAwB,AAAC,CACvB,OAAO,CAAE,CAAC,CACV,UAAU,CAAE,eACd,CAAC,AAED,MAAM,4CACJ,CAAA,AAAA,0BAA0B,AAAC,CACzB,UAAU,CAAE,IACd,CAAC,CAAA,AAGH,AAAA,0BAA0B,AAAC,CACzB,SAAS,CAAE,QAAQ,CACnB,gBAAgB,CAAE,UAAU,CAC5B,UAAU,CAAE,eAAe,CAC3B,gBAAgB,CAAE,IACpB,CAAC,AAED,AAAA,cAAc,CAAC,CAAC,AAAC,CACf,UAAU,CAAE,+BAA+B,CAC3C,aAAa,CAAE,GAAG,CAClB,UAAU,CAAE,2BAA2B,CACvC,OAAO,CAAE,KAAK,CACd,YAAY,CAAE,wBAAwB,CACtC,KAAK,CAAE,IACT,CAAC,AAED,AAAA,qBAAqB,AAAC,CACpB,UAAU,CAAE,iCAAiC,CAC7C,KAAK,CAAE,gCAAgC,CACvC,SAAS,CAAE,KAAK,CAChB,WAAW,CAAE,GAAG,CAChB,WAAW,CAAE,IAAI,CACjB,MAAM,CAAE,MAAM,CACd,OAAO,CAAE,SAAS,CAClB,QAAQ,CAAE,MAAM,CAChB,GAAG,CAAE,CAAC,CACN,OAAO,CAAE,EACX,CAAC,AAED,AAAA,mBAAmB,AAAC,CAClB,KAAK,CAAE,4BAA4B,CACnC,MAAM,CAAE,2BAA2B,CACnC,OAAO,CAAE,EAAE,CACX,YAAY,CAAE,kCAAkC,CAChD,KAAK,CAAE,IACT,CAAC,AAED,AAAA,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,CAAC,AAAC,CACnC,gBAAgB,CAAE,gCAAgC,CACnD,AAED,AAAA,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,IAAI,AAAC,CACtC,eAAe,CAAE,SACnB,CAAC,AAED,AAAA,wBAAwB,AAAC,CACvB,WAAW,CAAE,MAAM,CACnB,KAAK,CAAE,0BAA0B,CACjC,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,MAAM,CAAE,2BAA2B,CACnC,OAAO,CAAE,CAAC,CAAC,wBAAwB,CAAC,CAAC,CAAC,CAAC,CACxC,AAED,AAAA,mBAAmB,AAAC,CAClB,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IACT,CAAC,AAED,AAAA,qBAAqB,CAAE,mBAAmB,AAAC,CACzC,KAAK,CAAE,4BAA4B,CACnC,YAAY,CAAE,kCAAkC,CACjD,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,MAAM,CACnB,OAAO,CAAE,IAAI,CACb,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IACT,CAAC,AAED,AAAA,qBAAqB,CAAC,GAAG,AAAC,CACxB,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IACT,CAAC,AAED,AAAA,qBAAq
B,CAAG,qBAAqB,AAAC,CAC5C,WAAW,CAAE,GACf,CAAC,AAED,AAAA,4BAA4B,AAAC,CAC3B,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,aAAa,CAAE,GAAG,CAClB,KAAK,CAAE,OAAO,CACd,MAAM,CAAE,OAAO,CACf,OAAO,CAAE,GACX,CAAC,AAED,AAAA,GAAG,AAAA,0BAA0B,AAAC,CAC5B,OAAO,CAAE,IACX,CAAC,AAED,AAAA,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,0BAA0B,AAAC,CAC5D,OAAO,CAAE,KACX,CAAC,AAED,AAAA,4BAA4B,CAAC,KAAK,CAAE,4BAA4B,CAAC,KAAK,AAAC,CACrE,UAAU,CAAE,eAAiB,CAC7B,UAAU,CAAE,4BACd,CAAC,AAED,MAAM,4CACJ,CAAA,AAAA,4BAA4B,CAAC,KAAK,CAAE,4BAA4B,CAAC,KAAK,AAAC,CACrE,UAAU,CAAE,IACd,CAAC,CAAA,AAGH,AAAA,4BAA4B,CAAC,KAAK,CAAC,IAAI,CAAE,4BAA4B,CAAC,KAAK,CAAC,IAAI,AAAC,CAC/E,IAAI,CAAE,IACR,CAAC,AAED,AAAA,8BAA8B,AAAC,CAC7B,OAAO,CAAE,IAAI,CACb,IAAI,CAAE,QAAQ,CACd,cAAc,CAAE,MAAM,CACtB,WAAW,CAAE,GAAG,CAChB,eAAe,CAAE,MAAM,CACvB,WAAW,CAAE,KAAK,CAClB,MAAM,CAAE,KAAK,CACb,UAAU,CAAE,MAAM,CAClB,QAAQ,CAAE,QAAQ,CAClB,aAAa,CAAE,QAAQ,CACvB,WAAW,CAAE,MAAM,CACnB,KAAK,CAAE,GACT,CAAC,AAED,AAAA,oBAAoB,AAAC,CACnB,SAAS,CAAE,IACb,CAAC,AAED,AAAA,mBAAmB,AAAC,CAClB,KAAK,CAAE,4BAA4B,CACnC,SAAS,CAAE,KACb,CAAC,AAED,AAAA,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,qBAAqB,CAAE,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,mBAAmB,CAAE,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,mBAAmB,CAAE,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,mBAAmB,CAAE,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,oBAAoB,CAAE,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,mBAAmB,CAAE,cAAc,CAAA,AAAA,aAAC,CAAD,IAAC,AAAA,EAAoB,IAAI,AAAC,CACzX,KAAK,CAAE,iCAAiC,CAAC,UAAU,CACpD,AAED,MAAM,4CACJ,CAAA,AAAA,4BAA4B,CAAC,KAAK,CAAE,4BAA4B,CAAC,KAAK,AAAC,CACrE,UAAU,CAAE,eAAiB,CAC7B,UAAU,CAAE,IACd,CAAC,CAAA,AAGH,AAAA,sBAAsB,CAAE,oBAAoB,CAAE,sBAAsB,AAAC,CACnE,SAAS,CAAE,IAAI,CACf,MAAM,CAAE,MAAM,CACd,OAAO,CAAE,MAAM,CACf,UAAU,CAAE,MAAM,CAClB,KAAK,CAAE,GACT,CAAC,AAED,AAAA,sBAAsB,AAAC,CACrB,KAAK,CAAE,4BAA4B,CACnC,cAAc,CAAE,IAClB,CAAC,AAED,AAAA,iCAAiC,AAAC,CAChC,OAAO,CAAE,YAAY,CACrB,cAAc,CAAE,IAAI,CACpB,UAAU,CAAE,IACd,CAAC,AAED,AAAA,iCAAiC,CAAC,EAAE,AAAC,CACnC,OAAO,CAAE,YAAY,CACrB,OAAO,CAAE,OACX,CAAC,AAE
D,AAAA,iCAAiC,CAAC,EAAE,AAAC,CACnC,mBAAmB,CAAE,MAAM,CAC3B,eAAe,CAAE,IACnB,CAAC,AAED,AAAA,kBAAkB,AAAC,CACjB,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,aAAa,CAAE,GAAG,CAClB,KAAK,CAAE,gCAAgC,CACvC,MAAM,CAAE,OAAO,CACf,OAAO,CAAE,YAAY,CACrB,SAAS,CAAE,GAAG,CACd,WAAW,CAAE,GAAG,CAChB,OAAO,CAAE,CACX,CAAC,AAED,AAAA,kBAAkB,CAAC,KAAK,CAAE,kBAAkB,CAAC,KAAK,AAAC,CACjD,OAAO,CAAE,IAAI,CACb,eAAe,CAAE,SACnB,CAAC,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,MAAM,CACnB,aAAa,CAAE,WAAW,CAC1B,UAAU,CAAE,8BAA8B,CAC1C,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,WAAW,CAC3B,WAAW,CAAE,CAAC,CACd,MAAM,CAAE,8BAA8B,CACtC,eAAe,CAAE,aAAa,CAC9B,OAAO,CAAE,CAAC,CAAC,wBAAwB,CACnC,QAAQ,CAAE,QAAQ,CAClB,WAAW,CAAE,IAAI,CACjB,KAAK,CAAE,IAAI,CACX,OAAO,CAAE,GACX,CAAC,AAED,AAAA,mBAAmB,AAAC,CAClB,KAAK,CAAE,4BAA4B,CACnC,OAAO,CAAE,IAAI,CACb,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CACX,CAAC,AAED,AAAA,mBAAmB,CAAC,EAAE,AAAC,CACrB,WAAW,CAAE,MAAM,CACnB,OAAO,CAAE,IACX,CAAC,AAED,AAAA,mBAAmB,CAAC,EAAE,CAAA,GAAK,EAAC,YAAY,CAAE,CACxC,YAAY,CAAE,IAChB,CAAC,AAED,AAAA,uBAAuB,AAAC,CACtB,WAAW,CAAE,MAAM,CACnB,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,IAAI,CACb,MAAM,CAAE,IAAI,CACZ,eAAe,CAAE,MAAM,CACvB,YAAY,CAAE,IAAI,CAClB,OAAO,CAAE,OAAO,CAChB,KAAK,CAAE,4BAA4B,CACnC,MAAM,CAAE,GAAG,CAAC,KAAK,CAAC,mBAAmB,CACrC,KAAK,CAAE,IACT,CAAC,AAED,MAAM,mBACJ,EAAC,AAAD,IAAK,AAAC,CACJ,mBAAmB,CAAA,KAAC,CACpB,yBAAyB,CAAA;EAAC,CAC3B,AACD,AAAA,mBAAmB,AAAC,CAClB,MAAM,CAAE,IACV,CAAC,AACD,AAAA,oBAAoB,AAAC,CACnB,MAAM,CAAE,KAAK,CACb,MAAM,CAAE,sBAAsB,CAC9B,MAAM,CAAE,oCAAoC,CAC5C,QAAQ,CAAE,QACZ,CAAC,AACD,AAAA,iBAAiB,AAAC,CAChB,aAAa,CAAE,CAAC,CAChB,MAAM,CAAE,CAAC,CACT,QAAQ,CAAE,QACZ,CAAC,AACD,AAAA,8BAA8B,AAAC,CAC7B,OAAO,CAAE,IAAI,CACb,QAAQ,CAAE,QAAQ,CAClB,KAAK,CAAE,GACT,CAAC,AACD,AAAA,gBAAgB,AAAC,CACf,aAAa,CAAE,CAAC,CAChB,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,KAAK,CACb,MAAM,CAAE,sBAAsB,CAC9B,MAAM,CAAE,oCAAoC,CAC5C,MAAM,CAAE,CAAC,CACT,SAAS,CAAE,IAAI,CACf,KAAK,CAAE,IACT,CAAC,AACD,AAAA,mBAAmB,AAAC,CAClB,UAAU,CAAE,oIAAoI,CACjJ,AACD,AAAA,iBAAiB,AAAC,CAChB,UAAU,CAAE,IAAI,CAChB,
UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CACT,KAAK,CAAE,gCAAgC,CACvC,MAAM,CAAE,OAAO,CACf,OAAO,CAAE,YAAY,CACrB,IAAI,CAAE,IAAI,CACV,IAAI,CAAE,OAAO,CACb,SAAS,CAAE,GAAG,CACd,WAAW,CAAE,GAAG,CAChB,WAAW,CAAE,wBAAwB,CACrC,OAAO,CAAE,IAAI,CACb,QAAQ,CAAE,MAAM,CAChB,OAAO,CAAE,CAAC,CACV,WAAW,CAAE,IAAI,CACjB,WAAW,CAAE,MACf,CAAC,AACD,AAAA,mBAAmB,CAAE,mBAAmB,AAAC,CACvC,OAAO,CAAE,IACX,CAAC,CArDA,AAwDH,UAAU,CAAV,OAAU,CACR,EAAE,CACA,OAAO,CAAE,CACX,CACA,EAAE,CACA,OAAO,CAAE,CACX" -} \ No newline at end of file diff --git a/docs/scss/home.css b/docs/scss/home.css deleted file mode 100644 index 8de9d5c..0000000 --- a/docs/scss/home.css +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Docura (https://docura.github.io/) - * Copyright 2022-2023 Dumindu Madunuwan - * Licensed under the MIT License. - */*:where(:not(html, iframe, canvas, img, svg, video, audio, pre, code):not(svg *, symbol *)){all:unset;display:revert}*,*::before,*::after{box-sizing:border-box}html{-moz-text-size-adjust:none;-webkit-text-size-adjust:none;text-size-adjust:none}a,button{cursor:revert}ol,ul,menu{list-style:none}img{max-inline-size:100%;max-block-size:100%}table{border-collapse:collapse}input,textarea{-webkit-user-select:auto}textarea{white-space:revert}meter{-webkit-appearance:revert;appearance:revert}:where(pre){all:revert;box-sizing:border-box}::placeholder{color:unset}::marker{content:initial}:where([hidden]){display:none}:where([contenteditable]:not([contenteditable="false"])){-moz-user-modify:read-write;-webkit-user-modify:read-write;overflow-wrap:break-word;-webkit-line-break:after-white-space;-webkit-user-select:auto}:where([draggable="true"]){-webkit-user-drag:element}:where(dialog:modal){all:revert;box-sizing:border-box}pre,code{margin:0}:root{--site-header-height: 46px;--site-footer-height: 46px}@media (min-width: 1025px) and (max-width: 1280px),(min-width: 1024px) and (max-width: 1280px) and (orientation: portrait){:root{--site-header-height: 60px;--site-footer-height: 60px}}@media (min-width: 
1281px){:root{--site-header-height: 80px;--site-footer-height: 80px}}body{font-family:var(--font-family);background:var(--background);color:var(--color);display:flex;flex-direction:column;min-height:100svh}#site-header{display:grid;grid-template-columns:2fr 1fr;grid-template-rows:repeat(3, var(--site-header-height))}#site-header-menu,#site-header-search{grid-column:1 / 3}#site-footer{display:grid;grid-template-columns:1fr 1fr;grid-template-rows:repeat(3, var(--site-footer-height))}#site-footer-copyright,#site-footer-love{grid-column:1 / 3}#site-main-content-wrapper{display:flex;flex:1}#sidebar,#toc,#article-nav,#sidebar .btn-close,#toc .btn-close{display:none}main{flex:1;display:flex;overflow:auto}#article{flex:1;width:100vw}#sidebar{width:85%;left:-85%}#toc{width:85%;right:-85%}@media (min-width: 768px) and (max-width: 1023px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:repeat(2, var(--site-header-height))}#site-header-brand{grid-column:1 / 6}#site-header-controls{grid-column:6 / 7}#site-header-menu{grid-column:1 / 5}#site-header-search{grid-column:5 / 7}#site-footer{grid-template-columns:repeat(4, 1fr);grid-template-rows:repeat(2, var(--site-footer-height))}#site-footer-copyright{grid-column:1 / 3}#site-footer-social{grid-column:3 / 4}#site-footer-fund{grid-column:4 / 5}#site-footer-love{grid-column:1 / 5}#sidebar{width:50%;left:-50%}#toc{width:50%;right:-50%}}@media (min-width: 1024px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:var(--site-header-height)}#site-header-brand{grid-column:1 / 2}#site-header-menu{grid-column:2 / 5;grid-row:1}#site-header-search{grid-column:5 / 6;grid-row:1}#site-header-controls{grid-column:6 / 7}#site-footer{grid-template-columns:repeat(5, 1fr);grid-template-rows:var(--site-footer-height)}#site-footer-copyright{grid-column:1 / 3}#site-footer-love{grid-column:3 / 4;grid-row:1}#site-footer-social{grid-column:4 / 5}#site-footer-fund{grid-column:5 / 
6}#article-nav-toc-btn{display:none}}@media (min-width: 1024px) and (max-width: 1279px){#sidebar{width:33%;left:-33%}#article{width:75vw}#toc{width:25%;display:flex;flex-direction:column}#toc .sticky{position:fixed;right:0;width:25%}}@media (min-width: 1280px){#sidebar{width:20%;display:flex;flex-direction:column}#article{width:60vw}#toc{width:25%;display:flex;flex-direction:column}#sidebar .sticky{position:fixed;left:0;width:20%}#toc .sticky{position:fixed;right:0;width:20%}}@media (max-width: 1023px){#toc{position:fixed;top:0;height:100%;transition:.3s;z-index:300;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] #toc,:root[data-color="night"] #toc{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-toc-on #toc{animation:slide-in-right .3s forwards;display:flex;flex-direction:column;padding-left:16px;z-index:10;cursor:default}.offcanvas-toc-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-toc-on #toc .btn-close{display:block;position:absolute;top:10px;left:10px}#article-nav-toc-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}@media (max-width: 1279px){#sidebar{position:fixed;top:0;height:100%;transition:.3s;z-index:200;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] #sidebar,:root[data-color="night"] #sidebar{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-sidebar-on #sidebar{animation:slide-in-left .3s forwards;display:flex;flex-direction:column;z-index:10;cursor:default}.offcanvas-sidebar-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-sidebar-on #sidebar .btn-close{display:block;position:absolute;top:10px;right:10px}#article-nav{display:flex;gap:12px;overflow:auto;justify-content:space-between;height:var(--site-header-height);align-items:center;padding:0 
2px}#article-nav-menu-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}body.offcanvas-sidebar-on,body.offcanvas-toc-on{cursor:pointer;overflow:hidden}.offcanvas-sidebar-on:before,.offcanvas-toc-on:before{background:rgba(255,255,255,0.1);backdrop-filter:blur(var(--blur));-webkit-backdrop-filter:blur(var(--blur))}@keyframes slide-in-left{from{transform:translateX(0)}to{transform:translateX(100%)}}@keyframes slide-in-right{from{transform:translateX(0)}to{transform:translateX(-100%)}}#site-header-brand{display:flex;align-items:center;font-family:var(--font-family-brand);font-size:1.4em;color:var(--color2)}#site-header-brand a{padding:12px}#site-header-menu{padding:0 12px;display:flex;align-items:center;color:var(--color3)}#site-header-menu nav{width:100%;overflow:auto}#site-header-menu ul{display:flex;height:100%;align-items:center;gap:12px}#site-header-menu a{display:flex;padding:12px 6px;gap:3px;white-space:nowrap}#site-header-menu a:focus,#site-header-menu a:hover,#site-header-menu a.active{border-bottom:3px solid}#site-header-controls{display:flex;align-items:center;padding-right:12px;justify-content:flex-end;gap:12px}#site-header-search{display:flex;align-items:flex-end}@media (min-width: 768px){#site-header-search{align-items:center}}#site-footer-social{display:flex;gap:12px;justify-content:flex-start;padding-left:12px;align-items:center}#site-footer-fund{display:flex;gap:12px;overflow:auto;justify-content:flex-end;padding-right:12px;align-items:center}#site-footer-copyright,#site-footer-love{display:flex;align-items:center;justify-content:center;color:var(--color3)}#site-footer-copyright a{display:flex;align-items:center}@media (min-width: 768px){#site-footer-copyright{justify-content:flex-start;padding-left:12px}#site-footer-social{justify-content:flex-end;padding-right:12px}}.cover{padding:40px 
20px;width:100vw;flex:1;display:flex;align-items:center;justify-content:center;flex-direction:column;background:var(--home-cover-background);position:relative;color:var(--color2)}.cover::after{content:"";position:absolute;top:0;left:0;right:0;bottom:0;z-index:-1;background:inherit;filter:blur(1rem)}.cover h1{font-family:var(--font-family-brand);font-size:4em;text-align:center}.cover h2{font-family:var(--font-family-brand);font-size:2em;text-align:center}.cover h3{font-family:var(--font-family-brand);font-size:1.5em;text-align:center;padding-top:.8em}.cover p{font-size:1em;padding-top:.8em}.github-buttons{display:flex;gap:10px;padding-top:20px;justify-content:center}.github-repos-grid{display:flex;flex-wrap:wrap;padding-top:4em;padding-bottom:2em;gap:4em;width:100%}.github-repo-tile{width:100%}.github-repo-tile .icon{width:80px;height:80px;background-size:5em}.github-repo-tile a{display:flex;flex-direction:column;align-items:center}@media (min-width: 768px){.github-repos-grid{flex-direction:row;width:80%;padding-top:4em;gap:0}.github-repo-tile{width:50%}}@media (min-width: 1024px){.github-repos-grid{width:60%;padding-top:6em}.github-repo-tile .icon{width:100px;height:100px;background-size:6.25em}}@media (min-width: 1281px){.github-repos-grid{width:50%}.github-repo-tile .icon{width:120px;height:120px;background-size:7.5em}}@media (min-width: 1920px){.github-repos-grid{width:40%}.github-repo-tile .icon{width:160px;height:160px;background-size:10em}}.btn-github{display:flex;flex-direction:row;gap:2px;font-size:.7em;font-weight:700;line-height:1.8em;color:#576060;background:#f6f8fa;border:1px solid #d5d7da;border-radius:6px;padding:2px 4px}:root[data-color="dark"] .btn-github,:root[data-color="night"] .btn-github{color:#c9d1d9;background:#21262d;border:1px solid #576060}.btn-github .icon{transform:scale(0.8)}.btn-buymeacoffee{width:86px;height:24px;background-image:url("data:image/svg+xml,%3Csvg width='85.5' height='24' viewBox='0 0 545 153' fill='none' 
xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0 24.48C0 10.9601 10.9601 0 24.48 0H520.2C533.72 0 544.68 10.9601 544.68 24.48V128.52C544.68 142.04 533.72 153 520.2 153H24.48C10.9601 153 0 142.04 0 128.52V24.48Z' fill='%23FFDD00'/%3E%3Cpath d='M109.522 50.3178L109.455 50.2783L109.299 50.2308C109.362 50.2836 109.44 50.3142 109.522 50.3178Z' fill='%230D0C22'/%3E%3Cpath d='M110.507 57.3134L110.432 57.3344L110.507 57.3134Z' fill='%230D0C22'/%3E%3Cpath d='M109.549 50.3062C109.54 50.3051 109.532 50.3031 109.524 50.3003C109.523 50.3058 109.523 50.3113 109.524 50.3168C109.533 50.3156 109.541 50.3119 109.549 50.3062Z' fill='%230D0C22'/%3E%3Cpath d='M109.523 50.3205H109.536V50.3127L109.523 50.3205Z' fill='%230D0C22'/%3E%3Cpath d='M110.447 57.3006L110.56 57.2361L110.602 57.2123L110.64 57.1715C110.569 57.2025 110.503 57.2462 110.447 57.3006Z' fill='%230D0C22'/%3E%3Cpath d='M109.715 50.4713L109.604 50.3659L109.529 50.3251C109.57 50.3963 109.636 50.4488 109.715 50.4713Z' fill='%230D0C22'/%3E%3Cpath d='M81.8801 118.353C81.7916 118.391 81.7142 118.451 81.6548 118.527L81.7246 118.482C81.772 118.439 81.8392 118.387 81.8801 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M98.0456 115.173C98.0456 115.073 97.9968 115.091 98.0087 115.447C98.0087 115.418 98.0206 115.389 98.0258 115.361C98.0324 115.298 98.0377 115.236 98.0456 115.173Z' fill='%230D0C22'/%3E%3Cpath d='M96.3761 118.353C96.2877 118.391 96.2103 118.451 96.1509 118.527L96.2207 118.482C96.2681 118.439 96.3353 118.387 96.3761 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M70.4886 119.11C70.4215 119.052 70.3393 119.013 70.2515 118.999C70.3226 119.034 70.3937 119.068 70.4412 119.094L70.4886 119.11Z' fill='%230D0C22'/%3E%3Cpath d='M67.9304 116.657C67.92 116.553 67.8881 116.453 67.8369 116.362C67.8732 116.456 67.9035 116.553 67.9278 116.652L67.9304 116.657Z' fill='%230D0C22'/%3E%3Cpath d='M85.1368 72.7737C81.6195 74.2794 77.628 75.9866 72.4549 75.9866C70.2908 75.9823 68.1373 75.6854 66.0527 75.104L69.6306 111.838C69.7572 113.373 70.4567 114.805 
71.59 115.848C72.7233 116.892 74.2076 117.471 75.7482 117.47C75.7482 117.47 80.8212 117.734 82.514 117.734C84.3358 117.734 89.7988 117.47 89.7988 117.47C91.3391 117.47 92.8231 116.891 93.9562 115.848C95.0892 114.804 95.7885 113.373 95.9151 111.838L99.7472 71.2456C98.0347 70.6607 96.3064 70.2721 94.358 70.2721C90.9883 70.2708 88.2733 71.4313 85.1368 72.7737Z' fill='white'/%3E%3Cpath d='M54.9844 57.1021L55.045 57.1587L55.0845 57.1824C55.0541 57.1522 55.0205 57.1252 54.9844 57.1021Z' fill='%230D0C22'/%3E%3Cpath d='M116.299 53.7119L115.761 50.9943C115.277 48.5559 114.18 46.2519 111.677 45.3706C110.875 45.0887 109.964 44.9675 109.349 44.384C108.734 43.8004 108.552 42.8941 108.41 42.0536C108.147 40.511 107.899 38.9671 107.629 37.4272C107.396 36.1033 107.211 34.616 106.604 33.4015C105.814 31.7706 104.174 30.8169 102.543 30.1859C101.707 29.8739 100.854 29.61 99.9884 29.3955C95.9139 28.3205 91.63 27.9253 87.4382 27.7001C82.407 27.4225 77.3623 27.5061 72.343 27.9504C68.6071 28.2902 64.6723 28.7013 61.1221 29.9935C59.8245 30.4665 58.4875 31.0342 57.5008 32.0367C56.2902 33.2684 55.895 35.1733 56.7789 36.7092C57.4073 37.8 58.4717 38.5706 59.6006 39.0804C61.0711 39.7373 62.6068 40.2371 64.1822 40.5716C68.5689 41.5412 73.1124 41.9219 77.5939 42.0839C82.561 42.2844 87.5362 42.1219 92.4796 41.5978C93.7021 41.4635 94.9224 41.3023 96.1405 41.1144C97.575 40.8944 98.4958 39.0185 98.073 37.7117C97.5671 36.1494 96.2077 35.5434 94.6703 35.7792C94.4438 35.8148 94.2185 35.8477 93.9919 35.8807L93.8286 35.9044C93.3078 35.9702 92.787 36.0317 92.2662 36.0888C91.1904 36.2047 90.112 36.2996 89.0309 36.3733C86.6097 36.5419 84.1818 36.6197 81.7553 36.6236C79.371 36.6236 76.9853 36.5564 74.6062 36.3997C73.5207 36.3285 72.4379 36.2381 71.3577 36.1283C70.8663 36.0769 70.3763 36.0229 69.8862 35.9623L69.4199 35.903L69.3185 35.8886L68.835 35.8187C67.847 35.6699 66.859 35.4986 65.8816 35.2918C65.783 35.2699 65.6947 35.2151 65.6315 35.1363C65.5683 35.0575 65.5338 34.9594 65.5338 34.8584C65.5338 34.7574 
65.5683 34.6594 65.6315 34.5806C65.6947 34.5018 65.783 34.4469 65.8816 34.425H65.9C66.7471 34.2445 67.6007 34.0904 68.4569 33.956C68.7424 33.9113 69.0287 33.8673 69.3158 33.8243H69.3237C69.8599 33.7887 70.3987 33.6926 70.9322 33.6293C75.574 33.1465 80.2434 32.9819 84.9077 33.1367C87.1721 33.2025 89.4353 33.3356 91.6892 33.5648C92.174 33.6149 92.6562 33.6676 93.1383 33.7268C93.3227 33.7492 93.5085 33.7756 93.6942 33.798L94.0683 33.852C95.1591 34.0144 96.2441 34.2116 97.3234 34.4435C98.9227 34.7912 100.976 34.9045 101.688 36.6566C101.914 37.2125 102.017 37.8303 102.142 38.4139L102.302 39.1581C102.306 39.1715 102.309 39.1852 102.311 39.199C102.688 40.9554 103.065 42.7118 103.442 44.4683C103.47 44.598 103.471 44.7321 103.444 44.8621C103.418 44.9921 103.365 45.1153 103.289 45.2239C103.213 45.3326 103.115 45.4244 103.002 45.4936C102.889 45.5628 102.762 45.6079 102.631 45.6262H102.62L102.39 45.6578L102.162 45.6881C101.44 45.7821 100.717 45.8699 99.9936 45.9516C98.5683 46.114 97.1408 46.2546 95.711 46.3731C92.87 46.6094 90.0233 46.7644 87.1708 46.8381C85.7174 46.8768 84.2644 46.8948 82.8118 46.8921C77.0301 46.8876 71.2534 46.5516 65.5101 45.8857C64.8883 45.8119 64.2666 45.7329 63.6448 45.6525C64.1269 45.7145 63.2944 45.6051 63.1258 45.5814C62.7306 45.5261 62.3354 45.4686 61.9402 45.4088C60.6136 45.2099 59.295 44.9649 57.9711 44.7502C56.3705 44.4867 54.8398 44.6185 53.3921 45.4088C52.2037 46.0591 51.2419 47.0564 50.6349 48.2674C50.0105 49.5584 49.8248 50.964 49.5455 52.3511C49.2662 53.7383 48.8315 55.2308 48.9962 56.6548C49.3505 59.7281 51.4991 62.2258 54.5895 62.7843C57.4968 63.3112 60.42 63.7381 63.351 64.1016C74.8648 65.5118 86.4968 65.6805 98.0466 64.6049C98.9872 64.517 99.9265 64.4213 100.864 64.3177C101.157 64.2855 101.454 64.3192 101.732 64.4165C102.01 64.5137 102.263 64.6719 102.472 64.8795C102.681 65.0872 102.842 65.339 102.941 65.6165C103.04 65.894 103.076 66.1902 103.046 66.4834L102.753 69.3261C102.164 75.0705 101.575 80.8145 100.986 86.558C100.371 92.5896 
99.7521 98.6208 99.1295 104.651C98.9538 106.35 98.7782 108.048 98.6025 109.746C98.4339 111.417 98.4102 113.142 98.0927 114.794C97.5922 117.391 95.8335 118.987 93.2674 119.57C90.9164 120.105 88.5148 120.386 86.1038 120.408C83.431 120.422 80.7594 120.304 78.0866 120.318C75.2333 120.334 71.7384 120.071 69.5358 117.947C67.6007 116.082 67.3333 113.161 67.0698 110.636C66.7185 107.293 66.3703 103.95 66.0252 100.607L64.0887 82.0212L62.8359 69.9953C62.8149 69.7964 62.7938 69.6001 62.774 69.3999C62.6239 67.9654 61.6082 66.5611 60.0077 66.6335C58.6376 66.6941 57.0806 67.8586 57.2413 69.3999L58.17 78.3155L60.0906 96.7581C60.6378 101.997 61.1836 107.236 61.7281 112.476C61.8335 113.48 61.9323 114.487 62.0429 115.49C62.6449 120.976 66.834 123.932 72.0216 124.764C75.0515 125.252 78.1551 125.352 81.2297 125.402C85.1711 125.465 89.1521 125.617 93.029 124.903C98.7738 123.849 103.084 120.013 103.699 114.062C103.875 112.345 104.051 110.626 104.226 108.908C104.81 103.224 105.393 97.5397 105.976 91.855L107.88 73.2807L108.754 64.7682C108.797 64.3461 108.976 63.9492 109.262 63.6363C109.549 63.3234 109.929 63.111 110.345 63.0307C111.988 62.7105 113.558 62.1639 114.727 60.9137C116.587 58.9232 116.957 56.3281 116.299 53.7119ZM54.5052 55.5483C54.5302 55.5364 54.4841 55.7511 54.4644 55.8513C54.4604 55.6998 54.4683 55.5654 54.5052 55.5483ZM54.6646 56.7813C54.6778 56.7721 54.7173 56.8248 54.7581 56.888C54.6962 56.83 54.6567 56.7866 54.6633 56.7813H54.6646ZM54.8214 56.9881C54.878 57.0843 54.9083 57.1449 54.8214 56.9881V56.9881ZM55.1362 57.2437H55.1441C55.1441 57.2529 55.1586 57.2621 55.1639 57.2713C55.1551 57.2612 55.1454 57.2519 55.1349 57.2437H55.1362ZM110.269 56.8616C109.679 57.4228 108.789 57.6837 107.911 57.8141C98.0572 59.2763 88.06 60.0166 78.0984 59.6899C70.9691 59.4462 63.9148 58.6545 56.8566 57.6573C56.165 57.5598 55.4155 57.4334 54.9399 56.9236C54.0441 55.9619 54.4841 54.0254 54.7173 52.8636C54.9307 51.7992 55.3391 50.3804 56.605 50.2289C58.581 49.9971 60.8758 50.8309 62.8307 
51.1273C65.1843 51.4865 67.5467 51.7741 69.9179 51.9902C80.0375 52.9123 90.3271 52.7687 100.402 51.4198C102.238 51.173 104.068 50.8863 105.891 50.5596C107.516 50.2684 109.316 49.7218 110.298 51.404C110.971 52.55 111.06 54.0834 110.956 55.3783C110.924 55.9425 110.678 56.4732 110.267 56.8616H110.269Z' fill='%230D0C22'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M170.036 84.2397C169.461 85.3378 168.67 86.2942 167.663 87.1057C166.656 87.9178 165.482 88.579 164.139 89.0881C162.797 89.5984 161.446 89.9408 160.088 90.1153C158.729 90.2905 157.41 90.2753 156.133 90.0674C154.854 89.8608 153.766 89.439 152.872 88.8014L153.88 78.3397C154.806 78.0216 155.972 77.6949 157.379 77.3604C158.785 77.0264 160.231 76.787 161.718 76.644C163.205 76.5004 164.61 76.5173 165.937 76.6919C167.263 76.867 168.31 77.2888 169.077 77.9579C169.493 78.3397 169.845 78.7537 170.132 79.1997C170.42 79.6458 170.595 80.1076 170.66 80.5852C170.819 81.9227 170.612 83.1409 170.036 84.2397ZM155.413 61.9545C156.084 61.5406 156.892 61.1739 157.834 60.8551C158.777 60.5376 159.744 60.3139 160.735 60.1867C161.725 60.06 162.692 60.043 163.636 60.1388C164.578 60.2345 165.41 60.497 166.129 60.9267C166.848 61.357 167.383 61.9782 167.735 62.7897C168.086 63.6024 168.182 64.6296 168.022 65.8714C167.895 66.8587 167.502 67.695 166.848 68.3793C166.193 69.0647 165.393 69.6374 164.451 70.0993C163.508 70.5617 162.509 70.9277 161.455 71.1974C160.399 71.4689 159.384 71.6683 158.41 71.795C157.435 71.9229 156.588 72.0029 155.869 72.0338C155.15 72.0659 154.678 72.0816 154.454 72.0816L155.413 61.9545ZM175.214 77.4798C174.703 76.3658 174.016 75.3864 173.153 74.5416C172.29 73.698 171.266 73.0853 170.084 72.7029C170.595 72.2889 171.099 71.6362 171.595 70.7441C172.09 69.8532 172.513 68.8811 172.865 67.8302C173.216 66.7787 173.457 65.7205 173.584 64.6533C173.711 63.5866 173.663 62.6709 173.441 61.906C172.896 59.9958 172.042 58.4988 170.875 57.4158C169.708 56.3334 168.35 55.5849 166.8 55.1704C165.249 54.7577 163.54 54.6692 161.67 
54.908C159.8 55.1467 157.89 55.6164 155.941 56.317C155.941 56.1582 155.957 55.991 155.989 55.8158C156.02 55.6413 156.036 55.4576 156.036 55.2661C156.036 54.7886 155.797 54.3752 155.317 54.0243C154.838 53.674 154.287 53.4674 153.664 53.4031C153.04 53.3401 152.433 53.4746 151.841 53.8092C151.25 54.1437 150.842 54.7577 150.619 55.6479C150.363 58.5146 150.107 61.4927 149.852 64.5812C149.596 67.6708 149.324 70.792 149.037 73.9453C148.749 77.0979 148.461 80.227 148.174 83.3318C147.886 86.4372 147.598 89.4226 147.311 92.2886C147.407 93.1486 147.646 93.8177 148.03 94.2953C148.413 94.7734 148.861 95.0601 149.372 95.1553C149.883 95.251 150.419 95.1625 150.978 94.8922C151.537 94.6225 152.025 94.1516 152.441 93.4832C153.719 94.1838 155.158 94.6377 156.756 94.845C158.354 95.0516 159.975 95.0516 161.623 94.845C163.268 94.6377 164.89 94.248 166.488 93.6741C168.086 93.1013 169.541 92.3844 170.851 91.525C172.162 90.665 173.264 89.685 174.16 88.5869C175.054 87.4875 175.646 86.3014 175.933 85.0281C176.221 83.7221 176.301 82.4167 176.173 81.1106C176.045 79.8052 175.725 78.5955 175.214 77.4798Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M221.989 102.702C221.814 103.753 221.565 104.86 221.246 106.023C220.926 107.184 220.551 108.244 220.12 109.2C219.688 110.155 219.209 110.926 218.682 111.516C218.154 112.105 217.586 112.352 216.979 112.257C216.5 112.192 216.196 111.89 216.069 111.349C215.94 110.807 215.94 110.138 216.069 109.343C216.196 108.546 216.443 107.646 216.811 106.643C217.179 105.64 217.627 104.644 218.154 103.658C218.682 102.67 219.281 101.723 219.952 100.815C220.623 99.9082 221.326 99.1512 222.061 98.5464C222.221 98.7373 222.293 99.2149 222.277 99.9797C222.26 100.744 222.165 101.652 221.989 102.702ZM238.243 81.9697C237.811 81.4921 237.284 81.2218 236.66 81.1576C236.037 81.0939 235.405 81.4442 234.767 82.2085C234.351 82.9727 233.823 83.7054 233.184 84.406C232.545 85.1072 231.882 85.7436 231.195 86.3169C230.507 86.8896 229.852 87.3841 229.229 
87.7975C228.606 88.212 228.118 88.5144 227.767 88.7053C227.639 87.6866 227.566 86.5878 227.551 85.409C227.534 84.2308 227.559 83.0369 227.623 81.8266C227.718 80.1067 227.918 78.3715 228.222 76.6194C228.526 74.868 228.965 73.148 229.541 71.4595C229.541 70.5686 229.332 69.8438 228.917 69.2862C228.501 68.7293 227.998 68.3784 227.407 68.2353C226.815 68.0923 226.209 68.1717 225.585 68.4741C224.962 68.7771 224.427 69.3268 223.979 70.122C223.596 71.1735 223.156 72.3516 222.661 73.6571C222.165 74.9631 221.606 76.2928 220.983 77.6461C220.359 79.0006 219.664 80.3139 218.897 81.5873C218.13 82.8618 217.291 83.9927 216.38 84.9793C215.469 85.9666 214.478 86.7393 213.408 87.2963C212.336 87.8538 211.179 88.1005 209.932 88.0369C209.356 87.8775 208.94 87.4478 208.685 86.7466C208.429 86.0466 208.277 85.1702 208.23 84.1193C208.182 83.0684 208.23 81.9139 208.373 80.6557C208.517 79.3982 208.709 78.1479 208.949 76.9061C209.188 75.6637 209.452 74.4855 209.739 73.371C210.027 72.2565 210.298 71.3165 210.554 70.5523C210.938 69.6292 210.938 68.8559 210.554 68.2353C210.171 67.6141 209.644 67.2008 208.973 66.9929C208.302 66.7863 207.598 66.7947 206.863 67.0172C206.128 67.2402 205.6 67.7335 205.281 68.4977C204.737 69.8044 204.241 71.2686 203.794 72.8928C203.347 74.5171 202.987 76.1976 202.716 77.9328C202.444 79.6691 202.291 81.3891 202.26 83.0927C202.258 83.2036 202.263 83.309 202.263 83.4193C201.566 85.2708 200.902 86.6702 200.271 87.6066C199.456 88.8174 198.536 89.3429 197.514 89.1829C197.065 88.992 196.771 88.5465 196.627 87.8453C196.482 87.1453 196.435 86.2854 196.482 85.2654C196.531 84.2472 196.651 83.0927 196.842 81.8024C197.035 80.5127 197.273 79.1752 197.561 77.7897C197.849 76.4037 198.153 75.0116 198.472 73.6098C198.792 72.2086 199.079 70.8868 199.336 69.6444C199.304 68.5299 198.976 67.6784 198.352 67.0887C197.73 66.5002 196.858 66.2693 195.74 66.396C194.973 66.7147 194.405 67.1293 194.038 67.6384C193.67 68.1474 193.374 68.8008 193.151 69.5965C193.022 70.0111 192.831 70.8389 192.575 
72.0813C192.319 73.3225 191.992 74.7486 191.592 76.3564C191.193 77.9655 190.721 79.6449 190.178 81.3963C189.635 83.1478 189.027 84.7333 188.357 86.1496C187.685 87.5666 186.95 88.7053 186.151 89.5653C185.352 90.4247 184.489 90.7756 183.562 90.6162C183.05 90.5205 182.723 89.995 182.579 89.0399C182.435 88.0841 182.412 86.9066 182.507 85.5048C182.603 84.1036 182.795 82.5666 183.082 80.8951C183.37 79.223 183.665 77.6388 183.969 76.1413C184.273 74.6449 184.553 73.3225 184.809 72.1765C185.064 71.0298 185.24 70.2656 185.336 69.8838C185.336 68.9602 185.127 68.2202 184.713 67.662C184.297 67.1056 183.794 66.7547 183.202 66.6111C182.61 66.4681 182.003 66.5475 181.381 66.8499C180.757 67.1529 180.222 67.7026 179.774 68.4977C179.614 69.3577 179.406 70.3535 179.151 71.4838C178.895 72.614 178.648 73.7765 178.408 74.971C178.168 76.1655 177.944 77.3358 177.737 78.4824C177.529 79.6291 177.377 80.6321 177.281 81.4921C177.217 82.1606 177.145 82.9812 177.066 83.9521C176.985 84.9242 176.945 85.9508 176.945 87.0332C176.945 88.1169 177.025 89.1914 177.186 90.258C177.345 91.3253 177.633 92.3047 178.048 93.1956C178.463 94.0877 179.047 94.8198 179.799 95.3931C180.549 95.9664 181.5 96.2846 182.651 96.3489C183.833 96.4119 184.864 96.3252 185.744 96.0858C186.622 95.847 187.421 95.4725 188.141 94.9628C188.86 94.4543 189.515 93.8489 190.107 93.1477C190.697 92.4477 191.281 91.6835 191.856 90.855C192.4 92.0659 193.103 93.0047 193.966 93.6737C194.829 94.3422 195.74 94.741 196.699 94.8677C197.657 94.9943 198.633 94.8604 199.624 94.4616C200.614 94.064 201.509 93.3871 202.308 92.4313C202.835 91.8453 203.331 91.1792 203.797 90.4429C203.995 90.7877 204.205 91.1204 204.442 91.4277C205.225 92.4477 206.288 93.1477 207.631 93.5301C209.069 93.9125 210.474 93.9768 211.849 93.7216C213.223 93.4671 214.534 93.0047 215.78 92.3362C217.027 91.6671 218.185 90.8635 219.257 89.9235C220.327 88.9841 221.262 88.0053 222.061 86.9854C222.029 87.7181 222.013 88.4114 222.013 89.0635C222.013 89.7168 221.997 90.4247 221.966 
91.1895C220.367 92.3047 218.857 93.6422 217.435 95.2022C216.012 96.7622 214.765 98.4264 213.695 100.194C212.624 101.961 211.785 103.753 211.179 105.568C210.571 107.384 210.275 109.08 210.291 110.657C210.307 112.233 210.682 113.61 211.418 114.788C212.152 115.967 213.351 116.81 215.013 117.32C216.74 117.862 218.257 117.877 219.569 117.368C220.879 116.858 222.021 116.014 222.996 114.836C223.971 113.658 224.77 112.233 225.394 110.561C226.017 108.889 226.512 107.145 226.88 105.33C227.247 103.515 227.479 101.73 227.575 99.9797C227.671 98.2276 227.671 96.6664 227.575 95.2974C230.324 94.1513 232.577 92.7022 234.335 90.9501C236.093 89.1999 237.547 87.352 238.698 85.409C239.049 84.9314 239.169 84.3581 239.058 83.6896C238.945 83.0206 238.674 82.4472 238.243 81.9697Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M298.724 78.9135C298.82 78.1814 298.964 77.4087 299.155 76.5966C299.347 75.7845 299.587 74.996 299.875 74.2318C300.162 73.4676 300.498 72.807 300.882 72.2494C301.265 71.6924 301.673 71.2943 302.104 71.0549C302.536 70.8167 302.974 70.8403 303.423 71.1264C303.902 71.4137 304.197 72.0185 304.31 72.9415C304.421 73.8663 304.31 74.853 303.974 75.9039C303.638 76.9554 303.039 77.942 302.176 78.8657C301.313 79.7899 300.146 80.3941 298.676 80.6808C298.612 80.236 298.628 79.6463 298.724 78.9135ZM315.336 80.8717C314.809 80.7135 314.306 80.6972 313.826 80.8244C313.347 80.9517 313.043 81.2862 312.916 81.8281C312.659 82.8468 312.251 83.8898 311.692 84.9565C311.133 86.0238 310.446 87.0346 309.632 87.9904C308.817 88.9455 307.897 89.7898 306.875 90.5219C305.851 91.2546 304.781 91.78 303.662 92.0982C302.543 92.4491 301.616 92.4885 300.882 92.2176C300.146 91.9479 299.563 91.4855 299.132 90.8328C298.7 90.1801 298.388 89.3916 298.197 88.468C298.005 87.5443 297.893 86.5892 297.861 85.6013C299.683 85.7292 301.305 85.4032 302.728 84.622C304.149 83.8426 305.356 82.8068 306.347 81.5171C307.337 80.2275 308.089 78.7784 308.6 77.1699C309.111 75.5621 309.399 73.9615 309.463 
72.3688C309.495 70.8718 309.272 69.6064 308.792 68.5713C308.313 67.5367 307.665 66.7313 306.85 66.1586C306.036 65.5853 305.1 65.2507 304.046 65.1556C302.992 65.0598 301.92 65.2034 300.833 65.5853C299.522 66.0313 298.412 66.7555 297.501 67.7592C296.59 68.7622 295.831 69.9252 295.224 71.2464C294.617 72.5682 294.137 73.993 293.786 75.5215C293.434 77.0505 293.178 78.5554 293.019 80.0366C292.875 81.3656 292.798 82.6365 292.771 83.8632C292.702 84.0189 292.636 84.1686 292.563 84.3353C292.067 85.4668 291.491 86.5734 290.837 87.6558C290.182 88.7389 289.454 89.6467 288.656 90.3788C287.857 91.1116 287.026 91.3661 286.163 91.1431C285.651 91.0164 285.372 90.4261 285.324 89.3758C285.276 88.3243 285.331 87.0189 285.491 85.4583C285.651 83.8983 285.835 82.2093 286.043 80.3941C286.25 78.579 286.354 76.8439 286.354 75.1875C286.354 73.7542 286.082 72.3773 285.539 71.0549C284.995 69.7343 284.252 68.6349 283.31 67.7592C282.367 66.8828 281.272 66.3016 280.026 66.0156C278.779 65.7283 277.437 65.9198 275.999 66.5883C274.56 67.2574 273.417 68.1967 272.571 69.407C271.723 70.6179 270.948 71.8912 270.245 73.2288C269.989 72.2094 269.614 71.2628 269.118 70.3864C268.623 69.5107 268.016 68.7464 267.297 68.0931C266.577 67.441 265.769 66.9313 264.876 66.5646C263.981 66.1992 263.037 66.0156 262.046 66.0156C261.088 66.0156 260.201 66.1992 259.386 66.5646C258.571 66.9313 257.828 67.4004 257.156 67.9737C256.485 68.5476 255.878 69.1919 255.334 69.9088C254.791 70.6252 254.311 71.3343 253.896 72.0343C253.831 71.2064 253.76 70.4822 253.681 69.8603C253.6 69.2398 253.456 68.7143 253.249 68.2846C253.041 67.8543 252.746 67.5283 252.362 67.3052C251.978 67.0828 251.435 66.9707 250.732 66.9707C250.38 66.9707 250.028 67.0422 249.677 67.1852C249.325 67.3289 249.013 67.5283 248.742 67.7828C248.47 68.0386 248.263 68.3482 248.119 68.7143C247.975 69.0804 247.936 69.5028 247.999 69.9803C248.031 70.3312 248.119 70.7525 248.263 71.2464C248.406 71.7403 248.542 72.3858 248.67 73.1809C248.798 73.9773 248.902 74.9409 248.982 
76.0712C249.062 77.2021 249.085 78.5875 249.054 80.2275C249.021 81.8681 248.902 83.7862 248.694 85.9837C248.486 88.1813 248.158 90.7291 247.711 93.6267C247.647 94.2957 247.903 94.8376 248.479 95.2515C249.054 95.6648 249.709 95.9036 250.444 95.9678C251.179 96.0315 251.875 95.9036 252.53 95.586C253.185 95.2666 253.561 94.7097 253.656 93.9139C253.752 92.417 253.936 90.8249 254.208 89.1364C254.479 87.4492 254.815 85.7771 255.215 84.1207C255.614 82.465 256.069 80.8887 256.581 79.3911C257.092 77.8942 257.66 76.573 258.283 75.4263C258.907 74.2797 259.554 73.3645 260.225 72.6797C260.896 71.9949 261.599 71.6524 262.335 71.6524C263.229 71.6524 263.924 72.0579 264.42 72.87C264.915 73.6827 265.266 74.7263 265.475 75.999C265.682 77.2736 265.778 78.6675 265.763 80.1796C265.746 81.6923 265.682 83.1492 265.571 84.5504C265.459 85.9522 265.331 87.2019 265.187 88.3007C265.043 89.3995 264.939 90.1564 264.876 90.5697C264.876 91.3025 265.155 91.8831 265.714 92.3134C266.273 92.743 266.896 92.9982 267.584 93.0776C268.272 93.1576 268.918 93.0297 269.526 92.6952C270.133 92.3606 270.485 91.7964 270.581 90.9994C270.9 88.7067 271.34 86.4062 271.899 84.0971C272.458 81.7881 273.098 79.7184 273.817 77.8869C274.536 76.0554 275.335 74.5585 276.214 73.3961C277.093 72.2343 278.028 71.6524 279.019 71.6524C279.53 71.6524 279.922 72.0033 280.193 72.7033C280.465 73.4039 280.601 74.3591 280.601 75.5694C280.601 76.4615 280.529 77.3772 280.386 78.3166C280.241 79.256 280.074 80.2275 279.882 81.2305C279.69 82.2341 279.522 83.2608 279.378 84.3117C279.235 85.3632 279.163 86.4613 279.163 87.608C279.163 88.4043 279.243 89.3279 279.403 90.3788C279.562 91.4291 279.865 92.4255 280.313 93.3642C280.761 94.3042 281.376 95.1 282.16 95.7527C282.943 96.4054 283.941 96.7321 285.155 96.7321C286.978 96.7321 288.591 96.3418 289.998 95.5618C291.404 94.7818 292.611 93.763 293.618 92.5049C293.67 92.4388 293.718 92.3685 293.769 92.3031C293.846 92.4891 293.914 92.6861 294.001 92.863C294.688 94.2642 295.623 95.3466 296.806 
96.1115C297.988 96.8757 299.379 97.2975 300.978 97.3775C302.575 97.4563 304.317 97.1618 306.204 96.4933C307.609 95.9836 308.832 95.3466 309.871 94.5824C310.909 93.8182 311.844 92.8867 312.675 91.7879C313.507 90.6891 314.265 89.4231 314.953 87.9904C315.641 86.5565 316.335 84.9171 317.038 83.0692C317.166 82.5608 317.046 82.1068 316.679 81.7081C316.311 81.3105 315.864 81.0317 315.336 80.8717Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M341.393 75.5432C341.233 76.4832 341.018 77.5189 340.746 78.6486C340.474 79.7795 340.131 80.9498 339.715 82.1601C339.3 83.3703 338.788 84.4612 338.181 85.4321C337.574 86.4042 336.878 87.1757 336.096 87.7491C335.312 88.3224 334.41 88.5612 333.387 88.4654C332.875 88.4024 332.483 88.0521 332.212 87.4145C331.94 86.7782 331.797 85.9655 331.78 84.9782C331.764 83.9915 331.852 82.9085 332.044 81.7298C332.236 80.5522 332.531 79.3971 332.932 78.2662C333.331 77.1365 333.818 76.0929 334.393 75.1371C334.969 74.182 335.632 73.4414 336.383 72.916C337.134 72.3905 337.958 72.1445 338.852 72.1754C339.747 72.2075 340.706 72.6529 341.729 73.5129C341.664 73.9275 341.553 74.6044 341.393 75.5432ZM358.437 79.1977C357.941 78.9431 357.43 78.888 356.903 79.031C356.376 79.174 356 79.6601 355.777 80.488C355.649 81.3801 355.361 82.4304 354.914 83.6406C354.466 84.8509 353.914 85.9982 353.26 87.08C352.604 88.163 351.853 89.063 351.006 89.7793C350.159 90.4963 349.256 90.823 348.298 90.7581C347.498 90.6951 346.938 90.289 346.62 89.5406C346.299 88.7921 346.132 87.8533 346.116 86.7218C346.099 85.5921 346.212 84.3182 346.451 82.9007C346.691 81.4837 346.979 80.0746 347.314 78.6722C347.65 77.2716 347.994 75.9256 348.346 74.6359C348.697 73.3463 348.984 72.2554 349.209 71.3639C349.464 70.5675 349.384 69.8912 348.969 69.333C348.553 68.7766 348.034 68.3778 347.411 68.1391C346.787 67.9003 346.155 67.8366 345.516 67.9481C344.877 68.0597 344.462 68.4021 344.27 68.9748C342.384 67.3506 340.57 66.4748 338.829 66.3476C337.086 66.2203 335.48 66.6027 334.01 
67.4942C332.539 68.3857 331.237 69.6754 330.103 71.3639C328.968 73.0523 328.049 74.8911 327.345 76.8814C326.642 78.8716 326.203 80.9025 326.027 82.9722C325.851 85.0424 325.987 86.9297 326.435 88.6333C326.883 90.3369 327.673 91.7308 328.808 92.8126C329.942 93.8956 331.485 94.4375 333.435 94.4375C334.298 94.4375 335.129 94.2623 335.928 93.912C336.726 93.5611 337.462 93.1472 338.133 92.6696C338.804 92.192 339.395 91.6902 339.908 91.1648C340.418 90.6393 340.818 90.2018 341.106 89.8509C341.329 90.9975 341.697 91.9696 342.209 92.7654C342.719 93.5611 343.303 94.215 343.958 94.7235C344.613 95.2326 345.301 95.6071 346.02 95.8465C346.739 96.0853 347.435 96.2047 348.105 96.2047C349.608 96.2047 351.013 95.695 352.325 94.6756C353.635 93.6575 354.81 92.4066 355.849 90.926C356.887 89.4448 357.743 87.8848 358.413 86.2442C359.085 84.6043 359.532 83.1473 359.756 81.8728C359.98 81.3952 359.939 80.894 359.636 80.3686C359.332 79.8431 358.933 79.4534 358.437 79.1977Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M444.738 105.571C444.467 106.653 444.043 107.57 443.467 108.318C442.892 109.066 442.173 109.456 441.31 109.489C440.767 109.52 440.351 109.233 440.063 108.629C439.776 108.023 439.576 107.243 439.464 106.288C439.352 105.332 439.304 104.265 439.32 103.087C439.336 101.909 439.384 100.746 439.464 99.5996C439.543 98.4536 439.64 97.3857 439.752 96.3991C439.863 95.4112 439.951 94.6482 440.015 94.1064C441.102 94.2336 442.006 94.7027 442.724 95.5154C443.443 96.3275 443.995 97.2906 444.378 98.4057C444.762 99.5202 444.985 100.723 445.05 102.012C445.113 103.302 445.009 104.488 444.738 105.571ZM427.382 105.571C427.111 106.653 426.687 107.57 426.112 108.318C425.537 109.066 424.817 109.456 423.954 109.489C423.411 109.52 422.996 109.233 422.708 108.629C422.42 108.023 422.22 107.243 422.109 106.288C421.996 105.332 421.948 104.265 421.965 103.087C421.98 101.909 422.028 100.746 422.109 99.5996C422.188 98.4536 422.284 97.3857 422.396 96.3991C422.508 95.4112 422.595 94.6482 
422.66 94.1064C423.746 94.2336 424.65 94.7027 425.368 95.5154C426.088 96.3275 426.639 97.2906 427.023 98.4057C427.407 99.5202 427.63 100.723 427.694 102.012C427.757 103.302 427.653 104.488 427.382 105.571ZM409.572 78.4375C409.539 79.2011 409.467 79.8781 409.355 80.4672C409.243 81.0575 409.092 81.4308 408.9 81.5902C408.548 81.3987 408.116 80.906 407.605 80.109C407.094 79.3133 406.695 78.4127 406.406 77.4096C406.119 76.4066 406.03 75.42 406.143 74.4479C406.254 73.477 406.758 72.7212 407.653 72.1788C408.004 71.9879 408.308 72.0594 408.564 72.394C408.82 72.7285 409.027 73.2139 409.188 73.8509C409.347 74.4885 409.458 75.2206 409.523 76.0485C409.587 76.8769 409.603 77.6727 409.572 78.4375ZM405.328 87.9677C404.832 88.4925 404.28 88.9464 403.674 89.3289C403.066 89.7113 402.443 89.9979 401.804 90.1889C401.164 90.3804 400.589 90.4276 400.078 90.3319C398.64 90.0458 397.537 89.424 396.77 88.4689C396.003 87.5137 395.515 86.3913 395.308 85.1017C395.1 83.8114 395.123 82.4338 395.38 80.969C395.635 79.5042 396.066 78.143 396.674 76.8848C397.281 75.6266 398.017 74.5436 398.879 73.6364C399.742 72.7285 400.685 72.1637 401.708 71.94C401.324 73.5642 401.197 75.2448 401.324 76.98C401.452 78.7157 401.868 80.3478 402.571 81.8762C403.018 82.8011 403.554 83.6441 404.177 84.4083C404.801 85.1732 405.56 85.8259 406.455 86.3671C406.199 86.9089 405.823 87.4422 405.328 87.9677ZM458.378 78.9151C458.474 78.183 458.617 77.4096 458.81 76.5975C459.001 75.786 459.241 74.9976 459.528 74.2333C459.816 73.4685 460.152 72.8079 460.536 72.2509C460.92 71.694 461.326 71.2952 461.758 71.0564C462.19 70.8176 462.629 70.8413 463.076 71.1279C463.556 71.4152 463.851 72.02 463.963 72.943C464.075 73.8673 463.963 74.8539 463.628 75.9054C463.292 76.9563 462.693 77.9436 461.83 78.8666C460.968 79.7914 459.8 80.3957 458.33 80.6823C458.266 80.2369 458.282 79.6478 458.378 78.9151ZM477.7 78.9151C477.796 78.183 477.939 77.4096 478.131 76.5975C478.323 75.786 478.563 74.9976 478.851 74.2333C479.138 73.4685 479.473 72.8079 479.857 
72.2509C480.241 71.694 480.649 71.2952 481.08 71.0564C481.512 70.8176 481.951 70.8413 482.398 71.1279C482.878 71.4152 483.173 72.02 483.285 72.943C483.397 73.8673 483.285 74.8539 482.95 75.9054C482.614 76.9563 482.015 77.9436 481.152 78.8666C480.289 79.7914 479.122 80.3957 477.652 80.6823C477.588 80.2369 477.604 79.6478 477.7 78.9151ZM495.655 81.7096C495.287 81.312 494.84 81.0332 494.313 80.8732C493.785 80.7144 493.282 80.6987 492.802 80.826C492.323 80.9532 492.018 81.2878 491.891 81.829C491.635 82.8484 491.228 83.8914 490.669 84.9574C490.109 86.0253 489.422 87.0362 488.607 87.9913C487.792 88.9464 486.873 89.7913 485.851 90.5234C484.827 91.2561 483.757 91.7816 482.639 92.0991C481.519 92.4506 480.592 92.49 479.857 92.2191C479.122 91.9488 478.539 91.487 478.107 90.8343C477.676 90.181 477.365 89.3931 477.172 88.4689C476.981 87.5459 476.868 86.5907 476.837 85.6029C478.659 85.7307 480.281 85.4047 481.703 84.6235C483.125 83.8435 484.332 82.8077 485.324 81.5181C486.314 80.229 487.065 78.7799 487.576 77.1715C488.087 75.563 488.375 73.963 488.44 72.3703C488.471 70.8734 488.247 69.6073 487.768 68.5722C487.289 67.5377 486.642 66.7328 485.827 66.1601C485.011 65.5862 484.077 65.2522 483.021 65.1565C481.967 65.0607 480.896 65.205 479.809 65.5862C478.498 66.0328 477.388 66.7571 476.478 67.7601C475.567 68.7637 474.807 69.9267 474.2 71.2473C473.592 72.5697 473.113 73.9939 472.761 75.523C472.409 77.0515 472.154 78.5569 471.995 80.0375C471.839 81.4744 471.755 82.8496 471.736 84.1659C471.615 84.4283 471.486 84.692 471.347 84.9574C470.787 86.0253 470.1 87.0362 469.285 87.9913C468.471 88.9464 467.551 89.7913 466.529 90.5234C465.506 91.2561 464.435 91.7816 463.317 92.0991C462.197 92.4506 461.271 92.49 460.536 92.2191C459.8 91.9488 459.217 91.487 458.786 90.8343C458.355 90.181 458.043 89.3931 457.851 88.4689C457.659 87.5459 457.547 86.5907 457.515 85.6029C459.337 85.7307 460.959 85.4047 462.382 84.6235C463.803 83.8435 465.01 82.8077 466.001 81.5181C466.992 80.229 467.743 78.7799 468.254 
77.1715C468.765 75.563 469.054 73.963 469.117 72.3703C469.149 70.8734 468.926 69.6073 468.447 68.5722C467.967 67.5377 467.319 66.7328 466.504 66.1601C465.689 65.5862 464.755 65.2522 463.7 65.1565C462.645 65.0607 461.574 65.205 460.488 65.5862C459.176 66.0328 458.066 66.7571 457.156 67.7601C456.245 68.7637 455.485 69.9267 454.878 71.2473C454.271 72.5697 453.792 73.9939 453.44 75.523C453.088 77.0515 452.832 78.5569 452.673 80.0375C452.582 80.8726 452.522 81.6823 452.477 82.4774C452.168 82.7393 451.867 83.0029 451.546 83.2617C450.444 84.1538 449.284 84.9574 448.07 85.6744C446.855 86.3913 445.592 86.9804 444.283 87.4422C442.971 87.904 441.629 88.1828 440.255 88.278L443.228 56.5578C443.42 55.8887 443.324 55.3003 442.94 54.7906C442.557 54.2809 442.061 53.9306 441.454 53.7397C440.847 53.5482 440.199 53.5645 439.512 53.787C438.824 54.0106 438.258 54.5203 437.81 55.3154C437.586 56.5263 437.354 58.182 437.115 60.2838C436.875 62.3856 436.635 64.6789 436.396 67.1631C436.156 69.6473 435.916 72.2109 435.677 74.8539C435.437 77.4981 435.229 79.966 435.053 82.2587C435.045 82.3605 435.039 82.4526 435.031 82.5532C434.751 82.7896 434.48 83.0277 434.19 83.2617C433.088 84.1538 431.928 84.9574 430.714 85.6744C429.499 86.3913 428.237 86.9804 426.927 87.4422C425.616 87.904 424.273 88.1828 422.899 88.278L425.872 56.5578C426.064 55.8887 425.968 55.3003 425.585 54.7906C425.201 54.2809 424.705 53.9306 424.098 53.7397C423.491 53.5482 422.843 53.5645 422.156 53.787C421.469 54.0106 420.902 54.5203 420.454 55.3154C420.23 56.5263 419.999 58.182 419.76 60.2838C419.519 62.3856 419.28 64.6789 419.04 67.1631C418.8 69.6473 418.561 72.2109 418.321 74.8539C418.082 77.4981 417.873 79.966 417.698 82.2587C417.694 82.3047 417.691 82.3465 417.687 82.3926C417.185 82.6247 416.638 82.8284 416.043 82.9993C415.436 83.175 414.749 83.2786 413.982 83.3102C414.11 82.7362 414.213 82.0993 414.293 81.3987C414.373 80.6987 414.438 79.966 414.486 79.2011C414.534 78.4375 414.549 77.6727 414.534 76.9084C414.517 76.1436 414.477 
75.4436 414.414 74.806C414.253 73.4376 413.958 72.1394 413.527 70.9128C413.095 69.6873 412.512 68.6607 411.777 67.8316C411.041 67.0037 410.123 66.4462 409.019 66.1601C407.917 65.8734 406.63 65.9686 405.161 66.4462C402.986 66.1601 401.029 66.3595 399.287 67.0437C397.545 67.7292 396.034 68.7237 394.756 70.0291C393.478 71.3358 392.431 72.8715 391.616 74.6394C390.801 76.4066 390.257 78.2224 389.986 80.0848C389.871 80.8744 389.815 81.6605 389.798 82.4447C389.303 83.4544 388.761 84.3368 388.164 85.0774C387.317 86.1283 386.438 86.9883 385.527 87.6568C384.616 88.3258 383.713 88.8355 382.819 89.1858C381.923 89.5367 381.124 89.7755 380.421 89.9022C379.59 90.0616 378.791 90.0779 378.024 89.9501C377.257 89.8234 376.553 89.4567 375.915 88.8513C375.403 88.4058 375.011 87.6889 374.74 86.7016C374.468 85.7144 374.309 84.5926 374.261 83.3338C374.213 82.0756 374.261 80.7617 374.404 79.3926C374.548 78.0236 374.795 76.7254 375.147 75.4994C375.499 74.2733 375.945 73.1746 376.49 72.2024C377.032 71.2322 377.672 70.5388 378.408 70.1249C378.822 70.1891 379.079 70.4352 379.175 70.8649C379.271 71.2952 379.294 71.8049 379.246 72.394C379.199 72.9836 379.127 73.5885 379.031 74.2091C378.935 74.8303 378.887 75.3485 378.887 75.7618C379.047 76.6218 379.358 77.2909 379.822 77.7684C380.285 78.246 380.805 78.5254 381.38 78.6042C381.955 78.6842 382.522 78.549 383.083 78.1981C383.641 77.8484 384.096 77.2909 384.449 76.526C384.48 76.5581 384.528 76.5739 384.592 76.5739L385.264 70.5073C385.455 69.6788 385.327 68.9467 384.88 68.3098C384.432 67.6728 383.841 67.3062 383.106 67.211C382.179 65.8734 380.924 65.165 379.342 65.085C377.76 65.0056 376.138 65.5231 374.476 66.6377C373.453 67.371 372.55 68.3813 371.767 69.671C370.983 70.9613 370.345 72.394 369.85 73.9703C369.353 75.5466 369.002 77.2115 368.795 78.963C368.587 80.7144 368.547 82.4187 368.674 84.0738C368.802 85.7307 369.098 87.2913 369.562 88.7555C370.025 90.221 370.672 91.447 371.504 92.4337C372.207 93.2937 373.005 93.9233 373.9 94.3215C374.795 94.7197 
375.73 94.9658 376.705 95.0615C377.68 95.1567 378.647 95.1167 379.606 94.9421C380.565 94.7676 381.476 94.5209 382.339 94.2015C383.457 93.7882 384.609 93.2621 385.791 92.6252C386.973 91.9888 388.108 91.224 389.195 90.3319C389.767 89.8628 390.317 89.3513 390.849 88.8028C391.091 89.4016 391.362 89.981 391.688 90.5234C392.551 91.9561 393.717 93.1191 395.188 94.0106C396.657 94.9021 398.464 95.3312 400.605 95.3003C402.907 95.2682 405.032 94.6876 406.982 93.5567C408.932 92.427 410.53 90.7616 411.777 88.5646C413.644 88.5646 415.481 88.258 417.287 87.6489C417.272 87.8416 417.256 88.0446 417.242 88.2307C417.115 89.9186 417.05 91.0646 417.05 91.67C417.019 92.7209 416.947 94.0185 416.835 95.5627C416.723 97.1075 416.651 98.7318 416.619 100.435C416.588 102.139 416.651 103.859 416.811 105.595C416.971 107.33 417.306 108.907 417.818 110.325C418.328 111.741 419.055 112.944 419.999 113.932C420.941 114.918 422.18 115.508 423.715 115.699C425.345 115.921 426.751 115.635 427.934 114.839C429.116 114.042 430.075 112.952 430.811 111.567C431.546 110.181 432.064 108.581 432.369 106.766C432.672 104.95 432.76 103.127 432.633 101.295C432.504 99.4639 432.168 97.7366 431.625 96.113C431.082 94.4882 430.33 93.1506 429.372 92.0991C429.948 91.9409 430.634 91.6385 431.434 91.1919C432.232 90.7464 433.055 90.2446 433.903 89.687C434.111 89.5501 434.316 89.4058 434.524 89.2652C434.446 90.3937 434.406 91.1985 434.406 91.67C434.375 92.7209 434.303 94.0185 434.19 95.5627C434.079 97.1075 434.007 98.7318 433.975 100.435C433.943 102.139 434.007 103.859 434.167 105.595C434.326 107.33 434.662 108.907 435.173 110.325C435.684 111.741 436.412 112.944 437.354 113.932C438.297 114.918 439.536 115.508 441.071 115.699C442.7 115.921 444.106 115.635 445.289 114.839C446.472 114.042 447.431 112.952 448.166 111.567C448.901 110.181 449.42 108.581 449.724 106.766C450.028 104.95 450.115 103.127 449.988 101.295C449.86 99.4639 449.524 97.7366 448.982 96.113C448.437 94.4882 447.687 93.1506 446.727 92.0991C447.303 91.9409 447.99 
91.6385 448.789 91.1919C449.588 90.7464 450.411 90.2446 451.259 89.687C451.699 89.3974 452.136 89.0986 452.573 88.7913C452.737 90.3488 453.091 91.7149 453.655 92.864C454.343 94.2658 455.277 95.3482 456.46 96.113C457.642 96.8766 459.033 97.299 460.632 97.3784C462.23 97.4572 463.971 97.1633 465.858 96.4942C467.264 95.9851 468.486 95.3482 469.525 94.5839C470.563 93.8191 471.498 92.8876 472.33 91.7894C472.378 91.7258 472.423 91.6567 472.47 91.5925C472.618 92.0385 472.782 92.467 472.977 92.864C473.665 94.2658 474.6 95.3482 475.782 96.113C476.964 96.8766 478.355 97.299 479.953 97.3784C481.551 97.4572 483.293 97.1633 485.179 96.4942C486.586 95.9851 487.808 95.3482 488.847 94.5839C489.885 93.8191 490.82 92.8876 491.652 91.7894C492.483 90.6901 493.241 89.424 493.929 87.9913C494.616 86.558 495.311 84.9186 496.015 83.0708C496.142 82.5617 496.022 82.1078 495.655 81.7096Z' fill='%230D0C23'/%3E%3C/svg%3E%0A");border-radius:6px;box-shadow:0px 2px 3px rgba(0,0,0,0.1)}:root[data-color="dark"] .btn-buymeacoffee,:root[data-color="night"] .btn-buymeacoffee{box-shadow:0px 2px 3px rgba(255,255,255,0.1)}.btn-close{background:var(--background-fg);border:1px dotted var(--border-color);border-radius:4px;cursor:pointer}.dropdown{position:relative}.dropdown-btn{display:flex;flex-direction:row;box-shadow:var(--box-shadow);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap}.dropdown-btn .icon-select{opacity:.4}.dropdown-menu{display:none;position:absolute;right:0;top:34px;min-width:100px;max-height:240px;overflow-x:auto;background:var(--background);color:var(--color3);box-shadow:var(--box-shadow2);z-index:1;border-radius:6px;padding:3px}.dropdown-menu.show{display:block}.dropdown-menu button,.dropdown-menu a{width:100%;display:flex;gap:2px;padding:6px;align-items:center;justify-content:center;cursor:pointer}.dropdown-menu button:hover,.dropdown-menu a:hover{background:var(--background-fg)} - -/*# sourceMappingURL=home.css.map */ \ No newline at end of file diff --git 
a/docs/scss/home.css.map b/docs/scss/home.css.map deleted file mode 100644 index 4071831..0000000 --- a/docs/scss/home.css.map +++ /dev/null @@ -1,29 +0,0 @@ -{ - "version": 3, - "file": "home.css", - "sourceRoot": "D:/project/gitlab/llm/external/ant_group/codefuse-ai.github.io", - "sources": [ - "themes/docura/assets/scss/home.scss", - "themes/docura/assets/scss/reset.scss", - "themes/docura/assets/scss/variables.scss", - "themes/docura/assets/scss/layout.scss", - "themes/docura/assets/scss/component/site-header.scss", - "themes/docura/assets/scss/component/site-footer.scss", - "themes/docura/assets/scss/component/home.scss", - "themes/docura/assets/scss/component/_button.scss", - "themes/docura/assets/scss/component/_dropdown.scss" - ], - "sourcesContent": [ - "/*!\n * Docura (https://docura.github.io/)\n * Copyright 2022-2023 Dumindu Madunuwan\n * Licensed under the MIT License.\n */\n\n@import \"reset\";\n@import \"variables\";\n@import \"layout\";\n\n@import \"component/site-header\";\n@import \"component/site-footer\";\n@import \"component/home\";\n\n@import \"component/button\";\n@import \"component/dropdown\";", - "/* https://github.com/elad2412/the-new-css-reset v1.11 */\n/* custom styles for: pre, code */\n\n*:where(:not(html, iframe, canvas, img, svg, video, audio, pre, code):not(svg *, symbol *)) {\n all: unset;\n display: revert;\n}\n\n*,\n*::before,\n*::after {\n box-sizing: border-box;\n}\n\nhtml {\n -moz-text-size-adjust: none;\n -webkit-text-size-adjust: none;\n text-size-adjust: none;\n}\n\na, button {\n cursor: revert;\n}\n\nol, ul, menu {\n list-style: none;\n}\n\nimg {\n max-inline-size: 100%;\n max-block-size: 100%;\n}\n\ntable {\n border-collapse: collapse;\n}\n\ninput, textarea {\n -webkit-user-select: auto;\n}\n\ntextarea {\n white-space: revert;\n}\n\nmeter {\n -webkit-appearance: revert;\n appearance: revert;\n}\n\n:where(pre) {\n all: revert;\n box-sizing: border-box;\n}\n\n::placeholder {\n color: unset;\n}\n\n::marker {\n content: 
initial;\n}\n\n:where([hidden]) {\n display: none;\n}\n\n:where([contenteditable]:not([contenteditable=\"false\"])) {\n -moz-user-modify: read-write;\n -webkit-user-modify: read-write;\n overflow-wrap: break-word;\n -webkit-line-break: after-white-space;\n -webkit-user-select: auto;\n}\n\n:where([draggable=\"true\"]) {\n -webkit-user-drag: element;\n}\n\n:where(dialog:modal) {\n all: revert;\n box-sizing: border-box;\n}\n\npre, code {\n margin: 0;\n}", - ":root {\n --site-header-height: 46px;\n --site-footer-height: 46px;\n}\n\n@media (min-width: 1025px) and (max-width: 1280px),\n(min-width: 1024px) and (max-width: 1280px) and (orientation: portrait) {\n :root {\n --site-header-height: 60px;\n --site-footer-height: 60px;\n }\n}\n\n@media (min-width: 1281px) {\n :root {\n --site-header-height: 80px;\n --site-footer-height: 80px;\n }\n}", - "body {\n font-family: var(--font-family);\n background: var(--background);\n color: var(--color);\n display: flex;\n flex-direction: column;\n min-height: 100svh;\n}\n\n#site-header {\n display: grid;\n grid-template-columns: 2fr 1fr;\n grid-template-rows: repeat(3, var(--site-header-height));\n}\n\n#site-header-menu, #site-header-search {\n grid-column: 1 / 3;\n}\n\n#site-footer {\n display: grid;\n grid-template-columns: 1fr 1fr;\n grid-template-rows: repeat(3, var(--site-footer-height));\n}\n\n#site-footer-copyright, #site-footer-love {\n grid-column: 1 / 3;\n}\n\n#site-main-content-wrapper {\n display: flex;\n flex: 1;\n}\n\n#sidebar, #toc, #article-nav, #sidebar .btn-close, #toc .btn-close {\n display: none;\n}\n\nmain {\n flex: 1;\n display: flex;\n overflow: auto;\n}\n\n#article {\n flex: 1;\n width: 100vw;\n}\n\n#sidebar {\n width: 85%;\n left: -85%;\n}\n\n#toc {\n width: 85%;\n right: -85%;\n}\n\n/* Small Tablet */\n@media (min-width: 768px) and (max-width: 1023px) {\n #site-header {\n grid-template-columns: repeat(6, 1fr);\n grid-template-rows: repeat(2, var(--site-header-height));\n }\n\n #site-header-brand {\n 
grid-column: 1 / 6;\n }\n\n #site-header-controls {\n grid-column: 6 / 7;\n }\n\n #site-header-menu {\n grid-column: 1 / 5;\n }\n\n #site-header-search {\n grid-column: 5 / 7;\n }\n\n #site-footer {\n grid-template-columns: repeat(4, 1fr);\n grid-template-rows: repeat(2, var(--site-footer-height));\n }\n\n #site-footer-copyright {\n grid-column: 1 / 3;\n }\n\n #site-footer-social {\n grid-column: 3 / 4;\n }\n\n #site-footer-fund {\n grid-column: 4 / 5;\n }\n\n #site-footer-love {\n grid-column: 1 / 5;\n }\n\n #sidebar {\n width: 50%;\n left: -50%;\n }\n\n #toc {\n width: 50%;\n right: -50%;\n }\n}\n\n/* From Large Tablet */\n@media (min-width: 1024px) {\n #site-header {\n grid-template-columns: repeat(6, 1fr);\n grid-template-rows: var(--site-header-height);\n }\n\n #site-header-brand {\n grid-column: 1 / 2;\n }\n\n #site-header-menu {\n grid-column: 2 / 5;\n grid-row: 1;\n }\n\n #site-header-search {\n grid-column: 5 / 6;\n grid-row: 1;\n }\n\n #site-header-controls {\n grid-column: 6 / 7;\n }\n\n #site-footer {\n grid-template-columns: repeat(5, 1fr);\n grid-template-rows: var(--site-footer-height);\n }\n\n #site-footer-copyright {\n grid-column: 1 / 3;\n }\n\n #site-footer-love {\n grid-column: 3 / 4;\n grid-row: 1;\n }\n\n #site-footer-social {\n grid-column: 4 / 5;\n }\n\n #site-footer-fund {\n grid-column: 5 / 6;\n }\n\n #article-nav-toc-btn {\n display: none;\n }\n}\n\n/* Large Tablet */\n@media (min-width: 1024px) and (max-width: 1279px) {\n #sidebar {\n width: 33%;\n left: -33%;\n }\n\n #article {\n width: 75vw;\n }\n\n #toc {\n width: 25%;\n display: flex;\n flex-direction: column;\n }\n\n #toc .sticky {\n position: fixed;\n right: 0;\n width: 25%;\n }\n}\n\n/* From Desktop */\n@media (min-width: 1280px) {\n #sidebar {\n width: 20%;\n display: flex;\n flex-direction: column;\n }\n\n #article {\n width: 60vw;\n }\n\n #toc {\n width: 25%;\n display: flex;\n flex-direction: column;\n }\n\n #sidebar .sticky {\n position: fixed;\n left: 0;\n width: 20%;\n 
}\n\n #toc .sticky {\n position: fixed;\n right: 0;\n width: 20%;\n }\n}\n\n/* Upto Large Tablet */\n@media (max-width: 1023px) {\n #toc {\n position: fixed;\n top: 0;\n height: 100%;\n transition: .3s;\n z-index: 300;\n overflow-x: auto;\n background: var(--background);\n box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1);\n }\n\n :root[data-color=\"dark\"] #toc, :root[data-color=\"night\"] #toc {\n box-shadow: 0 4px 30px rgba(255, 255, 255, 0.1);\n }\n\n .offcanvas-toc-on #toc {\n animation: slide-in-right .3s forwards;\n display: flex;\n flex-direction: column;\n padding-left: 16px;\n z-index: 10;\n cursor: default;\n }\n\n .offcanvas-toc-on:before {\n content: \"\";\n position: fixed;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n z-index: 5;\n }\n\n .offcanvas-toc-on #toc .btn-close {\n display: block;\n position: absolute;\n top: 10px;\n left: 10px;\n }\n\n #article-nav-toc-btn {\n display: flex;\n box-shadow: var(--box-shadow2);\n border-radius: 6px;\n padding: 6px;\n cursor: pointer;\n white-space: nowrap;\n gap: 6px;\n color: var(--color2);\n }\n}\n\n/* Upto Desktop */\n@media (max-width: 1279px) {\n #sidebar {\n position: fixed;\n top: 0;\n height: 100%;\n transition: .3s;\n z-index: 200;\n overflow-x: auto;\n background: var(--background);\n box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1);\n }\n\n :root[data-color=\"dark\"] #sidebar, :root[data-color=\"night\"] #sidebar {\n box-shadow: 0 4px 30px rgba(255, 255, 255, 0.1);\n }\n\n .offcanvas-sidebar-on #sidebar {\n animation: slide-in-left .3s forwards;\n display: flex;\n flex-direction: column;\n z-index: 10;\n cursor: default;\n }\n\n .offcanvas-sidebar-on:before {\n content: \"\";\n position: fixed;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n z-index: 5;\n }\n\n .offcanvas-sidebar-on #sidebar .btn-close {\n display: block;\n position: absolute;\n top: 10px;\n right: 10px;\n }\n\n #article-nav {\n display: flex;\n gap: 12px;\n overflow: auto;\n justify-content: space-between;\n height: 
var(--site-header-height);\n align-items: center;\n padding: 0 2px;\n }\n\n #article-nav-menu-btn {\n display: flex;\n box-shadow: var(--box-shadow2);\n border-radius: 6px;\n padding: 6px;\n cursor: pointer;\n white-space: nowrap;\n gap: 6px;\n color: var(--color2);\n }\n}\n\nbody.offcanvas-sidebar-on, body.offcanvas-toc-on {\n cursor: pointer;\n overflow: hidden;\n}\n\n.offcanvas-sidebar-on:before, .offcanvas-toc-on:before {\n background: rgba(255, 255, 255, 0.1);\n backdrop-filter: blur(var(--blur));\n -webkit-backdrop-filter: blur(var(--blur));\n}\n\n@keyframes slide-in-left {\n from {\n transform: translateX(0);\n }\n to {\n transform: translateX(100%);\n }\n}\n\n@keyframes slide-in-right {\n from {\n transform: translateX(0);\n }\n to {\n transform: translateX(-100%);\n }\n}", - "#site-header-brand {\n display: flex;\n align-items: center;\n font-family: var(--font-family-brand);\n font-size: 1.4em;\n color: var(--color2);\n}\n\n#site-header-brand a {\n padding: 12px;\n}\n\n#site-header-menu {\n padding: 0 12px;\n display: flex;\n align-items: center;\n color: var(--color3);\n}\n\n#site-header-menu nav {\n width: 100%;\n overflow: auto;\n}\n\n#site-header-menu ul {\n display: flex;\n height: 100%;\n align-items: center;\n gap: 12px;\n}\n\n#site-header-menu a {\n display: flex;\n padding: 12px 6px;\n gap: 3px;\n white-space: nowrap;\n}\n\n#site-header-menu a:focus, #site-header-menu a:hover, #site-header-menu a.active {\n border-bottom: 3px solid;\n}\n\n#site-header-controls {\n display: flex;\n align-items: center;\n padding-right: 12px;\n justify-content: flex-end;\n gap: 12px\n}\n\n#site-header-search {\n display: flex;\n align-items: flex-end;\n}\n\n/* From Small Tablet */\n@media (min-width: 768px) {\n #site-header-search {\n align-items: center;\n }\n}", - "#site-footer-social {\n display: flex;\n gap: 12px;\n justify-content: flex-start;\n padding-left: 12px;\n align-items: center;\n}\n\n#site-footer-fund {\n display: flex;\n gap: 12px;\n overflow: 
auto;\n justify-content: flex-end;\n padding-right: 12px;\n align-items: center;\n}\n\n#site-footer-copyright, #site-footer-love {\n display: flex;\n align-items: center;\n justify-content: center;\n color: var(--color3)\n}\n\n#site-footer-copyright a {\n display: flex;\n align-items: center;\n}\n\n/* From Small Tablet */\n@media (min-width: 768px) {\n #site-footer-copyright {\n justify-content: flex-start;\n padding-left: 12px;\n }\n\n #site-footer-social {\n justify-content: flex-end;\n padding-right: 12px;\n }\n}\n", - ".cover {\n padding: 40px 20px;\n width: 100vw;\n flex: 1;\n display: flex;\n align-items: center;\n justify-content: center;\n flex-direction: column;\n background: var(--home-cover-background);\n position: relative;\n color: var(--color2)\n}\n\n.cover::after {\n content: \"\";\n position: absolute;\n top: 0;\n left: 0;\n right: 0;\n bottom: 0;\n z-index: -1;\n background: inherit;\n filter: blur(1rem);\n}\n\n.cover h1 {\n font-family: var(--font-family-brand);\n font-size: 4em;\n text-align: center;\n}\n\n.cover h2 {\n font-family: var(--font-family-brand);\n font-size: 2em;\n text-align: center;\n}\n\n.cover h3 {\n font-family: var(--font-family-brand);\n font-size: 1.5em;\n text-align: center;\n padding-top: .8em;\n}\n\n.cover p {\n font-size: 1em;\n padding-top: .8em;\n}\n\n.github-buttons {\n display: flex;\n gap: 10px;\n padding-top: 20px;\n justify-content: center;\n}\n\n.github-repos-grid {\n display: flex;\n flex-wrap: wrap;\n padding-top: 4em;\n padding-bottom: 2em;\n gap: 4em;\n width: 100%;\n}\n\n.github-repo-tile {\n width: 100%;\n}\n\n.github-repo-tile .icon {\n width: 80px;\n height: 80px;\n background-size: 5em;\n}\n\n.github-repo-tile a {\n display: flex;\n flex-direction: column;\n align-items: center;\n}\n\n@media (min-width: 768px) {\n .github-repos-grid {\n flex-direction: row;\n width: 80%;\n padding-top: 4em;\n gap: 0;\n }\n\n .github-repo-tile {\n width: 50%;\n }\n}\n\n@media (min-width: 1024px) {\n .github-repos-grid {\n 
width: 60%;\n padding-top: 6em;\n }\n\n .github-repo-tile .icon {\n width: 100px;\n height: 100px;\n background-size: 6.25em;\n }\n}\n\n@media (min-width: 1281px) {\n .github-repos-grid {\n width: 50%;\n }\n\n .github-repo-tile .icon {\n width: 120px;\n height: 120px;\n background-size: 7.5em;\n }\n}\n\n@media (min-width: 1920px) {\n .github-repos-grid {\n width: 40%;\n }\n\n .github-repo-tile .icon {\n width: 160px;\n height: 160px;\n background-size: 10em;\n }\n}", - ".btn-github {\n display: flex;\n flex-direction: row;\n gap: 2px;\n font-size: .7em; /*11 px*/\n font-weight: 700;\n line-height: 1.8em;\n color: #576060;\n background: #f6f8fa;\n border: 1px solid #d5d7da;\n border-radius: 6px;\n padding: 2px 4px;\n}\n\n:root[data-color=\"dark\"] .btn-github, :root[data-color=\"night\"] .btn-github {\n color: #c9d1d9;\n background: #21262d;\n border: 1px solid #576060;\n}\n\n.btn-github .icon {\n transform: scale(.8); /* 18px */\n}\n\n.btn-buymeacoffee {\n width: 86px;\n height: 24px;\n background-image: url(\"data:image/svg+xml,%3Csvg width='85.5' height='24' viewBox='0 0 545 153' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0 24.48C0 10.9601 10.9601 0 24.48 0H520.2C533.72 0 544.68 10.9601 544.68 24.48V128.52C544.68 142.04 533.72 153 520.2 153H24.48C10.9601 153 0 142.04 0 128.52V24.48Z' fill='%23FFDD00'/%3E%3Cpath d='M109.522 50.3178L109.455 50.2783L109.299 50.2308C109.362 50.2836 109.44 50.3142 109.522 50.3178Z' fill='%230D0C22'/%3E%3Cpath d='M110.507 57.3134L110.432 57.3344L110.507 57.3134Z' fill='%230D0C22'/%3E%3Cpath d='M109.549 50.3062C109.54 50.3051 109.532 50.3031 109.524 50.3003C109.523 50.3058 109.523 50.3113 109.524 50.3168C109.533 50.3156 109.541 50.3119 109.549 50.3062Z' fill='%230D0C22'/%3E%3Cpath d='M109.523 50.3205H109.536V50.3127L109.523 50.3205Z' fill='%230D0C22'/%3E%3Cpath d='M110.447 57.3006L110.56 57.2361L110.602 57.2123L110.64 57.1715C110.569 57.2025 110.503 57.2462 110.447 57.3006Z' fill='%230D0C22'/%3E%3Cpath d='M109.715 
50.4713L109.604 50.3659L109.529 50.3251C109.57 50.3963 109.636 50.4488 109.715 50.4713Z' fill='%230D0C22'/%3E%3Cpath d='M81.8801 118.353C81.7916 118.391 81.7142 118.451 81.6548 118.527L81.7246 118.482C81.772 118.439 81.8392 118.387 81.8801 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M98.0456 115.173C98.0456 115.073 97.9968 115.091 98.0087 115.447C98.0087 115.418 98.0206 115.389 98.0258 115.361C98.0324 115.298 98.0377 115.236 98.0456 115.173Z' fill='%230D0C22'/%3E%3Cpath d='M96.3761 118.353C96.2877 118.391 96.2103 118.451 96.1509 118.527L96.2207 118.482C96.2681 118.439 96.3353 118.387 96.3761 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M70.4886 119.11C70.4215 119.052 70.3393 119.013 70.2515 118.999C70.3226 119.034 70.3937 119.068 70.4412 119.094L70.4886 119.11Z' fill='%230D0C22'/%3E%3Cpath d='M67.9304 116.657C67.92 116.553 67.8881 116.453 67.8369 116.362C67.8732 116.456 67.9035 116.553 67.9278 116.652L67.9304 116.657Z' fill='%230D0C22'/%3E%3Cpath d='M85.1368 72.7737C81.6195 74.2794 77.628 75.9866 72.4549 75.9866C70.2908 75.9823 68.1373 75.6854 66.0527 75.104L69.6306 111.838C69.7572 113.373 70.4567 114.805 71.59 115.848C72.7233 116.892 74.2076 117.471 75.7482 117.47C75.7482 117.47 80.8212 117.734 82.514 117.734C84.3358 117.734 89.7988 117.47 89.7988 117.47C91.3391 117.47 92.8231 116.891 93.9562 115.848C95.0892 114.804 95.7885 113.373 95.9151 111.838L99.7472 71.2456C98.0347 70.6607 96.3064 70.2721 94.358 70.2721C90.9883 70.2708 88.2733 71.4313 85.1368 72.7737Z' fill='white'/%3E%3Cpath d='M54.9844 57.1021L55.045 57.1587L55.0845 57.1824C55.0541 57.1522 55.0205 57.1252 54.9844 57.1021Z' fill='%230D0C22'/%3E%3Cpath d='M116.299 53.7119L115.761 50.9943C115.277 48.5559 114.18 46.2519 111.677 45.3706C110.875 45.0887 109.964 44.9675 109.349 44.384C108.734 43.8004 108.552 42.8941 108.41 42.0536C108.147 40.511 107.899 38.9671 107.629 37.4272C107.396 36.1033 107.211 34.616 106.604 33.4015C105.814 31.7706 104.174 30.8169 102.543 30.1859C101.707 29.8739 100.854 29.61 99.9884 
29.3955C95.9139 28.3205 91.63 27.9253 87.4382 27.7001C82.407 27.4225 77.3623 27.5061 72.343 27.9504C68.6071 28.2902 64.6723 28.7013 61.1221 29.9935C59.8245 30.4665 58.4875 31.0342 57.5008 32.0367C56.2902 33.2684 55.895 35.1733 56.7789 36.7092C57.4073 37.8 58.4717 38.5706 59.6006 39.0804C61.0711 39.7373 62.6068 40.2371 64.1822 40.5716C68.5689 41.5412 73.1124 41.9219 77.5939 42.0839C82.561 42.2844 87.5362 42.1219 92.4796 41.5978C93.7021 41.4635 94.9224 41.3023 96.1405 41.1144C97.575 40.8944 98.4958 39.0185 98.073 37.7117C97.5671 36.1494 96.2077 35.5434 94.6703 35.7792C94.4438 35.8148 94.2185 35.8477 93.9919 35.8807L93.8286 35.9044C93.3078 35.9702 92.787 36.0317 92.2662 36.0888C91.1904 36.2047 90.112 36.2996 89.0309 36.3733C86.6097 36.5419 84.1818 36.6197 81.7553 36.6236C79.371 36.6236 76.9853 36.5564 74.6062 36.3997C73.5207 36.3285 72.4379 36.2381 71.3577 36.1283C70.8663 36.0769 70.3763 36.0229 69.8862 35.9623L69.4199 35.903L69.3185 35.8886L68.835 35.8187C67.847 35.6699 66.859 35.4986 65.8816 35.2918C65.783 35.2699 65.6947 35.2151 65.6315 35.1363C65.5683 35.0575 65.5338 34.9594 65.5338 34.8584C65.5338 34.7574 65.5683 34.6594 65.6315 34.5806C65.6947 34.5018 65.783 34.4469 65.8816 34.425H65.9C66.7471 34.2445 67.6007 34.0904 68.4569 33.956C68.7424 33.9113 69.0287 33.8673 69.3158 33.8243H69.3237C69.8599 33.7887 70.3987 33.6926 70.9322 33.6293C75.574 33.1465 80.2434 32.9819 84.9077 33.1367C87.1721 33.2025 89.4353 33.3356 91.6892 33.5648C92.174 33.6149 92.6562 33.6676 93.1383 33.7268C93.3227 33.7492 93.5085 33.7756 93.6942 33.798L94.0683 33.852C95.1591 34.0144 96.2441 34.2116 97.3234 34.4435C98.9227 34.7912 100.976 34.9045 101.688 36.6566C101.914 37.2125 102.017 37.8303 102.142 38.4139L102.302 39.1581C102.306 39.1715 102.309 39.1852 102.311 39.199C102.688 40.9554 103.065 42.7118 103.442 44.4683C103.47 44.598 103.471 44.7321 103.444 44.8621C103.418 44.9921 103.365 45.1153 103.289 45.2239C103.213 45.3326 103.115 45.4244 103.002 45.4936C102.889 45.5628 102.762 45.6079 102.631 
45.6262H102.62L102.39 45.6578L102.162 45.6881C101.44 45.7821 100.717 45.8699 99.9936 45.9516C98.5683 46.114 97.1408 46.2546 95.711 46.3731C92.87 46.6094 90.0233 46.7644 87.1708 46.8381C85.7174 46.8768 84.2644 46.8948 82.8118 46.8921C77.0301 46.8876 71.2534 46.5516 65.5101 45.8857C64.8883 45.8119 64.2666 45.7329 63.6448 45.6525C64.1269 45.7145 63.2944 45.6051 63.1258 45.5814C62.7306 45.5261 62.3354 45.4686 61.9402 45.4088C60.6136 45.2099 59.295 44.9649 57.9711 44.7502C56.3705 44.4867 54.8398 44.6185 53.3921 45.4088C52.2037 46.0591 51.2419 47.0564 50.6349 48.2674C50.0105 49.5584 49.8248 50.964 49.5455 52.3511C49.2662 53.7383 48.8315 55.2308 48.9962 56.6548C49.3505 59.7281 51.4991 62.2258 54.5895 62.7843C57.4968 63.3112 60.42 63.7381 63.351 64.1016C74.8648 65.5118 86.4968 65.6805 98.0466 64.6049C98.9872 64.517 99.9265 64.4213 100.864 64.3177C101.157 64.2855 101.454 64.3192 101.732 64.4165C102.01 64.5137 102.263 64.6719 102.472 64.8795C102.681 65.0872 102.842 65.339 102.941 65.6165C103.04 65.894 103.076 66.1902 103.046 66.4834L102.753 69.3261C102.164 75.0705 101.575 80.8145 100.986 86.558C100.371 92.5896 99.7521 98.6208 99.1295 104.651C98.9538 106.35 98.7782 108.048 98.6025 109.746C98.4339 111.417 98.4102 113.142 98.0927 114.794C97.5922 117.391 95.8335 118.987 93.2674 119.57C90.9164 120.105 88.5148 120.386 86.1038 120.408C83.431 120.422 80.7594 120.304 78.0866 120.318C75.2333 120.334 71.7384 120.071 69.5358 117.947C67.6007 116.082 67.3333 113.161 67.0698 110.636C66.7185 107.293 66.3703 103.95 66.0252 100.607L64.0887 82.0212L62.8359 69.9953C62.8149 69.7964 62.7938 69.6001 62.774 69.3999C62.6239 67.9654 61.6082 66.5611 60.0077 66.6335C58.6376 66.6941 57.0806 67.8586 57.2413 69.3999L58.17 78.3155L60.0906 96.7581C60.6378 101.997 61.1836 107.236 61.7281 112.476C61.8335 113.48 61.9323 114.487 62.0429 115.49C62.6449 120.976 66.834 123.932 72.0216 124.764C75.0515 125.252 78.1551 125.352 81.2297 125.402C85.1711 125.465 89.1521 125.617 93.029 124.903C98.7738 123.849 103.084 
120.013 103.699 114.062C103.875 112.345 104.051 110.626 104.226 108.908C104.81 103.224 105.393 97.5397 105.976 91.855L107.88 73.2807L108.754 64.7682C108.797 64.3461 108.976 63.9492 109.262 63.6363C109.549 63.3234 109.929 63.111 110.345 63.0307C111.988 62.7105 113.558 62.1639 114.727 60.9137C116.587 58.9232 116.957 56.3281 116.299 53.7119ZM54.5052 55.5483C54.5302 55.5364 54.4841 55.7511 54.4644 55.8513C54.4604 55.6998 54.4683 55.5654 54.5052 55.5483ZM54.6646 56.7813C54.6778 56.7721 54.7173 56.8248 54.7581 56.888C54.6962 56.83 54.6567 56.7866 54.6633 56.7813H54.6646ZM54.8214 56.9881C54.878 57.0843 54.9083 57.1449 54.8214 56.9881V56.9881ZM55.1362 57.2437H55.1441C55.1441 57.2529 55.1586 57.2621 55.1639 57.2713C55.1551 57.2612 55.1454 57.2519 55.1349 57.2437H55.1362ZM110.269 56.8616C109.679 57.4228 108.789 57.6837 107.911 57.8141C98.0572 59.2763 88.06 60.0166 78.0984 59.6899C70.9691 59.4462 63.9148 58.6545 56.8566 57.6573C56.165 57.5598 55.4155 57.4334 54.9399 56.9236C54.0441 55.9619 54.4841 54.0254 54.7173 52.8636C54.9307 51.7992 55.3391 50.3804 56.605 50.2289C58.581 49.9971 60.8758 50.8309 62.8307 51.1273C65.1843 51.4865 67.5467 51.7741 69.9179 51.9902C80.0375 52.9123 90.3271 52.7687 100.402 51.4198C102.238 51.173 104.068 50.8863 105.891 50.5596C107.516 50.2684 109.316 49.7218 110.298 51.404C110.971 52.55 111.06 54.0834 110.956 55.3783C110.924 55.9425 110.678 56.4732 110.267 56.8616H110.269Z' fill='%230D0C22'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M170.036 84.2397C169.461 85.3378 168.67 86.2942 167.663 87.1057C166.656 87.9178 165.482 88.579 164.139 89.0881C162.797 89.5984 161.446 89.9408 160.088 90.1153C158.729 90.2905 157.41 90.2753 156.133 90.0674C154.854 89.8608 153.766 89.439 152.872 88.8014L153.88 78.3397C154.806 78.0216 155.972 77.6949 157.379 77.3604C158.785 77.0264 160.231 76.787 161.718 76.644C163.205 76.5004 164.61 76.5173 165.937 76.6919C167.263 76.867 168.31 77.2888 169.077 77.9579C169.493 78.3397 169.845 78.7537 170.132 79.1997C170.42 
79.6458 170.595 80.1076 170.66 80.5852C170.819 81.9227 170.612 83.1409 170.036 84.2397ZM155.413 61.9545C156.084 61.5406 156.892 61.1739 157.834 60.8551C158.777 60.5376 159.744 60.3139 160.735 60.1867C161.725 60.06 162.692 60.043 163.636 60.1388C164.578 60.2345 165.41 60.497 166.129 60.9267C166.848 61.357 167.383 61.9782 167.735 62.7897C168.086 63.6024 168.182 64.6296 168.022 65.8714C167.895 66.8587 167.502 67.695 166.848 68.3793C166.193 69.0647 165.393 69.6374 164.451 70.0993C163.508 70.5617 162.509 70.9277 161.455 71.1974C160.399 71.4689 159.384 71.6683 158.41 71.795C157.435 71.9229 156.588 72.0029 155.869 72.0338C155.15 72.0659 154.678 72.0816 154.454 72.0816L155.413 61.9545ZM175.214 77.4798C174.703 76.3658 174.016 75.3864 173.153 74.5416C172.29 73.698 171.266 73.0853 170.084 72.7029C170.595 72.2889 171.099 71.6362 171.595 70.7441C172.09 69.8532 172.513 68.8811 172.865 67.8302C173.216 66.7787 173.457 65.7205 173.584 64.6533C173.711 63.5866 173.663 62.6709 173.441 61.906C172.896 59.9958 172.042 58.4988 170.875 57.4158C169.708 56.3334 168.35 55.5849 166.8 55.1704C165.249 54.7577 163.54 54.6692 161.67 54.908C159.8 55.1467 157.89 55.6164 155.941 56.317C155.941 56.1582 155.957 55.991 155.989 55.8158C156.02 55.6413 156.036 55.4576 156.036 55.2661C156.036 54.7886 155.797 54.3752 155.317 54.0243C154.838 53.674 154.287 53.4674 153.664 53.4031C153.04 53.3401 152.433 53.4746 151.841 53.8092C151.25 54.1437 150.842 54.7577 150.619 55.6479C150.363 58.5146 150.107 61.4927 149.852 64.5812C149.596 67.6708 149.324 70.792 149.037 73.9453C148.749 77.0979 148.461 80.227 148.174 83.3318C147.886 86.4372 147.598 89.4226 147.311 92.2886C147.407 93.1486 147.646 93.8177 148.03 94.2953C148.413 94.7734 148.861 95.0601 149.372 95.1553C149.883 95.251 150.419 95.1625 150.978 94.8922C151.537 94.6225 152.025 94.1516 152.441 93.4832C153.719 94.1838 155.158 94.6377 156.756 94.845C158.354 95.0516 159.975 95.0516 161.623 94.845C163.268 94.6377 164.89 94.248 166.488 93.6741C168.086 93.1013 169.541 
92.3844 170.851 91.525C172.162 90.665 173.264 89.685 174.16 88.5869C175.054 87.4875 175.646 86.3014 175.933 85.0281C176.221 83.7221 176.301 82.4167 176.173 81.1106C176.045 79.8052 175.725 78.5955 175.214 77.4798Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M221.989 102.702C221.814 103.753 221.565 104.86 221.246 106.023C220.926 107.184 220.551 108.244 220.12 109.2C219.688 110.155 219.209 110.926 218.682 111.516C218.154 112.105 217.586 112.352 216.979 112.257C216.5 112.192 216.196 111.89 216.069 111.349C215.94 110.807 215.94 110.138 216.069 109.343C216.196 108.546 216.443 107.646 216.811 106.643C217.179 105.64 217.627 104.644 218.154 103.658C218.682 102.67 219.281 101.723 219.952 100.815C220.623 99.9082 221.326 99.1512 222.061 98.5464C222.221 98.7373 222.293 99.2149 222.277 99.9797C222.26 100.744 222.165 101.652 221.989 102.702ZM238.243 81.9697C237.811 81.4921 237.284 81.2218 236.66 81.1576C236.037 81.0939 235.405 81.4442 234.767 82.2085C234.351 82.9727 233.823 83.7054 233.184 84.406C232.545 85.1072 231.882 85.7436 231.195 86.3169C230.507 86.8896 229.852 87.3841 229.229 87.7975C228.606 88.212 228.118 88.5144 227.767 88.7053C227.639 87.6866 227.566 86.5878 227.551 85.409C227.534 84.2308 227.559 83.0369 227.623 81.8266C227.718 80.1067 227.918 78.3715 228.222 76.6194C228.526 74.868 228.965 73.148 229.541 71.4595C229.541 70.5686 229.332 69.8438 228.917 69.2862C228.501 68.7293 227.998 68.3784 227.407 68.2353C226.815 68.0923 226.209 68.1717 225.585 68.4741C224.962 68.7771 224.427 69.3268 223.979 70.122C223.596 71.1735 223.156 72.3516 222.661 73.6571C222.165 74.9631 221.606 76.2928 220.983 77.6461C220.359 79.0006 219.664 80.3139 218.897 81.5873C218.13 82.8618 217.291 83.9927 216.38 84.9793C215.469 85.9666 214.478 86.7393 213.408 87.2963C212.336 87.8538 211.179 88.1005 209.932 88.0369C209.356 87.8775 208.94 87.4478 208.685 86.7466C208.429 86.0466 208.277 85.1702 208.23 84.1193C208.182 83.0684 208.23 81.9139 208.373 80.6557C208.517 79.3982 208.709 
78.1479 208.949 76.9061C209.188 75.6637 209.452 74.4855 209.739 73.371C210.027 72.2565 210.298 71.3165 210.554 70.5523C210.938 69.6292 210.938 68.8559 210.554 68.2353C210.171 67.6141 209.644 67.2008 208.973 66.9929C208.302 66.7863 207.598 66.7947 206.863 67.0172C206.128 67.2402 205.6 67.7335 205.281 68.4977C204.737 69.8044 204.241 71.2686 203.794 72.8928C203.347 74.5171 202.987 76.1976 202.716 77.9328C202.444 79.6691 202.291 81.3891 202.26 83.0927C202.258 83.2036 202.263 83.309 202.263 83.4193C201.566 85.2708 200.902 86.6702 200.271 87.6066C199.456 88.8174 198.536 89.3429 197.514 89.1829C197.065 88.992 196.771 88.5465 196.627 87.8453C196.482 87.1453 196.435 86.2854 196.482 85.2654C196.531 84.2472 196.651 83.0927 196.842 81.8024C197.035 80.5127 197.273 79.1752 197.561 77.7897C197.849 76.4037 198.153 75.0116 198.472 73.6098C198.792 72.2086 199.079 70.8868 199.336 69.6444C199.304 68.5299 198.976 67.6784 198.352 67.0887C197.73 66.5002 196.858 66.2693 195.74 66.396C194.973 66.7147 194.405 67.1293 194.038 67.6384C193.67 68.1474 193.374 68.8008 193.151 69.5965C193.022 70.0111 192.831 70.8389 192.575 72.0813C192.319 73.3225 191.992 74.7486 191.592 76.3564C191.193 77.9655 190.721 79.6449 190.178 81.3963C189.635 83.1478 189.027 84.7333 188.357 86.1496C187.685 87.5666 186.95 88.7053 186.151 89.5653C185.352 90.4247 184.489 90.7756 183.562 90.6162C183.05 90.5205 182.723 89.995 182.579 89.0399C182.435 88.0841 182.412 86.9066 182.507 85.5048C182.603 84.1036 182.795 82.5666 183.082 80.8951C183.37 79.223 183.665 77.6388 183.969 76.1413C184.273 74.6449 184.553 73.3225 184.809 72.1765C185.064 71.0298 185.24 70.2656 185.336 69.8838C185.336 68.9602 185.127 68.2202 184.713 67.662C184.297 67.1056 183.794 66.7547 183.202 66.6111C182.61 66.4681 182.003 66.5475 181.381 66.8499C180.757 67.1529 180.222 67.7026 179.774 68.4977C179.614 69.3577 179.406 70.3535 179.151 71.4838C178.895 72.614 178.648 73.7765 178.408 74.971C178.168 76.1655 177.944 77.3358 177.737 78.4824C177.529 79.6291 177.377 
80.6321 177.281 81.4921C177.217 82.1606 177.145 82.9812 177.066 83.9521C176.985 84.9242 176.945 85.9508 176.945 87.0332C176.945 88.1169 177.025 89.1914 177.186 90.258C177.345 91.3253 177.633 92.3047 178.048 93.1956C178.463 94.0877 179.047 94.8198 179.799 95.3931C180.549 95.9664 181.5 96.2846 182.651 96.3489C183.833 96.4119 184.864 96.3252 185.744 96.0858C186.622 95.847 187.421 95.4725 188.141 94.9628C188.86 94.4543 189.515 93.8489 190.107 93.1477C190.697 92.4477 191.281 91.6835 191.856 90.855C192.4 92.0659 193.103 93.0047 193.966 93.6737C194.829 94.3422 195.74 94.741 196.699 94.8677C197.657 94.9943 198.633 94.8604 199.624 94.4616C200.614 94.064 201.509 93.3871 202.308 92.4313C202.835 91.8453 203.331 91.1792 203.797 90.4429C203.995 90.7877 204.205 91.1204 204.442 91.4277C205.225 92.4477 206.288 93.1477 207.631 93.5301C209.069 93.9125 210.474 93.9768 211.849 93.7216C213.223 93.4671 214.534 93.0047 215.78 92.3362C217.027 91.6671 218.185 90.8635 219.257 89.9235C220.327 88.9841 221.262 88.0053 222.061 86.9854C222.029 87.7181 222.013 88.4114 222.013 89.0635C222.013 89.7168 221.997 90.4247 221.966 91.1895C220.367 92.3047 218.857 93.6422 217.435 95.2022C216.012 96.7622 214.765 98.4264 213.695 100.194C212.624 101.961 211.785 103.753 211.179 105.568C210.571 107.384 210.275 109.08 210.291 110.657C210.307 112.233 210.682 113.61 211.418 114.788C212.152 115.967 213.351 116.81 215.013 117.32C216.74 117.862 218.257 117.877 219.569 117.368C220.879 116.858 222.021 116.014 222.996 114.836C223.971 113.658 224.77 112.233 225.394 110.561C226.017 108.889 226.512 107.145 226.88 105.33C227.247 103.515 227.479 101.73 227.575 99.9797C227.671 98.2276 227.671 96.6664 227.575 95.2974C230.324 94.1513 232.577 92.7022 234.335 90.9501C236.093 89.1999 237.547 87.352 238.698 85.409C239.049 84.9314 239.169 84.3581 239.058 83.6896C238.945 83.0206 238.674 82.4472 238.243 81.9697Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M298.724 78.9135C298.82 78.1814 298.964 77.4087 
299.155 76.5966C299.347 75.7845 299.587 74.996 299.875 74.2318C300.162 73.4676 300.498 72.807 300.882 72.2494C301.265 71.6924 301.673 71.2943 302.104 71.0549C302.536 70.8167 302.974 70.8403 303.423 71.1264C303.902 71.4137 304.197 72.0185 304.31 72.9415C304.421 73.8663 304.31 74.853 303.974 75.9039C303.638 76.9554 303.039 77.942 302.176 78.8657C301.313 79.7899 300.146 80.3941 298.676 80.6808C298.612 80.236 298.628 79.6463 298.724 78.9135ZM315.336 80.8717C314.809 80.7135 314.306 80.6972 313.826 80.8244C313.347 80.9517 313.043 81.2862 312.916 81.8281C312.659 82.8468 312.251 83.8898 311.692 84.9565C311.133 86.0238 310.446 87.0346 309.632 87.9904C308.817 88.9455 307.897 89.7898 306.875 90.5219C305.851 91.2546 304.781 91.78 303.662 92.0982C302.543 92.4491 301.616 92.4885 300.882 92.2176C300.146 91.9479 299.563 91.4855 299.132 90.8328C298.7 90.1801 298.388 89.3916 298.197 88.468C298.005 87.5443 297.893 86.5892 297.861 85.6013C299.683 85.7292 301.305 85.4032 302.728 84.622C304.149 83.8426 305.356 82.8068 306.347 81.5171C307.337 80.2275 308.089 78.7784 308.6 77.1699C309.111 75.5621 309.399 73.9615 309.463 72.3688C309.495 70.8718 309.272 69.6064 308.792 68.5713C308.313 67.5367 307.665 66.7313 306.85 66.1586C306.036 65.5853 305.1 65.2507 304.046 65.1556C302.992 65.0598 301.92 65.2034 300.833 65.5853C299.522 66.0313 298.412 66.7555 297.501 67.7592C296.59 68.7622 295.831 69.9252 295.224 71.2464C294.617 72.5682 294.137 73.993 293.786 75.5215C293.434 77.0505 293.178 78.5554 293.019 80.0366C292.875 81.3656 292.798 82.6365 292.771 83.8632C292.702 84.0189 292.636 84.1686 292.563 84.3353C292.067 85.4668 291.491 86.5734 290.837 87.6558C290.182 88.7389 289.454 89.6467 288.656 90.3788C287.857 91.1116 287.026 91.3661 286.163 91.1431C285.651 91.0164 285.372 90.4261 285.324 89.3758C285.276 88.3243 285.331 87.0189 285.491 85.4583C285.651 83.8983 285.835 82.2093 286.043 80.3941C286.25 78.579 286.354 76.8439 286.354 75.1875C286.354 73.7542 286.082 72.3773 285.539 71.0549C284.995 69.7343 
284.252 68.6349 283.31 67.7592C282.367 66.8828 281.272 66.3016 280.026 66.0156C278.779 65.7283 277.437 65.9198 275.999 66.5883C274.56 67.2574 273.417 68.1967 272.571 69.407C271.723 70.6179 270.948 71.8912 270.245 73.2288C269.989 72.2094 269.614 71.2628 269.118 70.3864C268.623 69.5107 268.016 68.7464 267.297 68.0931C266.577 67.441 265.769 66.9313 264.876 66.5646C263.981 66.1992 263.037 66.0156 262.046 66.0156C261.088 66.0156 260.201 66.1992 259.386 66.5646C258.571 66.9313 257.828 67.4004 257.156 67.9737C256.485 68.5476 255.878 69.1919 255.334 69.9088C254.791 70.6252 254.311 71.3343 253.896 72.0343C253.831 71.2064 253.76 70.4822 253.681 69.8603C253.6 69.2398 253.456 68.7143 253.249 68.2846C253.041 67.8543 252.746 67.5283 252.362 67.3052C251.978 67.0828 251.435 66.9707 250.732 66.9707C250.38 66.9707 250.028 67.0422 249.677 67.1852C249.325 67.3289 249.013 67.5283 248.742 67.7828C248.47 68.0386 248.263 68.3482 248.119 68.7143C247.975 69.0804 247.936 69.5028 247.999 69.9803C248.031 70.3312 248.119 70.7525 248.263 71.2464C248.406 71.7403 248.542 72.3858 248.67 73.1809C248.798 73.9773 248.902 74.9409 248.982 76.0712C249.062 77.2021 249.085 78.5875 249.054 80.2275C249.021 81.8681 248.902 83.7862 248.694 85.9837C248.486 88.1813 248.158 90.7291 247.711 93.6267C247.647 94.2957 247.903 94.8376 248.479 95.2515C249.054 95.6648 249.709 95.9036 250.444 95.9678C251.179 96.0315 251.875 95.9036 252.53 95.586C253.185 95.2666 253.561 94.7097 253.656 93.9139C253.752 92.417 253.936 90.8249 254.208 89.1364C254.479 87.4492 254.815 85.7771 255.215 84.1207C255.614 82.465 256.069 80.8887 256.581 79.3911C257.092 77.8942 257.66 76.573 258.283 75.4263C258.907 74.2797 259.554 73.3645 260.225 72.6797C260.896 71.9949 261.599 71.6524 262.335 71.6524C263.229 71.6524 263.924 72.0579 264.42 72.87C264.915 73.6827 265.266 74.7263 265.475 75.999C265.682 77.2736 265.778 78.6675 265.763 80.1796C265.746 81.6923 265.682 83.1492 265.571 84.5504C265.459 85.9522 265.331 87.2019 265.187 88.3007C265.043 89.3995 
264.939 90.1564 264.876 90.5697C264.876 91.3025 265.155 91.8831 265.714 92.3134C266.273 92.743 266.896 92.9982 267.584 93.0776C268.272 93.1576 268.918 93.0297 269.526 92.6952C270.133 92.3606 270.485 91.7964 270.581 90.9994C270.9 88.7067 271.34 86.4062 271.899 84.0971C272.458 81.7881 273.098 79.7184 273.817 77.8869C274.536 76.0554 275.335 74.5585 276.214 73.3961C277.093 72.2343 278.028 71.6524 279.019 71.6524C279.53 71.6524 279.922 72.0033 280.193 72.7033C280.465 73.4039 280.601 74.3591 280.601 75.5694C280.601 76.4615 280.529 77.3772 280.386 78.3166C280.241 79.256 280.074 80.2275 279.882 81.2305C279.69 82.2341 279.522 83.2608 279.378 84.3117C279.235 85.3632 279.163 86.4613 279.163 87.608C279.163 88.4043 279.243 89.3279 279.403 90.3788C279.562 91.4291 279.865 92.4255 280.313 93.3642C280.761 94.3042 281.376 95.1 282.16 95.7527C282.943 96.4054 283.941 96.7321 285.155 96.7321C286.978 96.7321 288.591 96.3418 289.998 95.5618C291.404 94.7818 292.611 93.763 293.618 92.5049C293.67 92.4388 293.718 92.3685 293.769 92.3031C293.846 92.4891 293.914 92.6861 294.001 92.863C294.688 94.2642 295.623 95.3466 296.806 96.1115C297.988 96.8757 299.379 97.2975 300.978 97.3775C302.575 97.4563 304.317 97.1618 306.204 96.4933C307.609 95.9836 308.832 95.3466 309.871 94.5824C310.909 93.8182 311.844 92.8867 312.675 91.7879C313.507 90.6891 314.265 89.4231 314.953 87.9904C315.641 86.5565 316.335 84.9171 317.038 83.0692C317.166 82.5608 317.046 82.1068 316.679 81.7081C316.311 81.3105 315.864 81.0317 315.336 80.8717Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M341.393 75.5432C341.233 76.4832 341.018 77.5189 340.746 78.6486C340.474 79.7795 340.131 80.9498 339.715 82.1601C339.3 83.3703 338.788 84.4612 338.181 85.4321C337.574 86.4042 336.878 87.1757 336.096 87.7491C335.312 88.3224 334.41 88.5612 333.387 88.4654C332.875 88.4024 332.483 88.0521 332.212 87.4145C331.94 86.7782 331.797 85.9655 331.78 84.9782C331.764 83.9915 331.852 82.9085 332.044 81.7298C332.236 80.5522 332.531 
79.3971 332.932 78.2662C333.331 77.1365 333.818 76.0929 334.393 75.1371C334.969 74.182 335.632 73.4414 336.383 72.916C337.134 72.3905 337.958 72.1445 338.852 72.1754C339.747 72.2075 340.706 72.6529 341.729 73.5129C341.664 73.9275 341.553 74.6044 341.393 75.5432ZM358.437 79.1977C357.941 78.9431 357.43 78.888 356.903 79.031C356.376 79.174 356 79.6601 355.777 80.488C355.649 81.3801 355.361 82.4304 354.914 83.6406C354.466 84.8509 353.914 85.9982 353.26 87.08C352.604 88.163 351.853 89.063 351.006 89.7793C350.159 90.4963 349.256 90.823 348.298 90.7581C347.498 90.6951 346.938 90.289 346.62 89.5406C346.299 88.7921 346.132 87.8533 346.116 86.7218C346.099 85.5921 346.212 84.3182 346.451 82.9007C346.691 81.4837 346.979 80.0746 347.314 78.6722C347.65 77.2716 347.994 75.9256 348.346 74.6359C348.697 73.3463 348.984 72.2554 349.209 71.3639C349.464 70.5675 349.384 69.8912 348.969 69.333C348.553 68.7766 348.034 68.3778 347.411 68.1391C346.787 67.9003 346.155 67.8366 345.516 67.9481C344.877 68.0597 344.462 68.4021 344.27 68.9748C342.384 67.3506 340.57 66.4748 338.829 66.3476C337.086 66.2203 335.48 66.6027 334.01 67.4942C332.539 68.3857 331.237 69.6754 330.103 71.3639C328.968 73.0523 328.049 74.8911 327.345 76.8814C326.642 78.8716 326.203 80.9025 326.027 82.9722C325.851 85.0424 325.987 86.9297 326.435 88.6333C326.883 90.3369 327.673 91.7308 328.808 92.8126C329.942 93.8956 331.485 94.4375 333.435 94.4375C334.298 94.4375 335.129 94.2623 335.928 93.912C336.726 93.5611 337.462 93.1472 338.133 92.6696C338.804 92.192 339.395 91.6902 339.908 91.1648C340.418 90.6393 340.818 90.2018 341.106 89.8509C341.329 90.9975 341.697 91.9696 342.209 92.7654C342.719 93.5611 343.303 94.215 343.958 94.7235C344.613 95.2326 345.301 95.6071 346.02 95.8465C346.739 96.0853 347.435 96.2047 348.105 96.2047C349.608 96.2047 351.013 95.695 352.325 94.6756C353.635 93.6575 354.81 92.4066 355.849 90.926C356.887 89.4448 357.743 87.8848 358.413 86.2442C359.085 84.6043 359.532 83.1473 359.756 81.8728C359.98 81.3952 359.939 
80.894 359.636 80.3686C359.332 79.8431 358.933 79.4534 358.437 79.1977Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M444.738 105.571C444.467 106.653 444.043 107.57 443.467 108.318C442.892 109.066 442.173 109.456 441.31 109.489C440.767 109.52 440.351 109.233 440.063 108.629C439.776 108.023 439.576 107.243 439.464 106.288C439.352 105.332 439.304 104.265 439.32 103.087C439.336 101.909 439.384 100.746 439.464 99.5996C439.543 98.4536 439.64 97.3857 439.752 96.3991C439.863 95.4112 439.951 94.6482 440.015 94.1064C441.102 94.2336 442.006 94.7027 442.724 95.5154C443.443 96.3275 443.995 97.2906 444.378 98.4057C444.762 99.5202 444.985 100.723 445.05 102.012C445.113 103.302 445.009 104.488 444.738 105.571ZM427.382 105.571C427.111 106.653 426.687 107.57 426.112 108.318C425.537 109.066 424.817 109.456 423.954 109.489C423.411 109.52 422.996 109.233 422.708 108.629C422.42 108.023 422.22 107.243 422.109 106.288C421.996 105.332 421.948 104.265 421.965 103.087C421.98 101.909 422.028 100.746 422.109 99.5996C422.188 98.4536 422.284 97.3857 422.396 96.3991C422.508 95.4112 422.595 94.6482 422.66 94.1064C423.746 94.2336 424.65 94.7027 425.368 95.5154C426.088 96.3275 426.639 97.2906 427.023 98.4057C427.407 99.5202 427.63 100.723 427.694 102.012C427.757 103.302 427.653 104.488 427.382 105.571ZM409.572 78.4375C409.539 79.2011 409.467 79.8781 409.355 80.4672C409.243 81.0575 409.092 81.4308 408.9 81.5902C408.548 81.3987 408.116 80.906 407.605 80.109C407.094 79.3133 406.695 78.4127 406.406 77.4096C406.119 76.4066 406.03 75.42 406.143 74.4479C406.254 73.477 406.758 72.7212 407.653 72.1788C408.004 71.9879 408.308 72.0594 408.564 72.394C408.82 72.7285 409.027 73.2139 409.188 73.8509C409.347 74.4885 409.458 75.2206 409.523 76.0485C409.587 76.8769 409.603 77.6727 409.572 78.4375ZM405.328 87.9677C404.832 88.4925 404.28 88.9464 403.674 89.3289C403.066 89.7113 402.443 89.9979 401.804 90.1889C401.164 90.3804 400.589 90.4276 400.078 90.3319C398.64 90.0458 397.537 89.424 396.77 
88.4689C396.003 87.5137 395.515 86.3913 395.308 85.1017C395.1 83.8114 395.123 82.4338 395.38 80.969C395.635 79.5042 396.066 78.143 396.674 76.8848C397.281 75.6266 398.017 74.5436 398.879 73.6364C399.742 72.7285 400.685 72.1637 401.708 71.94C401.324 73.5642 401.197 75.2448 401.324 76.98C401.452 78.7157 401.868 80.3478 402.571 81.8762C403.018 82.8011 403.554 83.6441 404.177 84.4083C404.801 85.1732 405.56 85.8259 406.455 86.3671C406.199 86.9089 405.823 87.4422 405.328 87.9677ZM458.378 78.9151C458.474 78.183 458.617 77.4096 458.81 76.5975C459.001 75.786 459.241 74.9976 459.528 74.2333C459.816 73.4685 460.152 72.8079 460.536 72.2509C460.92 71.694 461.326 71.2952 461.758 71.0564C462.19 70.8176 462.629 70.8413 463.076 71.1279C463.556 71.4152 463.851 72.02 463.963 72.943C464.075 73.8673 463.963 74.8539 463.628 75.9054C463.292 76.9563 462.693 77.9436 461.83 78.8666C460.968 79.7914 459.8 80.3957 458.33 80.6823C458.266 80.2369 458.282 79.6478 458.378 78.9151ZM477.7 78.9151C477.796 78.183 477.939 77.4096 478.131 76.5975C478.323 75.786 478.563 74.9976 478.851 74.2333C479.138 73.4685 479.473 72.8079 479.857 72.2509C480.241 71.694 480.649 71.2952 481.08 71.0564C481.512 70.8176 481.951 70.8413 482.398 71.1279C482.878 71.4152 483.173 72.02 483.285 72.943C483.397 73.8673 483.285 74.8539 482.95 75.9054C482.614 76.9563 482.015 77.9436 481.152 78.8666C480.289 79.7914 479.122 80.3957 477.652 80.6823C477.588 80.2369 477.604 79.6478 477.7 78.9151ZM495.655 81.7096C495.287 81.312 494.84 81.0332 494.313 80.8732C493.785 80.7144 493.282 80.6987 492.802 80.826C492.323 80.9532 492.018 81.2878 491.891 81.829C491.635 82.8484 491.228 83.8914 490.669 84.9574C490.109 86.0253 489.422 87.0362 488.607 87.9913C487.792 88.9464 486.873 89.7913 485.851 90.5234C484.827 91.2561 483.757 91.7816 482.639 92.0991C481.519 92.4506 480.592 92.49 479.857 92.2191C479.122 91.9488 478.539 91.487 478.107 90.8343C477.676 90.181 477.365 89.3931 477.172 88.4689C476.981 87.5459 476.868 86.5907 476.837 85.6029C478.659 85.7307 
480.281 85.4047 481.703 84.6235C483.125 83.8435 484.332 82.8077 485.324 81.5181C486.314 80.229 487.065 78.7799 487.576 77.1715C488.087 75.563 488.375 73.963 488.44 72.3703C488.471 70.8734 488.247 69.6073 487.768 68.5722C487.289 67.5377 486.642 66.7328 485.827 66.1601C485.011 65.5862 484.077 65.2522 483.021 65.1565C481.967 65.0607 480.896 65.205 479.809 65.5862C478.498 66.0328 477.388 66.7571 476.478 67.7601C475.567 68.7637 474.807 69.9267 474.2 71.2473C473.592 72.5697 473.113 73.9939 472.761 75.523C472.409 77.0515 472.154 78.5569 471.995 80.0375C471.839 81.4744 471.755 82.8496 471.736 84.1659C471.615 84.4283 471.486 84.692 471.347 84.9574C470.787 86.0253 470.1 87.0362 469.285 87.9913C468.471 88.9464 467.551 89.7913 466.529 90.5234C465.506 91.2561 464.435 91.7816 463.317 92.0991C462.197 92.4506 461.271 92.49 460.536 92.2191C459.8 91.9488 459.217 91.487 458.786 90.8343C458.355 90.181 458.043 89.3931 457.851 88.4689C457.659 87.5459 457.547 86.5907 457.515 85.6029C459.337 85.7307 460.959 85.4047 462.382 84.6235C463.803 83.8435 465.01 82.8077 466.001 81.5181C466.992 80.229 467.743 78.7799 468.254 77.1715C468.765 75.563 469.054 73.963 469.117 72.3703C469.149 70.8734 468.926 69.6073 468.447 68.5722C467.967 67.5377 467.319 66.7328 466.504 66.1601C465.689 65.5862 464.755 65.2522 463.7 65.1565C462.645 65.0607 461.574 65.205 460.488 65.5862C459.176 66.0328 458.066 66.7571 457.156 67.7601C456.245 68.7637 455.485 69.9267 454.878 71.2473C454.271 72.5697 453.792 73.9939 453.44 75.523C453.088 77.0515 452.832 78.5569 452.673 80.0375C452.582 80.8726 452.522 81.6823 452.477 82.4774C452.168 82.7393 451.867 83.0029 451.546 83.2617C450.444 84.1538 449.284 84.9574 448.07 85.6744C446.855 86.3913 445.592 86.9804 444.283 87.4422C442.971 87.904 441.629 88.1828 440.255 88.278L443.228 56.5578C443.42 55.8887 443.324 55.3003 442.94 54.7906C442.557 54.2809 442.061 53.9306 441.454 53.7397C440.847 53.5482 440.199 53.5645 439.512 53.787C438.824 54.0106 438.258 54.5203 437.81 55.3154C437.586 56.5263 
437.354 58.182 437.115 60.2838C436.875 62.3856 436.635 64.6789 436.396 67.1631C436.156 69.6473 435.916 72.2109 435.677 74.8539C435.437 77.4981 435.229 79.966 435.053 82.2587C435.045 82.3605 435.039 82.4526 435.031 82.5532C434.751 82.7896 434.48 83.0277 434.19 83.2617C433.088 84.1538 431.928 84.9574 430.714 85.6744C429.499 86.3913 428.237 86.9804 426.927 87.4422C425.616 87.904 424.273 88.1828 422.899 88.278L425.872 56.5578C426.064 55.8887 425.968 55.3003 425.585 54.7906C425.201 54.2809 424.705 53.9306 424.098 53.7397C423.491 53.5482 422.843 53.5645 422.156 53.787C421.469 54.0106 420.902 54.5203 420.454 55.3154C420.23 56.5263 419.999 58.182 419.76 60.2838C419.519 62.3856 419.28 64.6789 419.04 67.1631C418.8 69.6473 418.561 72.2109 418.321 74.8539C418.082 77.4981 417.873 79.966 417.698 82.2587C417.694 82.3047 417.691 82.3465 417.687 82.3926C417.185 82.6247 416.638 82.8284 416.043 82.9993C415.436 83.175 414.749 83.2786 413.982 83.3102C414.11 82.7362 414.213 82.0993 414.293 81.3987C414.373 80.6987 414.438 79.966 414.486 79.2011C414.534 78.4375 414.549 77.6727 414.534 76.9084C414.517 76.1436 414.477 75.4436 414.414 74.806C414.253 73.4376 413.958 72.1394 413.527 70.9128C413.095 69.6873 412.512 68.6607 411.777 67.8316C411.041 67.0037 410.123 66.4462 409.019 66.1601C407.917 65.8734 406.63 65.9686 405.161 66.4462C402.986 66.1601 401.029 66.3595 399.287 67.0437C397.545 67.7292 396.034 68.7237 394.756 70.0291C393.478 71.3358 392.431 72.8715 391.616 74.6394C390.801 76.4066 390.257 78.2224 389.986 80.0848C389.871 80.8744 389.815 81.6605 389.798 82.4447C389.303 83.4544 388.761 84.3368 388.164 85.0774C387.317 86.1283 386.438 86.9883 385.527 87.6568C384.616 88.3258 383.713 88.8355 382.819 89.1858C381.923 89.5367 381.124 89.7755 380.421 89.9022C379.59 90.0616 378.791 90.0779 378.024 89.9501C377.257 89.8234 376.553 89.4567 375.915 88.8513C375.403 88.4058 375.011 87.6889 374.74 86.7016C374.468 85.7144 374.309 84.5926 374.261 83.3338C374.213 82.0756 374.261 80.7617 374.404 
79.3926C374.548 78.0236 374.795 76.7254 375.147 75.4994C375.499 74.2733 375.945 73.1746 376.49 72.2024C377.032 71.2322 377.672 70.5388 378.408 70.1249C378.822 70.1891 379.079 70.4352 379.175 70.8649C379.271 71.2952 379.294 71.8049 379.246 72.394C379.199 72.9836 379.127 73.5885 379.031 74.2091C378.935 74.8303 378.887 75.3485 378.887 75.7618C379.047 76.6218 379.358 77.2909 379.822 77.7684C380.285 78.246 380.805 78.5254 381.38 78.6042C381.955 78.6842 382.522 78.549 383.083 78.1981C383.641 77.8484 384.096 77.2909 384.449 76.526C384.48 76.5581 384.528 76.5739 384.592 76.5739L385.264 70.5073C385.455 69.6788 385.327 68.9467 384.88 68.3098C384.432 67.6728 383.841 67.3062 383.106 67.211C382.179 65.8734 380.924 65.165 379.342 65.085C377.76 65.0056 376.138 65.5231 374.476 66.6377C373.453 67.371 372.55 68.3813 371.767 69.671C370.983 70.9613 370.345 72.394 369.85 73.9703C369.353 75.5466 369.002 77.2115 368.795 78.963C368.587 80.7144 368.547 82.4187 368.674 84.0738C368.802 85.7307 369.098 87.2913 369.562 88.7555C370.025 90.221 370.672 91.447 371.504 92.4337C372.207 93.2937 373.005 93.9233 373.9 94.3215C374.795 94.7197 375.73 94.9658 376.705 95.0615C377.68 95.1567 378.647 95.1167 379.606 94.9421C380.565 94.7676 381.476 94.5209 382.339 94.2015C383.457 93.7882 384.609 93.2621 385.791 92.6252C386.973 91.9888 388.108 91.224 389.195 90.3319C389.767 89.8628 390.317 89.3513 390.849 88.8028C391.091 89.4016 391.362 89.981 391.688 90.5234C392.551 91.9561 393.717 93.1191 395.188 94.0106C396.657 94.9021 398.464 95.3312 400.605 95.3003C402.907 95.2682 405.032 94.6876 406.982 93.5567C408.932 92.427 410.53 90.7616 411.777 88.5646C413.644 88.5646 415.481 88.258 417.287 87.6489C417.272 87.8416 417.256 88.0446 417.242 88.2307C417.115 89.9186 417.05 91.0646 417.05 91.67C417.019 92.7209 416.947 94.0185 416.835 95.5627C416.723 97.1075 416.651 98.7318 416.619 100.435C416.588 102.139 416.651 103.859 416.811 105.595C416.971 107.33 417.306 108.907 417.818 110.325C418.328 111.741 419.055 112.944 419.999 
113.932C420.941 114.918 422.18 115.508 423.715 115.699C425.345 115.921 426.751 115.635 427.934 114.839C429.116 114.042 430.075 112.952 430.811 111.567C431.546 110.181 432.064 108.581 432.369 106.766C432.672 104.95 432.76 103.127 432.633 101.295C432.504 99.4639 432.168 97.7366 431.625 96.113C431.082 94.4882 430.33 93.1506 429.372 92.0991C429.948 91.9409 430.634 91.6385 431.434 91.1919C432.232 90.7464 433.055 90.2446 433.903 89.687C434.111 89.5501 434.316 89.4058 434.524 89.2652C434.446 90.3937 434.406 91.1985 434.406 91.67C434.375 92.7209 434.303 94.0185 434.19 95.5627C434.079 97.1075 434.007 98.7318 433.975 100.435C433.943 102.139 434.007 103.859 434.167 105.595C434.326 107.33 434.662 108.907 435.173 110.325C435.684 111.741 436.412 112.944 437.354 113.932C438.297 114.918 439.536 115.508 441.071 115.699C442.7 115.921 444.106 115.635 445.289 114.839C446.472 114.042 447.431 112.952 448.166 111.567C448.901 110.181 449.42 108.581 449.724 106.766C450.028 104.95 450.115 103.127 449.988 101.295C449.86 99.4639 449.524 97.7366 448.982 96.113C448.437 94.4882 447.687 93.1506 446.727 92.0991C447.303 91.9409 447.99 91.6385 448.789 91.1919C449.588 90.7464 450.411 90.2446 451.259 89.687C451.699 89.3974 452.136 89.0986 452.573 88.7913C452.737 90.3488 453.091 91.7149 453.655 92.864C454.343 94.2658 455.277 95.3482 456.46 96.113C457.642 96.8766 459.033 97.299 460.632 97.3784C462.23 97.4572 463.971 97.1633 465.858 96.4942C467.264 95.9851 468.486 95.3482 469.525 94.5839C470.563 93.8191 471.498 92.8876 472.33 91.7894C472.378 91.7258 472.423 91.6567 472.47 91.5925C472.618 92.0385 472.782 92.467 472.977 92.864C473.665 94.2658 474.6 95.3482 475.782 96.113C476.964 96.8766 478.355 97.299 479.953 97.3784C481.551 97.4572 483.293 97.1633 485.179 96.4942C486.586 95.9851 487.808 95.3482 488.847 94.5839C489.885 93.8191 490.82 92.8876 491.652 91.7894C492.483 90.6901 493.241 89.424 493.929 87.9913C494.616 86.558 495.311 84.9186 496.015 83.0708C496.142 82.5617 496.022 82.1078 495.655 81.7096Z' 
fill='%230D0C23'/%3E%3C/svg%3E%0A\");\n border-radius: 6px;\n box-shadow: 0px 2px 3px rgba(0, 0, 0, 0.1);\n}\n\n:root[data-color=\"dark\"] .btn-buymeacoffee, :root[data-color=\"night\"] .btn-buymeacoffee {\n box-shadow: 0px 2px 3px rgba(255, 255, 255, 0.1);\n}\n\n.btn-close {\n background: var(--background-fg);\n border: 1px dotted var(--border-color);\n border-radius: 4px;\n cursor: pointer;\n}\n", - ".dropdown {\n position: relative;\n}\n\n.dropdown-btn {\n display: flex;\n flex-direction: row;\n box-shadow: var(--box-shadow);\n border-radius: 6px;\n padding: 6px;\n cursor: pointer;\n white-space: nowrap;\n}\n\n.dropdown-btn .icon-select {\n opacity: .4;\n}\n\n.dropdown-menu {\n display: none;\n position: absolute;\n right: 0;\n top: 34px;\n min-width: 100px;\n max-height: 240px;\n overflow-x: auto;\n background: var(--background);\n color: var(--color3);\n box-shadow: var(--box-shadow2);\n z-index: 1;\n border-radius: 6px;\n padding: 3px;\n}\n\n.dropdown-menu.show {\n display: block;\n}\n\n.dropdown-menu button, .dropdown-menu a {\n width: 100%;\n display: flex;\n gap: 2px;\n padding: 6px;\n align-items: center;\n justify-content: center;\n cursor: pointer;\n}\n\n.dropdown-menu button:hover, .dropdown-menu a:hover {\n background: var(--background-fg);\n}\n" - ], - "names": [], - "mappings": 
"AAAA;;;;GAIG,ACDH,AAAA,CAAC,CAAC,KAAM,CAAA,IAAI,AAAA,CAAC,AAAA,uDAAuD,AAAA,CAAC,AAAA,IAAI,AAAA,CAAC,AAAA,eAAe,AAAA,CAAC,CAAE,CAC1F,GAAG,CAAE,KAAK,CACV,OAAO,CAAE,MAAM,CAChB,AAED,AAAA,CAAC,CACD,CAAC,EAAE,MAAM,CACT,CAAC,EAAE,KAAK,AAAC,CACP,UAAU,CAAE,UAAU,CACvB,AAED,AAAA,IAAI,AAAC,CACH,qBAAqB,CAAE,IAAI,CAC3B,wBAAwB,CAAE,IAAI,CAC9B,gBAAgB,CAAE,IAAI,CACvB,AAED,AAAA,CAAC,CAAE,MAAM,AAAC,CACR,MAAM,CAAE,MAAM,CACf,AAED,AAAA,EAAE,CAAE,EAAE,CAAE,IAAI,AAAC,CACX,UAAU,CAAE,IAAI,CACjB,AAED,AAAA,GAAG,AAAC,CACF,eAAe,CAAE,IAAI,CACrB,cAAc,CAAE,IAAI,CACrB,AAED,AAAA,KAAK,AAAC,CACJ,eAAe,CAAE,QAAQ,CAC1B,AAED,AAAA,KAAK,CAAE,QAAQ,AAAC,CACd,mBAAmB,CAAE,IAAI,CAC1B,AAED,AAAA,QAAQ,AAAC,CACP,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,KAAK,AAAC,CACJ,kBAAkB,CAAE,MAAM,CAC1B,UAAU,CAAE,MAAM,CACnB,CAEA,AAAD,KAAO,CAAA,GAAG,CAAE,CACV,GAAG,CAAE,MAAM,CACX,UAAU,CAAE,UAAU,CACvB,EAEC,AAAF,WAAa,AAAC,CACZ,KAAK,CAAE,KAAK,CACb,EAEC,AAAF,MAAQ,AAAC,CACP,OAAO,CAAE,OAAO,CACjB,CAEA,AAAD,KAAO,CAAA,CAAC,AAAA,MAAM,AAAA,CAAC,CAAE,CACf,OAAO,CAAE,IAAI,CACd,CAEA,AAAD,KAAO,CAAA,CAAC,AAAA,eAAe,AAAA,CAAC,AAAA,IAAI,AAAA,CAAC,AAAA,CAAC,AAAA,gBAAgB,AAAA,OAAO,AAAA,CAAC,AAAA,CAAC,CAAE,CACvD,gBAAgB,CAAE,UAAU,CAC5B,mBAAmB,CAAE,UAAU,CAC/B,aAAa,CAAE,UAAU,CACzB,kBAAkB,CAAE,iBAAiB,CACrC,mBAAmB,CAAE,IAAI,CAC1B,CAEA,AAAD,KAAO,CAAA,CAAC,AAAA,UAAU,AAAA,MAAM,AAAA,CAAC,CAAE,CACzB,iBAAiB,CAAE,OAAO,CAC3B,CAEA,AAAD,KAAO,CAAA,YAAY,CAAE,CACnB,GAAG,CAAE,MAAM,CACX,UAAU,CAAE,UAAU,CACvB,AAED,AAAA,GAAG,CAAE,IAAI,AAAC,CACR,MAAM,CAAE,CAAC,CACV,CCtFA,AAAD,IAAK,AAAC,CACJ,oBAAoB,CAAA,KAAC,CACrB,oBAAoB,CAAA,KAAC,CACtB,AAED,MAAM,oHAEJ,EAAC,AAAD,IAAK,AAAC,CACJ,oBAAoB,CAAA,KAAC,CACrB,oBAAoB,CAAA,KAAC,CACtB,CAAA,AAGH,MAAM,oBACJ,EAAC,AAAD,IAAK,AAAC,CACJ,oBAAoB,CAAA,KAAC,CACrB,oBAAoB,CAAA,KAAC,CACtB,CAAA,ACjBH,AAAA,IAAI,AAAC,CACH,WAAW,CAAE,kBAAkB,CAC/B,UAAU,CAAE,iBAAiB,CAC7B,KAAK,CAAE,YAAY,CACnB,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,UAAU,CAAE,MAAM,CACnB,AAED,AAAA,YAAY,AAAC,CACX,OAAO,CAAE,IAAI,CACb,qBAAqB,CAAE,OAAO,CAC9B,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,iBAAiB,CAAE,mBAAmB,A
AAC,CACrC,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,YAAY,AAAC,CACX,OAAO,CAAE,IAAI,CACb,qBAAqB,CAAE,OAAO,CAC9B,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,sBAAsB,CAAE,iBAAiB,AAAC,CACxC,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,0BAA0B,AAAC,CACzB,OAAO,CAAE,IAAI,CACb,IAAI,CAAE,CAAC,CACR,AAED,AAAA,QAAQ,CAAE,IAAI,CAAE,YAAY,CAAE,QAAQ,CAAC,UAAU,CAAE,IAAI,CAAC,UAAU,AAAC,CACjE,OAAO,CAAE,IAAI,CACd,AAED,AAAA,IAAI,AAAC,CACH,IAAI,CAAE,CAAC,CACP,OAAO,CAAE,IAAI,CACb,QAAQ,CAAE,IAAI,CACf,AAED,AAAA,QAAQ,AAAC,CACP,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,KAAK,CACb,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,IAAI,CAAE,IAAI,CACX,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,KAAK,CAAE,IAAI,CACZ,AAGD,MAAM,2CACJ,CAAA,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,kBAAkB,AAAC,CACjB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,oCAAoC,CACzD,AAED,AAAA,sBAAsB,AAAC,CACrB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,IAAI,CAAE,IAAI,CACX,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,KAAK,CAAE,IAAI,CACZ,CA/CA,AAmDH,MAAM,oBACJ,CAAA,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,yBAAyB,CAC9C,AAED,AAAA,kBAAkB,AAAC,CACjB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CAClB,QAAQ,CAAE,CAAC,CACZ,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CAClB,QAAQ,CAAE,CAAC,CACZ,AAED,AAAA,qBAAqB,AAAC,CACpB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,YAAY,AAAC,CACX,qBAAqB,CAAE,cAAc,CACrC,kBAAkB,CAAE,yBAAyB,CAC9C,AAED,AAAA,sBAAsB,AAAC,CACrB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CAClB,QAAQ,CAAE,CAAC,CACZ,AAED,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,iBAAiB,AAAC,CAChB,WAAW,CAAE,KAAK,CACnB,AAED,AAAA,oBAAoB,AAAC,CACnB,OAAO,CAAE,IAAI,CACd,CA5CA,AA
gDH,MAAM,4CACJ,CAAA,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,IAAI,CAAE,IAAI,CACX,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACvB,AAED,AAAA,IAAI,CAAC,OAAO,AAAC,CACX,QAAQ,CAAE,KAAK,CACf,KAAK,CAAE,CAAC,CACR,KAAK,CAAE,GAAG,CACX,CAhBA,AAoBH,MAAM,oBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACvB,AAED,AAAA,QAAQ,AAAC,CACP,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,IAAI,AAAC,CACH,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACvB,AAED,AAAA,QAAQ,CAAC,OAAO,AAAC,CACf,QAAQ,CAAE,KAAK,CACf,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,GAAG,CACX,AAED,AAAA,IAAI,CAAC,OAAO,AAAC,CACX,QAAQ,CAAE,KAAK,CACf,KAAK,CAAE,CAAC,CACR,KAAK,CAAE,GAAG,CACX,CAtBA,AA0BH,MAAM,oBACJ,CAAA,AAAA,IAAI,AAAC,CACH,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,GAAG,CACf,OAAO,CAAE,GAAG,CACZ,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,iBAAiB,CAC7B,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,eAAkB,CAC1C,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,IAAI,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,IAAI,AAAC,CAC5D,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,qBAAwB,CAChD,AAED,AAAA,iBAAiB,CAAC,IAAI,AAAC,CACrB,SAAS,CAAE,2BAA2B,CACtC,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,YAAY,CAAE,IAAI,CAClB,OAAO,CAAE,EAAE,CACX,MAAM,CAAE,OAAO,CAChB,AAED,AAAA,iBAAiB,CAAC,MAAM,AAAC,CACvB,OAAO,CAAE,EAAE,CACX,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,OAAO,CAAE,CAAC,CACX,AAED,AAAA,iBAAiB,CAAC,IAAI,CAAC,UAAU,AAAC,CAChC,OAAO,CAAE,KAAK,CACd,QAAQ,CAAE,QAAQ,CAClB,GAAG,CAAE,IAAI,CACT,IAAI,CAAE,IAAI,CACX,AAED,AAAA,oBAAoB,AAAC,CACnB,OAAO,CAAE,IAAI,CACb,UAAU,CAAE,kBAAkB,CAC9B,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,MAAM,CACnB,GAAG,CAAE,GAAG,CACR,KAAK,CAAE,aAAa,CACrB,CAzCA,AA6CH,MAAM,oBACJ,CAAA,AAAA,QAAQ,AAAC,CACP,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,GAAG,CACf,OAAO,CAAE,GAAG,CACZ,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,iBA
AiB,CAC7B,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,eAAkB,CAC1C,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,QAAQ,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,QAAQ,AAAC,CACpE,UAAU,CAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,qBAAwB,CAChD,AAED,AAAA,qBAAqB,CAAC,QAAQ,AAAC,CAC7B,SAAS,CAAE,0BAA0B,CACrC,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,OAAO,CAAE,EAAE,CACX,MAAM,CAAE,OAAO,CAChB,AAED,AAAA,qBAAqB,CAAC,MAAM,AAAC,CAC3B,OAAO,CAAE,EAAE,CACX,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,OAAO,CAAE,CAAC,CACX,AAED,AAAA,qBAAqB,CAAC,QAAQ,CAAC,UAAU,AAAC,CACxC,OAAO,CAAE,KAAK,CACd,QAAQ,CAAE,QAAQ,CAClB,GAAG,CAAE,IAAI,CACT,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,YAAY,AAAC,CACX,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,IAAI,CACT,QAAQ,CAAE,IAAI,CACd,eAAe,CAAE,aAAa,CAC9B,MAAM,CAAE,yBAAyB,CACjC,WAAW,CAAE,MAAM,CACnB,OAAO,CAAE,KAAK,CACf,AAED,AAAA,qBAAqB,AAAC,CACpB,OAAO,CAAE,IAAI,CACb,UAAU,CAAE,kBAAkB,CAC9B,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,MAAM,CACnB,GAAG,CAAE,GAAG,CACR,KAAK,CAAE,aAAa,CACrB,CAlDA,AAqDH,AAAA,IAAI,AAAA,qBAAqB,CAAE,IAAI,AAAA,iBAAiB,AAAC,CAC/C,MAAM,CAAE,OAAO,CACf,QAAQ,CAAE,MAAM,CACjB,AAED,AAAA,qBAAqB,CAAC,MAAM,CAAE,iBAAiB,CAAC,MAAM,AAAC,CACrD,UAAU,CAAE,qBAAwB,CACpC,eAAe,CAAE,iBAAiB,CAClC,uBAAuB,CAAE,iBAAiB,CAC3C,AAED,UAAU,CAAV,aAAU,CACR,IAAI,CACF,SAAS,CAAE,aAAa,CAE1B,EAAE,CACA,SAAS,CAAE,gBAAgB,EAI/B,UAAU,CAAV,cAAU,CACR,IAAI,CACF,SAAS,CAAE,aAAa,CAE1B,EAAE,CACA,SAAS,CAAE,iBAAiB,EC7WhC,AAAA,kBAAkB,AAAC,CACjB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,WAAW,CAAE,wBAAwB,CACrC,SAAS,CAAE,KAAK,CAChB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,kBAAkB,CAAC,CAAC,AAAC,CACnB,OAAO,CAAE,IAAI,CACd,AAED,AAAA,iBAAiB,AAAC,CAChB,OAAO,CAAE,MAAM,CACf,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,iBAAiB,CAAC,GAAG,AAAC,CACpB,KAAK,CAAE,IAAI,CACX,QAAQ,CAAE,IAAI,CACf,AAED,AAAA,iBAAiB,CAAC,EAAE,AAAC,CACnB,OAAO,CAAE,IAAI,CACb,MAAM,CAAE,IAAI,CACZ,WAAW,CAAE,MAAM,CACnB,GAAG,CAAE,IAAI,CACV,AAED,AAAA,iBAAiB,CAAC,CAAC,AAAC,CAClB,OAAO,C
AAE,IAAI,CACb,OAAO,CAAE,QAAQ,CACjB,GAAG,CAAE,GAAG,CACR,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,iBAAiB,CAAC,CAAC,CAAC,KAAK,CAAE,iBAAiB,CAAC,CAAC,CAAC,KAAK,CAAE,iBAAiB,CAAC,CAAC,AAAA,OAAO,AAAC,CAC/E,aAAa,CAAE,SAAS,CACzB,AAED,AAAA,qBAAqB,AAAC,CACpB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,aAAa,CAAE,IAAI,CACnB,eAAe,CAAE,QAAQ,CACzB,GAAG,CAAE,IACP,CAAC,AAED,AAAA,mBAAmB,AAAC,CAClB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,QAAQ,CACtB,AAGD,MAAM,mBACJ,CAAA,AAAA,mBAAmB,AAAC,CAClB,WAAW,CAAE,MAAM,CACpB,CAAA,AC3DH,AAAA,mBAAmB,AAAC,CAClB,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,IAAI,CACT,eAAe,CAAE,UAAU,CAC3B,YAAY,CAAE,IAAI,CAClB,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,iBAAiB,AAAC,CAChB,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,IAAI,CACT,QAAQ,CAAE,IAAI,CACd,eAAe,CAAE,QAAQ,CACzB,aAAa,CAAE,IAAI,CACnB,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,sBAAsB,CAAE,iBAAiB,AAAC,CACxC,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,eAAe,CAAE,MAAM,CACvB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,sBAAsB,CAAC,CAAC,AAAC,CACvB,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACpB,AAGD,MAAM,mBACJ,CAAA,AAAA,sBAAsB,AAAC,CACrB,eAAe,CAAE,UAAU,CAC3B,YAAY,CAAE,IAAI,CACnB,AAED,AAAA,mBAAmB,AAAC,CAClB,eAAe,CAAE,QAAQ,CACzB,aAAa,CAAE,IAAI,CACpB,CALA,AClCH,AAAA,MAAM,AAAC,CACL,OAAO,CAAE,SAAS,CAClB,KAAK,CAAE,KAAK,CACZ,IAAI,CAAE,CAAC,CACP,OAAO,CAAE,IAAI,CACb,WAAW,CAAE,MAAM,CACnB,eAAe,CAAE,MAAM,CACvB,cAAc,CAAE,MAAM,CACtB,UAAU,CAAE,4BAA4B,CACxC,QAAQ,CAAE,QAAQ,CAClB,KAAK,CAAE,aAAa,CACrB,AAED,AAAA,MAAM,EAAE,KAAK,AAAC,CACZ,OAAO,CAAE,EAAE,CACX,QAAQ,CAAE,QAAQ,CAClB,GAAG,CAAE,CAAC,CACN,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,CAAC,CACR,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,EAAE,CACX,UAAU,CAAE,OAAO,CACnB,MAAM,CAAE,UAAU,CACnB,AAED,AAAA,MAAM,CAAC,EAAE,AAAC,CACR,WAAW,CAAE,wBAAwB,CACrC,SAAS,CAAE,GAAG,CACd,UAAU,CAAE,MAAM,CACnB,AAED,AAAA,MAAM,CAAC,EAAE,AAAC,CACR,WAAW,CAAE,wBAAwB,CACrC,SAAS,CAAE,GAAG,CACd,UAAU,CAAE,MAAM,CACnB,AAED,AAAA,MAAM,CAAC,EAAE,AAAC,CACR,WAAW,CAAE,wBAAwB,CACrC,SAAS,CAAE,KAAK,CAChB,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,IAAI,CAClB,AAED,AAAA,MAAM,CAAC,CAAC,AAAC,CACP,SAAS,CAAE,GAAG,CACd,WAAW,CAAE,IAAI,CAClB,AAED,AAAA,eAAe,AAAC,CACd,OA
AO,CAAE,IAAI,CACb,GAAG,CAAE,IAAI,CACT,WAAW,CAAE,IAAI,CACjB,eAAe,CAAE,MAAM,CACxB,AAED,AAAA,kBAAkB,AAAC,CACjB,OAAO,CAAE,IAAI,CACb,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,GAAG,CAChB,cAAc,CAAE,GAAG,CACnB,GAAG,CAAE,GAAG,CACR,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,IAAI,CACZ,AAED,AAAA,iBAAiB,CAAC,KAAK,AAAC,CACtB,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,eAAe,CAAE,GAAG,CACrB,AAED,AAAA,iBAAiB,CAAC,CAAC,AAAC,CAClB,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,MAAM,CACtB,WAAW,CAAE,MAAM,CACpB,AAED,MAAM,mBACJ,CAAA,AAAA,kBAAkB,AAAC,CACjB,cAAc,CAAE,GAAG,CACnB,KAAK,CAAE,GAAG,CACV,WAAW,CAAE,GAAG,CAChB,GAAG,CAAE,CAAC,CACP,AAED,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,GAAG,CACX,CAJA,AAOH,MAAM,oBACJ,CAAA,AAAA,kBAAkB,AAAC,CACjB,KAAK,CAAE,GAAG,CACV,WAAW,CAAE,GAAG,CACjB,AAED,AAAA,iBAAiB,CAAC,KAAK,AAAC,CACtB,KAAK,CAAE,KAAK,CACZ,MAAM,CAAE,KAAK,CACb,eAAe,CAAE,MAAM,CACxB,CANA,AASH,MAAM,oBACJ,CAAA,AAAA,kBAAkB,AAAC,CACjB,KAAK,CAAE,GAAG,CACX,AAED,AAAA,iBAAiB,CAAC,KAAK,AAAC,CACtB,KAAK,CAAE,KAAK,CACZ,MAAM,CAAE,KAAK,CACb,eAAe,CAAE,KAAK,CACvB,CANA,AASH,MAAM,oBACJ,CAAA,AAAA,kBAAkB,AAAC,CACjB,KAAK,CAAE,GAAG,CACX,AAED,AAAA,iBAAiB,CAAC,KAAK,AAAC,CACtB,KAAK,CAAE,KAAK,CACZ,MAAM,CAAE,KAAK,CACb,eAAe,CAAE,IAAI,CACtB,CANA,AC1HH,AAAA,WAAW,AAAC,CACV,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,GAAG,CAAE,GAAG,CACR,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,GAAG,CAChB,WAAW,CAAE,KAAK,CAClB,KAAK,CAAE,OAAO,CACd,UAAU,CAAE,OAAO,CACnB,MAAM,CAAE,iBAAiB,CACzB,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,OAAO,CACjB,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,WAAW,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,WAAW,AAAC,CAC1E,KAAK,CAAE,OAAO,CACd,UAAU,CAAE,OAAO,CACnB,MAAM,CAAE,iBAAiB,CAC1B,AAED,AAAA,WAAW,CAAC,KAAK,AAAC,CAChB,SAAS,CAAE,UAAS,CACrB,AAED,AAAA,iBAAiB,AAAC,CAChB,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,gBAAgB,CAAE,qkoCAAqkoC,CACvloC,aAAa,CAAE,GAAG,CAClB,UAAU,CAAE,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,eAAkB,CAC3C,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,iBAAiB,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,iBAAiB,AAAC,CACtF,UAAU,CAAE,GAAG,CAAC,GAAG,CA
AC,GAAG,CAAC,qBAAwB,CACjD,AAED,AAAA,UAAU,AAAC,CACT,UAAU,CAAE,oBAAoB,CAChC,MAAM,CAAE,GAAG,CAAC,MAAM,CAAC,mBAAmB,CACtC,aAAa,CAAE,GAAG,CAClB,MAAM,CAAE,OAAO,CAChB,ACzCD,AAAA,SAAS,AAAC,CACR,QAAQ,CAAE,QAAQ,CACnB,AAED,AAAA,aAAa,AAAC,CACZ,OAAO,CAAE,IAAI,CACb,cAAc,CAAE,GAAG,CACnB,UAAU,CAAE,iBAAiB,CAC7B,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,MAAM,CACpB,AAED,AAAA,aAAa,CAAC,YAAY,AAAC,CACzB,OAAO,CAAE,EAAE,CACZ,AAED,AAAA,cAAc,AAAC,CACb,OAAO,CAAE,IAAI,CACb,QAAQ,CAAE,QAAQ,CAClB,KAAK,CAAE,CAAC,CACR,GAAG,CAAE,IAAI,CACT,SAAS,CAAE,KAAK,CAChB,UAAU,CAAE,KAAK,CACjB,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,iBAAiB,CAC7B,KAAK,CAAE,aAAa,CACpB,UAAU,CAAE,kBAAkB,CAC9B,OAAO,CAAE,CAAC,CACV,aAAa,CAAE,GAAG,CAClB,OAAO,CAAE,GAAG,CACb,AAED,AAAA,cAAc,AAAA,KAAK,AAAC,CAClB,OAAO,CAAE,KAAK,CACf,AAED,AAAA,cAAc,CAAC,MAAM,CAAE,cAAc,CAAC,CAAC,AAAC,CACtC,KAAK,CAAE,IAAI,CACX,OAAO,CAAE,IAAI,CACb,GAAG,CAAE,GAAG,CACR,OAAO,CAAE,GAAG,CACZ,WAAW,CAAE,MAAM,CACnB,eAAe,CAAE,MAAM,CACvB,MAAM,CAAE,OAAO,CAChB,AAED,AAAA,cAAc,CAAC,MAAM,CAAC,KAAK,CAAE,cAAc,CAAC,CAAC,CAAC,KAAK,AAAC,CAClD,UAAU,CAAE,oBAAoB,CACjC" -} \ No newline at end of file diff --git a/docs/scss/theme/default.css b/docs/scss/theme/default.css deleted file mode 100644 index 0c51d7a..0000000 --- a/docs/scss/theme/default.css +++ /dev/null @@ -1,3 +0,0 @@ -@font-face{font-family:'Inter';font-style:normal;font-weight:400;font-display:swap;src:url("/font/Inter-Regular.woff2?v=3.19") format("woff2"),url("/font/Inter-Regular.woff?v=3.19") format("woff")}@font-face{font-family:'Inter';font-style:italic;font-weight:400;font-display:swap;src:url("/font/Inter-Italic.woff2?v=3.19") format("woff2"),url("/font/Inter-Italic.woff?v=3.19") format("woff")}@font-face{font-family:'Inter';font-style:normal;font-weight:600;font-display:swap;src:url("/font/Inter-SemiBold.woff2?v=3.19") format("woff2"),url("/font/Inter-SemiBold.woff?v=3.19") 
format("woff")}@font-face{font-family:'Inter';font-style:italic;font-weight:600;font-display:swap;src:url("/font/Inter-SemiBoldItalic.woff2?v=3.19") format("woff2"),url("/font/Inter-SemiBoldItalic.woff?v=3.19") format("woff")}.icon{display:block;width:18px;height:18px}.icon-facebook{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 30 30' fill='%231877f2' %3E%3Cpath d='M30 15.091C30 6.756 23.285 0 15 0S0 6.756 0 15.091C0 22.625 5.484 28.868 12.656 30V19.454H8.848V15.09h3.808v-3.324c0-3.782 2.239-5.872 5.666-5.872 1.64 0 3.358.295 3.358.295v3.714h-1.893c-1.863 0-2.443 1.164-2.443 2.358v2.83h4.16l-.665 4.362h-3.495V30C24.516 28.868 30 22.625 30 15.091z'%3E%3C/path%3E%3C/svg%3E")}.icon-twitter{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 24 24' fill='%231d9bf0' %3E%3Cpath d='M24 4.557c-.883.392-1.832.656-2.828.775 1.017-.609 1.798-1.574 2.165-2.724-.951.564-2.005.974-3.127 1.195-.897-.957-2.178-1.555-3.594-1.555-3.179 0-5.515 2.966-4.797 6.045-4.091-.205-7.719-2.165-10.148-5.144-1.29 2.213-.669 5.108 1.523 6.574-.806-.026-1.566-.247-2.229-.616-.054 2.281 1.581 4.415 3.949 4.89-.693.188-1.452.232-2.224.084.626 1.956 2.444 3.379 4.6 3.419-2.07 1.623-4.678 2.348-7.29 2.04 2.179 1.397 4.768 2.212 7.548 2.212 9.142 0 14.307-7.721 13.995-14.646.962-.695 1.797-1.562 2.457-2.549z'/%3E%3C/svg%3E");transform:scale(1.1)}.icon-youtube{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 24 24' fill='%23ff0000' %3E%3Cpath d='M23.498 6.186a3.016 3.016 0 0 0-2.122-2.136C19.505 3.545 12 3.545 12 3.545s-7.505 0-9.377.505A3.017 3.017 0 0 0 .502 6.186C0 8.07 0 12 0 12s0 3.93.502 5.814a3.016 3.016 0 0 0 2.122 2.136c1.871.505 9.376.505 9.376.505s7.505 0 9.377-.505a3.015 3.015 0 0 0 2.122-2.136C24 15.93 24 12 24 12s0-3.93-.502-5.814zM9.545 15.568V8.432L15.818 12l-6.273 
3.568z'%3E%3C/path%3E%3C/svg%3E");transform:scale(1.1)}.icon-github{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%2324292f' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E")}:root[data-color="dark"] .icon-github,:root[data-color="night"] .icon-github{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%236e7681' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E")}.icon-menu{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3Cpath d='M4,18h11c0.55,0,1-0.45,1-1v0c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,17.55,3.45,18,4,18z M4,13h8c0.55,0,1-0.45,1-1v0 
c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,12.55,3.45,13,4,13z M3,7L3,7c0,0.55,0.45,1,1,1h11c0.55,0,1-0.45,1-1v0 c0-0.55-0.45-1-1-1H4C3.45,6,3,6.45,3,7z M20.3,14.88L17.42,12l2.88-2.88c0.39-0.39,0.39-1.02,0-1.41l0,0 c-0.39-0.39-1.02-0.39-1.41,0l-3.59,3.59c-0.39,0.39-0.39,1.02,0,1.41l3.59,3.59c0.39,0.39,1.02,0.39,1.41,0l0,0 C20.68,15.91,20.69,15.27,20.3,14.88z'/%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3C/svg%3E")}.icon-toc{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' fill='%23000000'%3E%3Cpath d='M0 0h24v24H0V0zm0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M3 9h14V7H3v2zm0 4h14v-2H3v2zm0 4h14v-2H3v2zm16 0h2v-2h-2v2zm0-10v2h2V7h-2zm0 6h2v-2h-2v2z'/%3E%3C/svg%3E")}.icon-close{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41z'/%3E%3C/svg%3E")}.icon-home{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Crect fill='none' height='24' width='24'/%3E%3Cpolygon opacity='.3' points='18,19 13,19 13,15 11,15 11,19 6,19 6,10.1 12,5.52 18,10.1'/%3E%3Cpath d='M12,3L6,7.58V6H4v3.11L1,11.4l1.21,1.59L4,11.62V21h16v-9.38l1.79,1.36L23,11.4L12,3z M18,19h-5v-4h-2v4H6v-8.9l6-4.58 l6,4.58V19z M10,1c0,1.66-1.34,3-3,3C6.45,4,6,4.45,6,5H4c0-1.66,1.34-3,3-3c0.55,0,1-0.45,1-1H10z'/%3E%3C/svg%3E")}.icon-book{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg/%3E%3Cg%3E%3Cpath d='M21,5c-1.11-0.35-2.33-0.5-3.5-0.5c-1.95,0-4.05,0.4-5.5,1.5c-1.45-1.1-3.55-1.5-5.5-1.5S2.45,4.9,1,6v14.65 
c0,0.25,0.25,0.5,0.5,0.5c0.1,0,0.15-0.05,0.25-0.05C3.1,20.45,5.05,20,6.5,20c1.95,0,4.05,0.4,5.5,1.5c1.35-0.85,3.8-1.5,5.5-1.5 c1.65,0,3.35,0.3,4.75,1.05c0.1,0.05,0.15,0.05,0.25,0.05c0.25,0,0.5-0.25,0.5-0.5V6C22.4,5.55,21.75,5.25,21,5z M3,18.5V7 c1.1-0.35,2.3-0.5,3.5-0.5c1.34,0,3.13,0.41,4.5,0.99v11.5C9.63,18.41,7.84,18,6.5,18C5.3,18,4.1,18.15,3,18.5z M21,18.5 c-1.1-0.35-2.3-0.5-3.5-0.5c-1.34,0-3.13,0.41-4.5,0.99V7.49c1.37-0.59,3.16-0.99,4.5-0.99c1.2,0,2.4,0.15,3.5,0.5V18.5z'/%3E%3Cpath d='M11,7.49C9.63,6.91,7.84,6.5,6.5,6.5C5.3,6.5,4.1,6.65,3,7v11.5C4.1,18.15,5.3,18,6.5,18 c1.34,0,3.13,0.41,4.5,0.99V7.49z' opacity='.3'/%3E%3C/g%3E%3Cg%3E%3Cpath d='M17.5,10.5c0.88,0,1.73,0.09,2.5,0.26V9.24C19.21,9.09,18.36,9,17.5,9c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,10.69,16.18,10.5,17.5,10.5z'/%3E%3Cpath d='M17.5,13.16c0.88,0,1.73,0.09,2.5,0.26V11.9c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,13.36,16.18,13.16,17.5,13.16z'/%3E%3Cpath d='M17.5,15.83c0.88,0,1.73,0.09,2.5,0.26v-1.52c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,16.02,16.18,15.83,17.5,15.83z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E")}.icon-theme{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0z' fill='none'/%3E%3Cpath d='M12 3c-4.97 0-9 4.03-9 9s4.03 9 9 9c.83 0 1.5-.67 1.5-1.5 0-.39-.15-.74-.39-1.01-.23-.26-.38-.61-.38-.99 0-.83.67-1.5 1.5-1.5H16c2.76 0 5-2.24 5-5 0-4.42-4.03-8-9-8zm-5.5 9c-.83 0-1.5-.67-1.5-1.5S5.67 9 6.5 9 8 9.67 8 10.5 7.33 12 6.5 12zm3-4C8.67 8 8 7.33 8 6.5S8.67 5 9.5 5s1.5.67 1.5 1.5S10.33 8 9.5 8zm5 0c-.83 0-1.5-.67-1.5-1.5S13.67 5 14.5 5s1.5.67 1.5 1.5S15.33 8 14.5 8zm3 4c-.83 0-1.5-.67-1.5-1.5S16.67 9 17.5 9s1.5.67 1.5 1.5-.67 1.5-1.5 1.5z'/%3E%3C/svg%3E")}.icon-brightness{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' 
fill='none'/%3E%3Cpath d='M18 9.52V6h-3.52L12 3.52 9.52 6H6v3.52L3.52 12 6 14.48V18h3.52L12 20.48 14.48 18H18v-3.52L20.48 12 18 9.52zm-6 7.98v-11c3.03 0 5.5 2.47 5.5 5.5s-2.47 5.5-5.5 5.5z' opacity='.3'/%3E%3Cpath d='M20 8.69V4h-4.69L12 .69 8.69 4H4v4.69L.69 12 4 15.31V20h4.69L12 23.31 15.31 20H20v-4.69L23.31 12 20 8.69zm-2 5.79V18h-3.52L12 20.48 9.52 18H6v-3.52L3.52 12 6 9.52V6h3.52L12 3.52 14.48 6H18v3.52L20.48 12 18 14.48zM12 6.5v11c3.03 0 5.5-2.47 5.5-5.5S15.03 6.5 12 6.5z'/%3E%3C/svg%3E")}.icon-light-mode{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Ccircle cx='12' cy='12' opacity='.3' r='3'/%3E%3Cpath d='M12,9c1.65,0,3,1.35,3,3s-1.35,3-3,3s-3-1.35-3-3S10.35,9,12,9 M12,7c-2.76,0-5,2.24-5,5s2.24,5,5,5s5-2.24,5-5 S14.76,7,12,7L12,7z M2,13l2,0c0.55,0,1-0.45,1-1s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S1.45,13,2,13z M20,13l2,0c0.55,0,1-0.45,1-1 s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S19.45,13,20,13z M11,2v2c0,0.55,0.45,1,1,1s1-0.45,1-1V2c0-0.55-0.45-1-1-1S11,1.45,11,2z M11,20v2c0,0.55,0.45,1,1,1s1-0.45,1-1v-2c0-0.55-0.45-1-1-1C11.45,19,11,19.45,11,20z M5.99,4.58c-0.39-0.39-1.03-0.39-1.41,0 c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0s0.39-1.03,0-1.41L5.99,4.58z M18.36,16.95 c-0.39-0.39-1.03-0.39-1.41,0c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0c0.39-0.39,0.39-1.03,0-1.41 L18.36,16.95z M19.42,5.99c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06c-0.39,0.39-0.39,1.03,0,1.41 s1.03,0.39,1.41,0L19.42,5.99z M7.05,18.36c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06 c-0.39,0.39-0.39,1.03,0,1.41s1.03,0.39,1.41,0L7.05,18.36z'/%3E%3C/svg%3E")}.icon-dark-mode{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cpath 
d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27 C17.45,17.19,14.93,19,12,19c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z' opacity='.3'/%3E%3Cpath d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27C17.45,17.19,14.93,19,12,19 c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z M12,3c-4.97,0-9,4.03-9,9s4.03,9,9,9s9-4.03,9-9c0-0.46-0.04-0.92-0.1-1.36 c-0.98,1.37-2.58,2.26-4.4,2.26c-2.98,0-5.4-2.42-5.4-5.4c0-1.81,0.89-3.42,2.26-4.4C12.92,3.04,12.46,3,12,3L12,3z'/%3E%3C/svg%3E")}.icon-night-mode{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg%3E%3Cpath d='M8.1,14.15C9.77,14.63,11,16.17,11,18c0,0.68-0.19,1.31-0.48,1.87c0.48,0.09,0.97,0.14,1.48,0.14 c1.48,0,2.9-0.41,4.13-1.15c-2.62-0.92-5.23-2.82-6.8-5.86C7.74,9.94,7.78,7.09,8.29,4.9c-2.57,1.33-4.3,4.01-4.3,7.1c0,0,0,0,0,0 c0.01,0,0.01,0,0.02,0C5.66,12,7.18,12.83,8.1,14.15z' opacity='.3'/%3E%3Cpath d='M19.78,17.51c-2.47,0-6.57-1.33-8.68-5.43C8.77,7.57,10.6,3.6,11.63,2.01C6.27,2.2,1.98,6.59,1.98,12 c0,0.14,0.02,0.28,0.02,0.42C2.61,12.16,3.28,12,3.98,12c0,0,0,0,0,0c0-3.09,1.73-5.77,4.3-7.1C7.78,7.09,7.74,9.94,9.32,13 c1.57,3.04,4.18,4.95,6.8,5.86c-1.23,0.74-2.65,1.15-4.13,1.15c-0.5,0-1-0.05-1.48-0.14c-0.37,0.7-0.94,1.27-1.64,1.64 c0.98,0.32,2.03,0.5,3.11,0.5c3.5,0,6.58-1.8,8.37-4.52C20.18,17.5,19.98,17.51,19.78,17.51z'/%3E%3Cpath d='M7,16l-0.18,0C6.4,14.84,5.3,14,4,14c-1.66,0-3,1.34-3,3s1.34,3,3,3c0.62,0,2.49,0,3,0c1.1,0,2-0.9,2-2 C9,16.9,8.1,16,7,16z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E")}.icon-translate{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12.65 15.67c.14-.36.05-.77-.23-1.05l-2.09-2.06.03-.03c1.74-1.94 2.98-4.17 3.71-6.53h1.94c.54 0 
.99-.45.99-.99v-.02c0-.54-.45-.99-.99-.99H10V3c0-.55-.45-1-1-1s-1 .45-1 1v1H1.99c-.54 0-.99.45-.99.99 0 .55.45.99.99.99h10.18C11.5 7.92 10.44 9.75 9 11.35c-.81-.89-1.49-1.86-2.06-2.88-.16-.29-.45-.47-.78-.47-.69 0-1.13.75-.79 1.35.63 1.13 1.4 2.21 2.3 3.21L3.3 16.87c-.4.39-.4 1.03 0 1.42.39.39 1.02.39 1.42 0L9 14l2.02 2.02c.51.51 1.38.32 1.63-.35zM17.5 10c-.6 0-1.14.37-1.35.94l-3.67 9.8c-.24.61.22 1.26.87 1.26.39 0 .74-.24.88-.61l.89-2.39h4.75l.9 2.39c.14.36.49.61.88.61.65 0 1.11-.65.88-1.26l-3.67-9.8c-.22-.57-.76-.94-1.36-.94zm-1.62 7l1.62-4.33L19.12 17h-3.24z'/%3E%3C/svg%3E")}.icon-search{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M15.5 14h-.79l-.28-.27c1.2-1.4 1.82-3.31 1.48-5.34-.47-2.78-2.79-5-5.59-5.34-4.23-.52-7.79 3.04-7.27 7.27.34 2.8 2.56 5.12 5.34 5.59 2.03.34 3.94-.28 5.34-1.48l.27.28v.79l4.25 4.25c.41.41 1.08.41 1.49 0 .41-.41.41-1.08 0-1.49L15.5 14zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 14 9.5 14z'/%3E%3C/svg%3E")}.icon-select{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12 5.83L15.17 9l1.41-1.41L12 3 7.41 7.59 8.83 9 12 5.83zm0 12.34L8.83 15l-1.41 1.41L12 21l4.59-4.59L15.17 15 12 18.17z'/%3E%3C/svg%3E")}.icon-calendar{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Crect height='2' opacity='.3' width='14' x='5' y='6'/%3E%3Cpath d='M19,4h-1V2h-2v2H8V2H6v2H5C3.89,4,3.01,4.9,3.01,6L3,20c0,1.1,0.89,2,2,2h14c1.1,0,2-0.9,2-2V6C21,4.9,20.1,4,19,4z M19,20 H5V10h14V20z M19,8H5V6h14V8z M9,14H7v-2h2V14z M13,14h-2v-2h2V14z M17,14h-2v-2h2V14z M9,18H7v-2h2V18z M13,18h-2v-2h2V18z M17,18 
h-2v-2h2V18z'/%3E%3C/g%3E%3C/svg%3E")}.icon-next{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M24 24H0V0h24v24z' fill='none' opacity='.87'/%3E%3Cpath d='M7.38 21.01c.49.49 1.28.49 1.77 0l8.31-8.31c.39-.39.39-1.02 0-1.41L9.15 2.98c-.49-.49-1.28-.49-1.77 0s-.49 1.28 0 1.77L14.62 12l-7.25 7.25c-.48.48-.48 1.28.01 1.76z' fill='%23328ac1'/%3E%3C/svg%3E")}.icon-prev{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cg%3E%3Cpath d='M16.88,2.88L16.88,2.88c-0.49-0.49-1.28-0.49-1.77,0l-8.41,8.41c-0.39,0.39-0.39,1.02,0,1.41l8.41,8.41 c0.49,0.49,1.28,0.49,1.77,0l0,0c0.49-0.49,0.49-1.28,0-1.77L9.54,12l7.35-7.35C17.37,4.16,17.37,3.37,16.88,2.88z' fill='%23328ac1'/%3E%3C/g%3E%3C/svg%3E")}.icon-copyright{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M10.08 10.86c.05-.33.16-.62.3-.87s.34-.46.59-.62c.24-.15.54-.22.91-.23.23.01.44.05.63.13.2.09.38.21.52.36s.25.33.34.53.13.42.14.64h1.79c-.02-.47-.11-.9-.28-1.29s-.4-.73-.7-1.01-.66-.5-1.08-.66-.88-.23-1.39-.23c-.65 0-1.22.11-1.7.34s-.88.53-1.2.92-.56.84-.71 1.36S8 11.29 8 11.87v.27c0 .58.08 1.12.23 1.64s.39.97.71 1.35.72.69 1.2.91c.48.22 1.05.34 1.7.34.47 0 .91-.08 1.32-.23s.77-.36 1.08-.63.56-.58.74-.94.29-.74.3-1.15h-1.79c-.01.21-.06.4-.15.58s-.21.33-.36.46-.32.23-.52.3c-.19.07-.39.09-.6.1-.36-.01-.66-.08-.89-.23-.25-.16-.45-.37-.59-.62s-.25-.55-.3-.88-.08-.67-.08-1v-.27c0-.35.03-.68.08-1.01zM12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8z'/%3E%3C/svg%3E")}.icon-love{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' 
fill='%23ff4d4d' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M13.35 20.13c-.76.69-1.93.69-2.69-.01l-.11-.1C5.3 15.27 1.87 12.16 2 8.28c.06-1.7.93-3.33 2.34-4.29 2.64-1.8 5.9-.96 7.66 1.1 1.76-2.06 5.02-2.91 7.66-1.1 1.41.96 2.28 2.59 2.34 4.29.14 3.88-3.3 6.99-8.55 11.76l-.1.09z'/%3E%3C/svg%3E")}:root{--font-family: 'Inter', sans-serif;--font-family-brand: 'Times', serif;--font-family-code: 'Menlo', monospace;--background: #ffffff;--color: #355265;--color2: #274457;--color3: #476d86;--color-anchor: #328ac1;--color-hover: #4b9dd0;--background-fg: #f7f7f7;--background-fg2: #ebebeb;--border-color: #dddddd;--box-shadow: 0 0 1px rgba(0, 0, 0, .7);--box-shadow2: 0 0 3px rgba(0, 0, 0, .2);--blur: 10px;--home-cover-background: radial-gradient(circle, rgba(255,255,255,1) 0%, rgba(255,255,250,1) 25%, rgba(214,219,220,1) 50%, rgba(255,255,250,1) 75%, rgba(255,255,255,1) 100%);--icon-filter: invert(41%) sepia(19%) saturate(840%) hue-rotate(161deg) brightness(92%) contrast(92%);--chroma-base00: #f9f9f9;--chroma-base01: #e0e0e0;--chroma-base02: rgba(159, 218, 159, .2);--chroma-base03: #8e908c;--chroma-base04: #969896;--chroma-base05: #4d4d4c;--chroma-base06: #282a2e;--chroma-base07: #1d1f21;--chroma-base08: #c82829;--chroma-base09: #f5871f;--chroma-base0A: #eab700;--chroma-base0B: #718c00;--chroma-base0C: #3e999f;--chroma-base0D: #4271ae;--chroma-base0E: #8959a8;--chroma-base0F: #a3685a}:root[data-color="dark"]{--background: #121212;--color: #efefef;--color2: #ffffff;--color3: #b3b3b3;--background-fg: #333333;--background-fg2: #1f1f1f;--border-color: rgba(255, 255, 255, .4);--box-shadow: 0 0 1px rgba(255, 255, 255, 1);--box-shadow2: 0 0 3px rgba(255, 255, 255, .6);--home-cover-background: radial-gradient(circle, rgba(23,23,25,1) 0%, rgba(18,18,0,1) 25%, rgba(32,32,32,1) 50%, rgba(18,18,0,1) 75%, rgba(23,23,25,1) 100%);--icon-filter: invert(83%) sepia(0%) saturate(1582%) hue-rotate(126deg) brightness(86%) contrast(80%);--chroma-base00: #080808;--chroma-base01: 
#393939;--chroma-base02: rgba(159, 218, 159, .1);--chroma-base03: #999999;--chroma-base04: #b4b7b4;--chroma-base05: #cccccc;--chroma-base06: #e0e0e0;--chroma-base07: #ffffff;--chroma-base08: #f2777a;--chroma-base09: #f99157;--chroma-base0A: #ffcc66;--chroma-base0B: #99cc99;--chroma-base0C: #66cccc;--chroma-base0D: #6699cc;--chroma-base0E: #cc99cc;--chroma-base0F: #a3685a}:root[data-color="night"]{--background: #333333;--color: #cccccc;--color2: #dedede;--color3: #9d9d9d;--background-fg: #444444;--background-fg2: #303030;--border-color: rgba(255, 255, 255, 0.2);--box-shadow: 0 0 1px rgba(225, 255, 255, 1);--box-shadow2: 0 0 3px rgba(255, 255, 255, .6);--home-cover-background: radial-gradient(circle, rgba(52,52,52,1) 0%, rgba(42,42,42,1) 25%, rgba(57,57,57,1) 50%, rgba(42,42,42,1) 75%, rgba(52,52,52,1) 100%);--icon-filter: invert(60%) sepia(25%) saturate(20%) hue-rotate(343deg) brightness(98%) contrast(94%);--chroma-base00: #1e1e1e;--chroma-base01: #323537;--chroma-base02: rgba(159, 218, 159, .1);--chroma-base03: #5f5a60;--chroma-base04: #838184;--chroma-base05: #a7a7a7;--chroma-base06: #c3c3c3;--chroma-base07: #ffffff;--chroma-base08: #cf6a4c;--chroma-base09: #cda869;--chroma-base0A: #f9ee98;--chroma-base0B: #8f9d6a;--chroma-base0C: #afc4db;--chroma-base0D: #7587a6;--chroma-base0E: #9b859d;--chroma-base0F: #9b703f}.icon:not(.icon-colored){filter:var(--icon-filter)} - -/*# sourceMappingURL=default.css.map */ \ No newline at end of file diff --git a/docs/scss/theme/default.css.map b/docs/scss/theme/default.css.map deleted file mode 100644 index 708eb38..0000000 --- a/docs/scss/theme/default.css.map +++ /dev/null @@ -1,17 +0,0 @@ -{ - "version": 3, - "file": "default.css", - "sourceRoot": "D:/project/gitlab/llm/external/ant_group/codefuse-ai.github.io", - "sources": [ - "themes/docura/assets/scss/theme/default.scss", - "themes/docura/assets/scss/font/inter.scss", - "themes/docura/assets/scss/icon/default.scss" - ], - "sourcesContent": [ - "@import 
\"../font/inter\";\n@import \"../icon/default\";\n\n:root {\n --font-family: 'Inter', sans-serif;\n --font-family-brand: 'Times', serif;\n --font-family-code: 'Menlo', monospace;\n\n --background: #ffffff;\n --color: #355265;\n --color2: #274457;\n --color3: #476d86;\n\n --color-anchor: #328ac1;\n --color-hover: #4b9dd0;\n\n --background-fg: #f7f7f7;\n --background-fg2: #ebebeb;\n --border-color: #dddddd;\n\n --box-shadow: 0 0 1px rgba(0, 0, 0, .7);\n --box-shadow2: 0 0 3px rgba(0, 0, 0, .2);\n\n --blur: 10px;\n\n --home-cover-background: radial-gradient(circle, rgba(255,255,255,1) 0%, rgba(255,255,250,1) 25%, rgba(214,219,220,1) 50%, rgba(255,255,250,1) 75%, rgba(255,255,255,1) 100%);\n\n --icon-filter: invert(41%) sepia(19%) saturate(840%) hue-rotate(161deg) brightness(92%) contrast(92%);\n\n /* base16 tomorrow */\n --chroma-base00: #f9f9f9;\n --chroma-base01: #e0e0e0;\n --chroma-base02: rgba(159, 218, 159, .2);\n --chroma-base03: #8e908c;\n --chroma-base04: #969896;\n --chroma-base05: #4d4d4c;\n --chroma-base06: #282a2e;\n --chroma-base07: #1d1f21;\n --chroma-base08: #c82829;\n --chroma-base09: #f5871f;\n --chroma-base0A: #eab700;\n --chroma-base0B: #718c00;\n --chroma-base0C: #3e999f;\n --chroma-base0D: #4271ae;\n --chroma-base0E: #8959a8;\n --chroma-base0F: #a3685a;\n}\n\n:root[data-color=\"dark\"] {\n --background: #121212;\n --color: #efefef;\n --color2: #ffffff;\n --color3: #b3b3b3;\n\n --background-fg: #333333;\n --background-fg2: #1f1f1f;\n --border-color: rgba(255, 255, 255, .4);\n\n --box-shadow: 0 0 1px rgba(255, 255, 255, 1);\n --box-shadow2: 0 0 3px rgba(255, 255, 255, .6);\n\n --home-cover-background: radial-gradient(circle, rgba(23,23,25,1) 0%, rgba(18,18,0,1) 25%, rgba(32,32,32,1) 50%, rgba(18,18,0,1) 75%, rgba(23,23,25,1) 100%);\n\n --icon-filter: invert(83%) sepia(0%) saturate(1582%) hue-rotate(126deg) brightness(86%) contrast(80%);\n\n /* base16 tomorrow night */\n --chroma-base00: #080808;\n --chroma-base01: #393939;\n --chroma-base02: 
rgba(159, 218, 159, .1);\n --chroma-base03: #999999;\n --chroma-base04: #b4b7b4;\n --chroma-base05: #cccccc;\n --chroma-base06: #e0e0e0;\n --chroma-base07: #ffffff;\n --chroma-base08: #f2777a;\n --chroma-base09: #f99157;\n --chroma-base0A: #ffcc66;\n --chroma-base0B: #99cc99;\n --chroma-base0C: #66cccc;\n --chroma-base0D: #6699cc;\n --chroma-base0E: #cc99cc;\n --chroma-base0F: #a3685a;\n}\n\n:root[data-color=\"night\"] {\n --background: #333333;\n --color: #cccccc;\n --color2: #dedede;\n --color3: #9d9d9d;\n\n --background-fg: #444444;\n --background-fg2: #303030;\n --border-color: rgba(255, 255, 255, 0.2);\n\n --box-shadow: 0 0 1px rgba(225, 255, 255, 1);\n --box-shadow2: 0 0 3px rgba(255, 255, 255, .6);\n\n --home-cover-background: radial-gradient(circle, rgba(52,52,52,1) 0%, rgba(42,42,42,1) 25%, rgba(57,57,57,1) 50%, rgba(42,42,42,1) 75%, rgba(52,52,52,1) 100%);\n\n --icon-filter: invert(60%) sepia(25%) saturate(20%) hue-rotate(343deg) brightness(98%) contrast(94%);\n\n /* base16 twilight */\n --chroma-base00: #1e1e1e;\n --chroma-base01: #323537;\n --chroma-base02: rgba(159, 218, 159, .1);\n --chroma-base03: #5f5a60;\n --chroma-base04: #838184;\n --chroma-base05: #a7a7a7;\n --chroma-base06: #c3c3c3;\n --chroma-base07: #ffffff;\n --chroma-base08: #cf6a4c;\n --chroma-base09: #cda869;\n --chroma-base0A: #f9ee98;\n --chroma-base0B: #8f9d6a;\n --chroma-base0C: #afc4db;\n --chroma-base0D: #7587a6;\n --chroma-base0E: #9b859d;\n --chroma-base0F: #9b703f;\n}\n\n.icon:not(.icon-colored) {\n filter: var(--icon-filter);\n}", - "@font-face {\n font-family: 'Inter';\n font-style: normal;\n font-weight: 400;\n font-display: swap;\n src: url(\"/font/Inter-Regular.woff2?v=3.19\") format(\"woff2\"),\n url(\"/font/Inter-Regular.woff?v=3.19\") format(\"woff\");\n}\n\n@font-face {\n font-family: 'Inter';\n font-style: italic;\n font-weight: 400;\n font-display: swap;\n src: url(\"/font/Inter-Italic.woff2?v=3.19\") format(\"woff2\"),\n url(\"/font/Inter-Italic.woff?v=3.19\") 
format(\"woff\");\n}\n\n@font-face {\n font-family: 'Inter';\n font-style: normal;\n font-weight: 600;\n font-display: swap;\n src: url(\"/font/Inter-SemiBold.woff2?v=3.19\") format(\"woff2\"),\n url(\"/font/Inter-SemiBold.woff?v=3.19\") format(\"woff\");\n}\n\n@font-face {\n font-family: 'Inter';\n font-style: italic;\n font-weight: 600;\n font-display: swap;\n src: url(\"/font/Inter-SemiBoldItalic.woff2?v=3.19\") format(\"woff2\"),\n url(\"/font/Inter-SemiBoldItalic.woff?v=3.19\") format(\"woff\");\n}\n", - ".icon {\n display: block;\n width: 18px;\n height: 18px;\n}\n\n/* -- social icons: add `.icon-colored` with `.icon` -- */\n.icon-facebook {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 30 30' fill='%231877f2' %3E%3Cpath d='M30 15.091C30 6.756 23.285 0 15 0S0 6.756 0 15.091C0 22.625 5.484 28.868 12.656 30V19.454H8.848V15.09h3.808v-3.324c0-3.782 2.239-5.872 5.666-5.872 1.64 0 3.358.295 3.358.295v3.714h-1.893c-1.863 0-2.443 1.164-2.443 2.358v2.83h4.16l-.665 4.362h-3.495V30C24.516 28.868 30 22.625 30 15.091z'%3E%3C/path%3E%3C/svg%3E\");\n}\n\n.icon-twitter {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 24 24' fill='%231d9bf0' %3E%3Cpath d='M24 4.557c-.883.392-1.832.656-2.828.775 1.017-.609 1.798-1.574 2.165-2.724-.951.564-2.005.974-3.127 1.195-.897-.957-2.178-1.555-3.594-1.555-3.179 0-5.515 2.966-4.797 6.045-4.091-.205-7.719-2.165-10.148-5.144-1.29 2.213-.669 5.108 1.523 6.574-.806-.026-1.566-.247-2.229-.616-.054 2.281 1.581 4.415 3.949 4.89-.693.188-1.452.232-2.224.084.626 1.956 2.444 3.379 4.6 3.419-2.07 1.623-4.678 2.348-7.29 2.04 2.179 1.397 4.768 2.212 7.548 2.212 9.142 0 14.307-7.721 13.995-14.646.962-.695 1.797-1.562 2.457-2.549z'/%3E%3C/svg%3E\");\n transform: scale(1.1);\n}\n\n.icon-youtube {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' 
viewBox='0 0 24 24' fill='%23ff0000' %3E%3Cpath d='M23.498 6.186a3.016 3.016 0 0 0-2.122-2.136C19.505 3.545 12 3.545 12 3.545s-7.505 0-9.377.505A3.017 3.017 0 0 0 .502 6.186C0 8.07 0 12 0 12s0 3.93.502 5.814a3.016 3.016 0 0 0 2.122 2.136c1.871.505 9.376.505 9.376.505s7.505 0 9.377-.505a3.015 3.015 0 0 0 2.122-2.136C24 15.93 24 12 24 12s0-3.93-.502-5.814zM9.545 15.568V8.432L15.818 12l-6.273 3.568z'%3E%3C/path%3E%3C/svg%3E\");\n transform: scale(1.1);\n}\n\n.icon-github {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%2324292f' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E\");\n}\n\n:root[data-color=\"dark\"] .icon-github, :root[data-color=\"night\"] .icon-github {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%236e7681' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 
.21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E\");\n}\n\n\n/* -- template icons -- */\n.icon-menu {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3Cpath d='M4,18h11c0.55,0,1-0.45,1-1v0c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,17.55,3.45,18,4,18z M4,13h8c0.55,0,1-0.45,1-1v0 c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,12.55,3.45,13,4,13z M3,7L3,7c0,0.55,0.45,1,1,1h11c0.55,0,1-0.45,1-1v0 c0-0.55-0.45-1-1-1H4C3.45,6,3,6.45,3,7z M20.3,14.88L17.42,12l2.88-2.88c0.39-0.39,0.39-1.02,0-1.41l0,0 c-0.39-0.39-1.02-0.39-1.41,0l-3.59,3.59c-0.39,0.39-0.39,1.02,0,1.41l3.59,3.59c0.39,0.39,1.02,0.39,1.41,0l0,0 C20.68,15.91,20.69,15.27,20.3,14.88z'/%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3C/svg%3E\");\n}\n\n.icon-toc {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' fill='%23000000'%3E%3Cpath d='M0 0h24v24H0V0zm0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M3 9h14V7H3v2zm0 4h14v-2H3v2zm0 4h14v-2H3v2zm16 0h2v-2h-2v2zm0-10v2h2V7h-2zm0 6h2v-2h-2v2z'/%3E%3C/svg%3E\");\n}\n\n.icon-close {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41z'/%3E%3C/svg%3E\");\n}\n\n.icon-home {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Crect fill='none' height='24' width='24'/%3E%3Cpolygon opacity='.3' points='18,19 13,19 13,15 11,15 11,19 6,19 6,10.1 12,5.52 18,10.1'/%3E%3Cpath d='M12,3L6,7.58V6H4v3.11L1,11.4l1.21,1.59L4,11.62V21h16v-9.38l1.79,1.36L23,11.4L12,3z M18,19h-5v-4h-2v4H6v-8.9l6-4.58 l6,4.58V19z 
M10,1c0,1.66-1.34,3-3,3C6.45,4,6,4.45,6,5H4c0-1.66,1.34-3,3-3c0.55,0,1-0.45,1-1H10z'/%3E%3C/svg%3E\");\n}\n\n.icon-book {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg/%3E%3Cg%3E%3Cpath d='M21,5c-1.11-0.35-2.33-0.5-3.5-0.5c-1.95,0-4.05,0.4-5.5,1.5c-1.45-1.1-3.55-1.5-5.5-1.5S2.45,4.9,1,6v14.65 c0,0.25,0.25,0.5,0.5,0.5c0.1,0,0.15-0.05,0.25-0.05C3.1,20.45,5.05,20,6.5,20c1.95,0,4.05,0.4,5.5,1.5c1.35-0.85,3.8-1.5,5.5-1.5 c1.65,0,3.35,0.3,4.75,1.05c0.1,0.05,0.15,0.05,0.25,0.05c0.25,0,0.5-0.25,0.5-0.5V6C22.4,5.55,21.75,5.25,21,5z M3,18.5V7 c1.1-0.35,2.3-0.5,3.5-0.5c1.34,0,3.13,0.41,4.5,0.99v11.5C9.63,18.41,7.84,18,6.5,18C5.3,18,4.1,18.15,3,18.5z M21,18.5 c-1.1-0.35-2.3-0.5-3.5-0.5c-1.34,0-3.13,0.41-4.5,0.99V7.49c1.37-0.59,3.16-0.99,4.5-0.99c1.2,0,2.4,0.15,3.5,0.5V18.5z'/%3E%3Cpath d='M11,7.49C9.63,6.91,7.84,6.5,6.5,6.5C5.3,6.5,4.1,6.65,3,7v11.5C4.1,18.15,5.3,18,6.5,18 c1.34,0,3.13,0.41,4.5,0.99V7.49z' opacity='.3'/%3E%3C/g%3E%3Cg%3E%3Cpath d='M17.5,10.5c0.88,0,1.73,0.09,2.5,0.26V9.24C19.21,9.09,18.36,9,17.5,9c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,10.69,16.18,10.5,17.5,10.5z'/%3E%3Cpath d='M17.5,13.16c0.88,0,1.73,0.09,2.5,0.26V11.9c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,13.36,16.18,13.16,17.5,13.16z'/%3E%3Cpath d='M17.5,15.83c0.88,0,1.73,0.09,2.5,0.26v-1.52c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,16.02,16.18,15.83,17.5,15.83z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E\");\n}\n\n.icon-theme {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0z' fill='none'/%3E%3Cpath d='M12 3c-4.97 0-9 4.03-9 9s4.03 9 9 9c.83 0 1.5-.67 1.5-1.5 0-.39-.15-.74-.39-1.01-.23-.26-.38-.61-.38-.99 0-.83.67-1.5 1.5-1.5H16c2.76 0 5-2.24 5-5 0-4.42-4.03-8-9-8zm-5.5 9c-.83 
0-1.5-.67-1.5-1.5S5.67 9 6.5 9 8 9.67 8 10.5 7.33 12 6.5 12zm3-4C8.67 8 8 7.33 8 6.5S8.67 5 9.5 5s1.5.67 1.5 1.5S10.33 8 9.5 8zm5 0c-.83 0-1.5-.67-1.5-1.5S13.67 5 14.5 5s1.5.67 1.5 1.5S15.33 8 14.5 8zm3 4c-.83 0-1.5-.67-1.5-1.5S16.67 9 17.5 9s1.5.67 1.5 1.5-.67 1.5-1.5 1.5z'/%3E%3C/svg%3E\");\n}\n\n.icon-brightness {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M18 9.52V6h-3.52L12 3.52 9.52 6H6v3.52L3.52 12 6 14.48V18h3.52L12 20.48 14.48 18H18v-3.52L20.48 12 18 9.52zm-6 7.98v-11c3.03 0 5.5 2.47 5.5 5.5s-2.47 5.5-5.5 5.5z' opacity='.3'/%3E%3Cpath d='M20 8.69V4h-4.69L12 .69 8.69 4H4v4.69L.69 12 4 15.31V20h4.69L12 23.31 15.31 20H20v-4.69L23.31 12 20 8.69zm-2 5.79V18h-3.52L12 20.48 9.52 18H6v-3.52L3.52 12 6 9.52V6h3.52L12 3.52 14.48 6H18v3.52L20.48 12 18 14.48zM12 6.5v11c3.03 0 5.5-2.47 5.5-5.5S15.03 6.5 12 6.5z'/%3E%3C/svg%3E\");\n}\n\n.icon-light-mode {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Ccircle cx='12' cy='12' opacity='.3' r='3'/%3E%3Cpath d='M12,9c1.65,0,3,1.35,3,3s-1.35,3-3,3s-3-1.35-3-3S10.35,9,12,9 M12,7c-2.76,0-5,2.24-5,5s2.24,5,5,5s5-2.24,5-5 S14.76,7,12,7L12,7z M2,13l2,0c0.55,0,1-0.45,1-1s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S1.45,13,2,13z M20,13l2,0c0.55,0,1-0.45,1-1 s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S19.45,13,20,13z M11,2v2c0,0.55,0.45,1,1,1s1-0.45,1-1V2c0-0.55-0.45-1-1-1S11,1.45,11,2z M11,20v2c0,0.55,0.45,1,1,1s1-0.45,1-1v-2c0-0.55-0.45-1-1-1C11.45,19,11,19.45,11,20z M5.99,4.58c-0.39-0.39-1.03-0.39-1.41,0 c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0s0.39-1.03,0-1.41L5.99,4.58z M18.36,16.95 c-0.39-0.39-1.03-0.39-1.41,0c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0c0.39-0.39,0.39-1.03,0-1.41 L18.36,16.95z 
M19.42,5.99c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06c-0.39,0.39-0.39,1.03,0,1.41 s1.03,0.39,1.41,0L19.42,5.99z M7.05,18.36c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06 c-0.39,0.39-0.39,1.03,0,1.41s1.03,0.39,1.41,0L7.05,18.36z'/%3E%3C/svg%3E\");\n}\n\n.icon-dark-mode {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cpath d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27 C17.45,17.19,14.93,19,12,19c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z' opacity='.3'/%3E%3Cpath d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27C17.45,17.19,14.93,19,12,19 c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z M12,3c-4.97,0-9,4.03-9,9s4.03,9,9,9s9-4.03,9-9c0-0.46-0.04-0.92-0.1-1.36 c-0.98,1.37-2.58,2.26-4.4,2.26c-2.98,0-5.4-2.42-5.4-5.4c0-1.81,0.89-3.42,2.26-4.4C12.92,3.04,12.46,3,12,3L12,3z'/%3E%3C/svg%3E\");\n}\n\n.icon-night-mode {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg%3E%3Cpath d='M8.1,14.15C9.77,14.63,11,16.17,11,18c0,0.68-0.19,1.31-0.48,1.87c0.48,0.09,0.97,0.14,1.48,0.14 c1.48,0,2.9-0.41,4.13-1.15c-2.62-0.92-5.23-2.82-6.8-5.86C7.74,9.94,7.78,7.09,8.29,4.9c-2.57,1.33-4.3,4.01-4.3,7.1c0,0,0,0,0,0 c0.01,0,0.01,0,0.02,0C5.66,12,7.18,12.83,8.1,14.15z' opacity='.3'/%3E%3Cpath d='M19.78,17.51c-2.47,0-6.57-1.33-8.68-5.43C8.77,7.57,10.6,3.6,11.63,2.01C6.27,2.2,1.98,6.59,1.98,12 c0,0.14,0.02,0.28,0.02,0.42C2.61,12.16,3.28,12,3.98,12c0,0,0,0,0,0c0-3.09,1.73-5.77,4.3-7.1C7.78,7.09,7.74,9.94,9.32,13 c1.57,3.04,4.18,4.95,6.8,5.86c-1.23,0.74-2.65,1.15-4.13,1.15c-0.5,0-1-0.05-1.48-0.14c-0.37,0.7-0.94,1.27-1.64,1.64 
c0.98,0.32,2.03,0.5,3.11,0.5c3.5,0,6.58-1.8,8.37-4.52C20.18,17.5,19.98,17.51,19.78,17.51z'/%3E%3Cpath d='M7,16l-0.18,0C6.4,14.84,5.3,14,4,14c-1.66,0-3,1.34-3,3s1.34,3,3,3c0.62,0,2.49,0,3,0c1.1,0,2-0.9,2-2 C9,16.9,8.1,16,7,16z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E\");\n}\n\n.icon-translate {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12.65 15.67c.14-.36.05-.77-.23-1.05l-2.09-2.06.03-.03c1.74-1.94 2.98-4.17 3.71-6.53h1.94c.54 0 .99-.45.99-.99v-.02c0-.54-.45-.99-.99-.99H10V3c0-.55-.45-1-1-1s-1 .45-1 1v1H1.99c-.54 0-.99.45-.99.99 0 .55.45.99.99.99h10.18C11.5 7.92 10.44 9.75 9 11.35c-.81-.89-1.49-1.86-2.06-2.88-.16-.29-.45-.47-.78-.47-.69 0-1.13.75-.79 1.35.63 1.13 1.4 2.21 2.3 3.21L3.3 16.87c-.4.39-.4 1.03 0 1.42.39.39 1.02.39 1.42 0L9 14l2.02 2.02c.51.51 1.38.32 1.63-.35zM17.5 10c-.6 0-1.14.37-1.35.94l-3.67 9.8c-.24.61.22 1.26.87 1.26.39 0 .74-.24.88-.61l.89-2.39h4.75l.9 2.39c.14.36.49.61.88.61.65 0 1.11-.65.88-1.26l-3.67-9.8c-.22-.57-.76-.94-1.36-.94zm-1.62 7l1.62-4.33L19.12 17h-3.24z'/%3E%3C/svg%3E\");\n}\n\n.icon-search {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M15.5 14h-.79l-.28-.27c1.2-1.4 1.82-3.31 1.48-5.34-.47-2.78-2.79-5-5.59-5.34-4.23-.52-7.79 3.04-7.27 7.27.34 2.8 2.56 5.12 5.34 5.59 2.03.34 3.94-.28 5.34-1.48l.27.28v.79l4.25 4.25c.41.41 1.08.41 1.49 0 .41-.41.41-1.08 0-1.49L15.5 14zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 14 9.5 14z'/%3E%3C/svg%3E\");\n}\n\n.icon-select {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12 5.83L15.17 9l1.41-1.41L12 3 7.41 7.59 8.83 9 12 5.83zm0 12.34L8.83 15l-1.41 1.41L12 
21l4.59-4.59L15.17 15 12 18.17z'/%3E%3C/svg%3E\");\n}\n\n.icon-calendar {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Crect height='2' opacity='.3' width='14' x='5' y='6'/%3E%3Cpath d='M19,4h-1V2h-2v2H8V2H6v2H5C3.89,4,3.01,4.9,3.01,6L3,20c0,1.1,0.89,2,2,2h14c1.1,0,2-0.9,2-2V6C21,4.9,20.1,4,19,4z M19,20 H5V10h14V20z M19,8H5V6h14V8z M9,14H7v-2h2V14z M13,14h-2v-2h2V14z M17,14h-2v-2h2V14z M9,18H7v-2h2V18z M13,18h-2v-2h2V18z M17,18 h-2v-2h2V18z'/%3E%3C/g%3E%3C/svg%3E\");\n}\n\n.icon-next {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M24 24H0V0h24v24z' fill='none' opacity='.87'/%3E%3Cpath d='M7.38 21.01c.49.49 1.28.49 1.77 0l8.31-8.31c.39-.39.39-1.02 0-1.41L9.15 2.98c-.49-.49-1.28-.49-1.77 0s-.49 1.28 0 1.77L14.62 12l-7.25 7.25c-.48.48-.48 1.28.01 1.76z' fill='%23328ac1'/%3E%3C/svg%3E\");\n}\n\n.icon-prev {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cg%3E%3Cpath d='M16.88,2.88L16.88,2.88c-0.49-0.49-1.28-0.49-1.77,0l-8.41,8.41c-0.39,0.39-0.39,1.02,0,1.41l8.41,8.41 c0.49,0.49,1.28,0.49,1.77,0l0,0c0.49-0.49,0.49-1.28,0-1.77L9.54,12l7.35-7.35C17.37,4.16,17.37,3.37,16.88,2.88z' fill='%23328ac1'/%3E%3C/g%3E%3C/svg%3E\");\n}\n\n.icon-copyright {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M10.08 10.86c.05-.33.16-.62.3-.87s.34-.46.59-.62c.24-.15.54-.22.91-.23.23.01.44.05.63.13.2.09.38.21.52.36s.25.33.34.53.13.42.14.64h1.79c-.02-.47-.11-.9-.28-1.29s-.4-.73-.7-1.01-.66-.5-1.08-.66-.88-.23-1.39-.23c-.65 
0-1.22.11-1.7.34s-.88.53-1.2.92-.56.84-.71 1.36S8 11.29 8 11.87v.27c0 .58.08 1.12.23 1.64s.39.97.71 1.35.72.69 1.2.91c.48.22 1.05.34 1.7.34.47 0 .91-.08 1.32-.23s.77-.36 1.08-.63.56-.58.74-.94.29-.74.3-1.15h-1.79c-.01.21-.06.4-.15.58s-.21.33-.36.46-.32.23-.52.3c-.19.07-.39.09-.6.1-.36-.01-.66-.08-.89-.23-.25-.16-.45-.37-.59-.62s-.25-.55-.3-.88-.08-.67-.08-1v-.27c0-.35.03-.68.08-1.01zM12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8z'/%3E%3C/svg%3E\");\n}\n\n/* -- add `.icon-colored` -- */\n.icon-love {\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' fill='%23ff4d4d' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M13.35 20.13c-.76.69-1.93.69-2.69-.01l-.11-.1C5.3 15.27 1.87 12.16 2 8.28c.06-1.7.93-3.33 2.34-4.29 2.64-1.8 5.9-.96 7.66 1.1 1.76-2.06 5.02-2.91 7.66-1.1 1.41.96 2.28 2.59 2.34 4.29.14 3.88-3.3 6.99-8.55 11.76l-.1.09z'/%3E%3C/svg%3E\");\n}\n" - ], - "names": [], - "mappings": 
"ACAA,UAAU,CACR,WAAW,CAAE,OAAO,CACpB,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,YAAY,CAAE,IAAI,CAClB,GAAG,CAAE,uCAAuC,CAAC,eAAe,CAC5D,sCAAsC,CAAC,cAAc,CAGvD,UAAU,CACR,WAAW,CAAE,OAAO,CACpB,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,YAAY,CAAE,IAAI,CAClB,GAAG,CAAE,sCAAsC,CAAC,eAAe,CAC3D,qCAAqC,CAAC,cAAc,CAGtD,UAAU,CACR,WAAW,CAAE,OAAO,CACpB,UAAU,CAAG,MAAM,CACnB,WAAW,CAAE,GAAG,CAChB,YAAY,CAAE,IAAI,CAClB,GAAG,CAAE,wCAAwC,CAAC,eAAe,CAC7D,uCAAuC,CAAC,cAAc,CAGxD,UAAU,CACR,WAAW,CAAE,OAAO,CACpB,UAAU,CAAG,MAAM,CACnB,WAAW,CAAE,GAAG,CAChB,YAAY,CAAE,IAAI,CAClB,GAAG,CAAE,8CAA8C,CAAC,eAAe,CACnE,6CAA6C,CAAC,cAAc,CCjC9D,AAAA,KAAK,AAAC,CACJ,OAAO,CAAE,KAAK,CACd,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACb,AAGD,AAAA,cAAc,AAAC,CACb,gBAAgB,CAAE,qcAAqc,CACxd,AAED,AAAA,aAAa,AAAC,CACZ,gBAAgB,CAAE,upBAAupB,CACzqB,SAAS,CAAE,UAAU,CACtB,AAED,AAAA,aAAa,AAAC,CACZ,gBAAgB,CAAE,mgBAAmgB,CACrhB,SAAS,CAAE,UAAU,CACtB,AAED,AAAA,YAAY,AAAC,CACX,gBAAgB,CAAE,+tBAA+tB,CAClvB,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,YAAY,EAAG,IAAI,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,EAAoB,YAAY,AAAC,CAC5E,gBAAgB,CAAE,+tBAA+tB,CAClvB,AAID,AAAA,UAAU,AAAC,CACT,gBAAgB,CAAE,ssBAAssB,CACztB,AAED,AAAA,SAAS,AAAC,CACR,gBAAgB,CAAE,oTAAoT,CACvU,AAED,AAAA,WAAW,AAAC,CACV,gBAAgB,CAAE,uSAAuS,CAC1T,AAED,AAAA,UAAU,AAAC,CACT,gBAAgB,CAAE,6eAA6e,CAChgB,AAED,AAAA,UAAU,AAAC,CACT,gBAAgB,CAAE,46CAA46C,CAC/7C,AAED,AAAA,WAAW,AAAC,CACV,gBAAgB,CAAE,6nBAA6nB,CAChpB,AAED,AAAA,gBAAgB,AAAC,CACf,gBAAgB,CAAE,koBAAkoB,CACrpB,AAED,AAAA,gBAAgB,AAAC,CACf,gBAAgB,CAAE,itCAAitC,CACpuC,AAED,AAAA,eAAe,AAAC,CACd,gBAAgB,CAAE,0tBAA0tB,CAC7uB,AAED,AAAA,gBAAgB,AAAC,CACf,gBAAgB,CAAE,qkCAAqkC,CACxlC,AAED,AAAA,eAAe,AAAC,CACd,gBAAgB,CAAE,k1BAAk1B,CACr2B,AAED,AAAA,YAAY,AAAC,CACX,gBAAgB,CAAE,6eAA6e,CAChgB,AAED,AAAA,YAAY,AAAC,CACX,gBAAgB,CAAE,iTAAiT,CACpU,AAED,AAAA,cAAc,AAAC,CACb,gBAAgB,CAAE,uhBAAuhB,CAC1iB,AAED,AAAA,UAAU,AAAC,CACT,gBAAgB,CAAE,gYAAgY,CACnZ,AAED,AAAA,UAAU,AAAC,CACT,gBAAgB,CAAE,gbAAgb,CACnc,AAED,AAAA,eAAe,AAAC,CACd,gBAAgB,CAAE,g5BAAg5B,CACn6B,AAGD,AAAA,UAAU,AAAC,CACT,g
BAAgB,CAAE,saAAsa,CACzb,CFnGA,AAAD,IAAK,AAAC,CACJ,aAAa,CAAA,oBAAC,CACd,mBAAmB,CAAA,eAAC,CACpB,kBAAkB,CAAA,mBAAC,CAEnB,YAAY,CAAA,QAAC,CACb,OAAO,CAAA,QAAC,CACR,QAAQ,CAAA,QAAC,CACT,QAAQ,CAAA,QAAC,CAET,cAAc,CAAA,QAAC,CACf,aAAa,CAAA,QAAC,CAEd,eAAe,CAAA,QAAC,CAChB,gBAAgB,CAAA,QAAC,CACjB,cAAc,CAAA,QAAC,CAEf,YAAY,CAAA,0BAAC,CACb,aAAa,CAAA,0BAAC,CAEd,MAAM,CAAA,KAAC,CAEP,uBAAuB,CAAA,qJAAC,CAExB,aAAa,CAAA,uFAAC,CAGd,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,wBAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CACjB,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,CAAmB,CACvB,YAAY,CAAA,QAAC,CACb,OAAO,CAAA,QAAC,CACR,QAAQ,CAAA,QAAC,CACT,QAAQ,CAAA,QAAC,CAET,eAAe,CAAA,QAAC,CAChB,gBAAgB,CAAA,QAAC,CACjB,cAAc,CAAA,wBAAC,CAEf,YAAY,CAAA,+BAAC,CACb,aAAa,CAAA,gCAAC,CAEd,uBAAuB,CAAA,oIAAC,CAExB,aAAa,CAAA,uFAAC,CAGd,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,wBAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CACjB,CAEA,AAAD,IAAK,CAAA,AAAA,UAAC,CAAW,OAAO,AAAlB,CAAoB,CACxB,YAAY,CAAA,QAAC,CACb,OAAO,CAAA,QAAC,CACR,QAAQ,CAAA,QAAC,CACT,QAAQ,CAAA,QAAC,CAET,eAAe,CAAA,QAAC,CAChB,gBAAgB,CAAA,QAAC,CACjB,cAAc,CAAA,yBAAC,CAEf,YAAY,CAAA,+BAAC,CACb,aAAa,CAAA,gCAAC,CAEd,uBAAuB,CAAA,sIAAC,CAExB,aAAa,CAAA,sFAAC,CAGd,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,wBAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CAChB,eAAe,CAAA,QAAC,CACjB,
AAED,AAAA,KAAK,CAAA,GAAK,CAAA,aAAa,CAAE,CACvB,MAAM,CAAE,kBAAkB,CAC3B" -} \ No newline at end of file diff --git a/docs/sitemap.xml b/docs/sitemap.xml deleted file mode 100644 index 7338675..0000000 --- a/docs/sitemap.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - en/sitemap.xml - - 2024-04-09T13:54:32+08:00 - - - - - zh/sitemap.xml - - 2024-04-09T13:54:32+08:00 - - - - diff --git a/docs/images/chatbot/agent-flow.png b/docs/static/api-docs/muAgent/agent-flow.png similarity index 100% rename from docs/images/chatbot/agent-flow.png rename to docs/static/api-docs/muAgent/agent-flow.png diff --git a/docs/images/muagent/baseagent.png b/docs/static/api-docs/muAgent/baseagent.png similarity index 100% rename from docs/images/muagent/baseagent.png rename to docs/static/api-docs/muAgent/baseagent.png diff --git a/docs/images/muagent/executoragent.png b/docs/static/api-docs/muAgent/executoragent.png similarity index 100% rename from docs/images/muagent/executoragent.png rename to docs/static/api-docs/muAgent/executoragent.png diff --git a/docs/static/api-docs/muAgent/muagent_framework.png b/docs/static/api-docs/muAgent/muagent_framework.png new file mode 100644 index 0000000..e0245c9 Binary files /dev/null and b/docs/static/api-docs/muAgent/muagent_framework.png differ diff --git a/docs/images/muagent/reactagent.webp b/docs/static/api-docs/muAgent/reactagent.webp similarity index 100% rename from docs/images/muagent/reactagent.webp rename to docs/static/api-docs/muAgent/reactagent.webp diff --git a/docs/images/muagent/selectoragent.webp b/docs/static/api-docs/muAgent/selectoragent.webp similarity index 100% rename from docs/images/muagent/selectoragent.webp rename to docs/static/api-docs/muAgent/selectoragent.webp diff --git a/docs/sw.js b/docs/sw.js deleted file mode 100644 index e13c4c6..0000000 --- a/docs/sw.js +++ /dev/null @@ -1,61 +0,0 @@ -const cacheName = 'docura-{{ now.Format "2006-01-02" }}'; -const staticAssets = [ - './', - './index.html', - './manifest.json', - 
'./docs/**/*', - './font/*', - './img/icon/favicon.ico', - './img/icon/icon-16.png', - './img/icon/icon-32.png', - './img/icon/icon-180.png', - './img/icon/icon-192.png', - './img/icon/icon-512.png', - './img/icon/icon-vector.svg', - './img/icon/maskable-icon-192.png', - './img/icon/maskable-icon-512.png', - './js/base.min.js', - './js/component/docsearch.min.js', - './scss/base.css', - './scss/component/docsearch.css', - './scss/home.css', -]; - -self.addEventListener('install', async e => { - const cache = await caches.open(cacheName); - await cache.addAll(staticAssets); - return self.skipWaiting(); -}); - -self.addEventListener('activate', e => { - self.clients.claim(); -}); - -self.addEventListener('fetch', async e => { - const req = e.request; - const url = new URL(req.url); - - if (url.origin === location.origin) { - e.respondWith(cacheFirst(req)); - } else { - e.respondWith(networkFirst(req)); - } -}); - -async function cacheFirst(req) { - const cache = await caches.open(cacheName); - const cached = await cache.match(req); - return cached || fetch(req); -} - -async function networkFirst(req) { - const cache = await caches.open(cacheName); - try { - const fresh = await fetch(req); - cache.put(req, fresh.clone()); - return fresh; - } catch (e) { - const cached = await cache.match(req); - return cached; - } -} \ No newline at end of file diff --git a/docs/tags/index.xml b/docs/tags/index.xml deleted file mode 100644 index 0db91a2..0000000 --- a/docs/tags/index.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - Tags on CodeFuse-AI - /tags/ - Recent content in Tags on CodeFuse-AI - Hugo -- gohugo.io - en-US - - - diff --git a/docs/zh/categories/index.xml b/docs/zh/categories/index.xml deleted file mode 100644 index 6dd3e45..0000000 --- a/docs/zh/categories/index.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - Categories on CodeFuse-AI - /zh/categories/ - Recent content in Categories on CodeFuse-AI - Hugo -- gohugo.io - en-CN - - - diff --git 
"a/docs/zh/coagent/agent-\347\274\226\346\216\222/index.html" "b/docs/zh/coagent/agent-\347\274\226\346\216\222/index.html" deleted file mode 100644 index 0777224..0000000 --- "a/docs/zh/coagent/agent-\347\274\226\346\216\222/index.html" +++ /dev/null @@ -1,386 +0,0 @@ - - - - - - - - -Agent 编排 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Agent 编排

    -
    -
    - - -

    核心Connector介绍

    -

    为了便于大家理解整个 CoAgent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建

    -
    - 图片 -
    -


    下面,我们先介绍相关的核心组件

    -

    Agent

    -

    在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用

    -
      -
    1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出
    2. -
    -
    - 图片 -
    -
      -
    1. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务
    2. -
    3. ReactAgent:提供标准React的功能,根据问题实现当前任务
    4. -
    5. SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答.
    6. -
    -

    输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理

    -

    Chain

    -

    基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理

    -

    Phase

    -

    基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理

    -

    Prompt Manager

    -

    Mutli-Agent链路中每一个agent的prompt创建

    -
      -
    • 通过对promtp_input_keys和promtp_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置
    • -
    • 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt
    • -
    -

    Memory Manager

    -

    主要用于 chat history 的管理,暂未完成

    -
      -
    • 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval
    • -
    • 对 chat history 进行关键信息总结 summary context,作为 prompt context
    • -
    • 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/coagent/coagent-\346\246\202\350\247\210/index.html" "b/docs/zh/coagent/coagent-\346\246\202\350\247\210/index.html" deleted file mode 100644 index 349f68e..0000000 --- "a/docs/zh/coagent/coagent-\346\246\202\350\247\210/index.html" +++ /dev/null @@ -1,370 +0,0 @@ - - - - - - - - -CoAgent 概览 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CoAgent 概览

    -
    -
    - - -

    简介

    -

    为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。

    -

    但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。

    -

    经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。

    -

    因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。

    -

    本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。

    -
    - 图片 -
    -

    以下模块将从5个方面介绍Multi Agent框架所需要素:

    -
      -
    • Agent Communication在Multi Agent框架中,确保Agent可以有效地进行信息交流对于管理上下文以及提高问答效率至关重要。 -a. 遵循简洁直观易于理解的链式对话原则,将Agent以线性方式排列串连成一个执行链路。 -b. 借鉴metaGPT中的Message Pool框架,允许Agent对Message Pool进行推送和订阅,使链路更加灵活。有利于精细化Prompt工程的场景,但难以把握复杂链路的关系分析。
    • -
    • Standard Operation Process(SOP):对LLM的生成结果进行标准化解析和处理。 -a. 定义Agent的 Input 和 Output 范围,能够组装和解析相关Action和Status,保证框架运行的稳定性 -b. 封装多种基础Action执行模块,如Tool Using、Planning、Coding、Direct Answering、final answer等SOP标识,以满足Agent的基本工作需求。
    • -
    • Plan and Executor:增加LLM的Tool使用、Agent调度、代码的生成。设置了几种基本链路,例如: -a. 单轮问答,也可以扩展到CoT、ToT、GoT等形式。 -b. ReAct,基础的响应决策过程,模型设置SOP 状态以终止循环 -c. TaskPlaning - Executor,任务完成即可结束
    • -
    • Long-short term memory Management:Multi-Agent与单Agent的关键区别在于,Multi-Agent需要处理大量的交流信息,类似人类团队协作的过程。增加一个专门负责内容总结(类似于会议助理)的Agent,对长期记忆进行总结并提更有效信息传递给下一位Agent,而非传递所有内容给下一位Agent。
    • -
    • Human-agent interaction:面对复杂场景时,需要人类介入Agent交互过程并提供反馈。通过上述 Long-short term memory Management 和 Agent Communication 过程,使LLM能准确理解人类的意图,从而更有效地完成任务。
    • -
    -

    总的来说,这五个要素共同构建了一个Multi Agent框架,确保Agent之间的协作更加紧密和高效,同时也能够适应更复杂的任务需求和更多样的交互场景。通过组合多个Agent链路来实现一个完整且复杂的项目上线场景(Dev Phase),如Demand Chain(CEO)、Product Arguement Chain(CPO、CFO、CTO)、Engineer Group Chain(Selector、Developer1~N)、QA Engineer Chain(Developer、Tester)、Deploy Chain(Developer、Deploer)。

    -

    模块分类

    -
      -
    • connector
    • -
    • document_loaders
    • -
    • embeddings
    • -
    • llm_models
    • -
    • orm
    • -
    • sandbox
    • -
    • service
    • -
    • text_splitter
    • -
    • tools
    • -
    • utils
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/coagent/connector-agent-zh/index.html b/docs/zh/coagent/connector-agent-zh/index.html deleted file mode 100644 index 56f8e1c..0000000 --- a/docs/zh/coagent/connector-agent-zh/index.html +++ /dev/null @@ -1,586 +0,0 @@ - - - - - - - - -Connector Agent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Agent

    -
    -
    - - -

    快速构建一个Agent

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.configs import AGETN_CONFIGS
    -from coagent.connector.agents import BaseAgent
    -from coagent.connector.schema import Message, load_role_configs
    -
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    # LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
      -
    • 这里从已有的agent配置选一个role来做示例
    • -
    -
    # 从已有的配置中选择一个config,具体参数细节见下面
    -role_configs = load_role_configs(AGETN_CONFIGS)
    -agent_config = role_configs["general_planner"]
    -# 生成agent实例
    -base_agent = BaseAgent(
    -    role=agent_config.role, 
    -    prompt_config = agent_config.prompt_config,
    -    prompt_manager_type=agent_config.prompt_manager_type,
    -    chat_turn=agent_config.chat_turn,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config,
    -    embed_config=embed_config,
    -    jupyter_work_path=JUPYTER_WORK_PATH,
    -    kb_root_path=KB_ROOT_PATH,
    -    ) 
    -# round-1
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message = base_agent.step(query)
    -print(output_message.to_str_content(content_key="parsed_output_list"))
    -

    Agent 参数配置

    -
    # 配置结构在这个目录
    -from coagent.connector.schema import Role, PromptField
    -

    Agent Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    roleRole角色描述
    prompt_configList[PromptField]Enum:PromptManager 也可以继承以上几种Agent然后去构造相关的Agent
    prompt_manager_typeStringEnum:PromptManager 也可以继承以上几种Agent然后去构造自定义的Enum:PromptManager
    focus_agentsList[String]metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name
    focus_message_keysList[String]额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys
    chat_turnint只针对ReactAgent有效
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    -

    Role

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    role_typestr角色类型, Enum: system、user、assistant、function、observation、summary
    role_namestr角色名称
    role_descstr角色描述
    agent_typestr代理类型
    role_promptstr角色提示
    template_promptstr模板提示
    -

    PromptField

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    field_namestr
    function_namestr
    titlestr
    descriptionstr
    is_contextbool
    omit_if_emptybool
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/coagent/connector-chain-zh/index.html b/docs/zh/coagent/connector-chain-zh/index.html deleted file mode 100644 index 3887838..0000000 --- a/docs/zh/coagent/connector-chain-zh/index.html +++ /dev/null @@ -1,508 +0,0 @@ - - - - - - - - -Connector Chain · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Chain

    -
    -
    - - -

    快速构建一个 agent chain

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    # 设置openai的api-key
    -import os, sys
    -import openai
    -import importlib
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xxxx"
    -openai.api_key = "sk-xxxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    # LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
      -
    • 这里从已有的agent配置选多个role组合成 agent chain
    • -
    -
    from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.configs import AGETN_CONFIGS
    -from coagent.connector.chains import BaseChain
    -from coagent.connector.schema import Message, load_role_configs
    -
    -# 构建 agent chain 链路
    -role_configs = load_role_configs(AGETN_CONFIGS)
    -agent_config = role_configs["general_planner"]
    -role1 = role_configs["general_planner"]
    -role2 = role_configs["executor"]
    -agent_module = importlib.import_module("examples.connector.agents")
    -agents = [
    -    getattr(agent_module, role1.role.agent_type)(
    -            role=role1.role, 
    -            prompt_config = role1.prompt_config,
    -            prompt_manager_type=role1.prompt_manager_type,
    -            chat_turn=role1.chat_turn,
    -            focus_agents=role1.focus_agents,
    -            focus_message_keys=role1.focus_message_keys,
    -            llm_config=llm_config,
    -            embed_config=embed_config,
    -            jupyter_work_path=JUPYTER_WORK_PATH,
    -            kb_root_path=KB_ROOT_PATH,
    -        ),
    -    getattr(agent_module, role2.role.agent_type)(
    -            role=role2.role, 
    -            prompt_config = role2.prompt_config,
    -            prompt_manager_type=role2.prompt_manager_type,
    -            chat_turn=role2.chat_turn,
    -            focus_agents=role2.focus_agents,
    -            focus_message_keys=role2.focus_message_keys,
    -            llm_config=llm_config,
    -            embed_config=embed_config,
    -            jupyter_work_path=JUPYTER_WORK_PATH,
    -            kb_root_path=KB_ROOT_PATH,
    -        ),
    -    ]
    -
    -chain = BaseChain(
    -    agents, 
    -    chat_turn=1, 
    -    jupyter_work_path=JUPYTER_WORK_PATH,
    -    kb_root_path=KB_ROOT_PATH,
    -    llm_config=llm_config,
    -    embed_config=embed_config,
    -    )
    -
      -
    • 开始执行
    • -
    -
    # round-1
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = chain.step(query)
    -print(output_memory.to_str_messages(content_key="parsed_output_list"))
    -

    Chain 参数配置

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    agentsList[BaseAgent]
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/coagent/connector-memory-zh/index.html b/docs/zh/coagent/connector-memory-zh/index.html deleted file mode 100644 index 0c49f5e..0000000 --- a/docs/zh/coagent/connector-memory-zh/index.html +++ /dev/null @@ -1,473 +0,0 @@ - - - - - - - - -Connector Memory · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Memory

    -
    -
    - - -

    Memory Manager

    -

    主要用于 chat history 的管理,暂未完成

    -
      -
    • 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval
    • -
    • 对 chat history 进行关键信息总结 summary context,作为 prompt context
    • -
    • 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答
    • -
    -

    使用示例

    -

    创建 memory manager 实例

    -
    import os
    -import openai
    -
    -from coagent.base_configs.env_config import KB_ROOT_PATH
    -from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.schema import Message
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
    -# LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -# 
    -phase_name = "test"
    -memory_manager = LocalMemoryManager(
    -            unique_name=phase_name, 
    -            do_init=True, 
    -            kb_root_path = KB_ROOT_PATH, 
    -            embed_config=embed_config, 
    -            llm_config=llm_config
    -        )
    -

    支持Message管理

    -
    message1 = Message(
    -    role_name="test1", role_type="user", input_query="hello", origin_query="hello",
    -    parsed_output_list=[{"input": "hello"}]
    -)
    -
    -text = "hi! how can I help you?"
    -message2 = Message(
    -    role_name="test2", role_type="assistant", input_query=text, origin_query=text,
    -    role_content=text, step_content=text, parsed_output_list=[{"answer": text}]
    -)
    -
    -text = "they say hello and hi to each other"
    -message3 = Message(
    -    role_name="test3", role_type="summary",
    -    role_content=text, step_content=text,
    -    parsed_output_list=[{"summary": text}]
    -    )
    -

    支持 memory 检索

    -
    # embedding retrieval test
    -text = "say hi, i want some help"
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "datetime"))
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "embedding"))
    -print(memory_manager.router_retrieval(text=text, datetime="2024-01-08 20:22:00", n=4, top_k=5, retrieval_type= "text"))
    -

    支持 memory 总结

    -
    # recursive_summary test
    -print(memory_manager.recursive_summary(local_memory_manager.recall_memory.messages, split_n=1))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/coagent/connector-phase-zh/index.html b/docs/zh/coagent/connector-phase-zh/index.html deleted file mode 100644 index b1db071..0000000 --- a/docs/zh/coagent/connector-phase-zh/index.html +++ /dev/null @@ -1,508 +0,0 @@ - - - - - - - - -Connector Phase · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Phase

    -
    -
    - - -

    快速构建一个 agent phase

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH
    -from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.configs import AGETN_CONFIGS
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.schema import Message, load_role_configs
    -
    -
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    # LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
      -
    • 这里从已有的 phase 配置中选一个 phase 来做示例
    • -
    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "searchChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1
    -query_content1 = "美国当前总统是谁?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content1, input_query=query_content1, origin_query=query_content1,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content2 = "美国上一任总统是谁,两个人有什么关系没?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content2, input_query=query_content2, origin_query=query_content2,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Phase 参数配置

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    phase_nameString场景名称
    phase_configCompletePhaseConfig默认为None,可直接指定完整的phaseconfig, 暂未实现
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    base_phase_configUnion[dict, str]默认配置:PHASE_CONFIGS,可通过实现对这个变量新增来实现自定义配置
    base_chain_configUnion[dict, str]默认配置:CHAIN_CONFIGS,可通过实现对这个变量新增来实现自定义配置
    base_role_configUnion[dict, str]默认配置:AGETN_CONFIGS,可通过实现对这个变量新增来实现自定义配置
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/coagent/connector-prompt-zh/index.html b/docs/zh/coagent/connector-prompt-zh/index.html deleted file mode 100644 index 56d7e08..0000000 --- a/docs/zh/coagent/connector-prompt-zh/index.html +++ /dev/null @@ -1,615 +0,0 @@ - - - - - - - - -Connector Prompt · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Prompt

    -
    -
    - - -

    Prompt 的标准结构

    -

    在整个Prompt的整个结构中,我们需要去定义三个部分

    -
      -
    • Agent Profil
    • -
    • Input Format
    • -
    • Response Output Format
    • -
    -
    #### Agent Profile
    -
    -Agent Description ...
    -
    -#### Input Format
    -
    -**Origin Query:** the initial question or objective that the user wanted to achieve
    -
    -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved.
    -
    -#### Response Output Format
    -**Action Status:** finished or continued
    -If it's 'finished', the context can answer the origin query.
    -If it's 'continued', the context cant answer the origin query.
    -
    -**REASON:** Justify the decision of choosing 'finished' and 'continued' by evaluating the progress step by step.
    -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion.
    -

    其中,我们整合了部分 Input Format 的通用操作,内置了一部分字段和操作流程,形成通用的配置化操作。如下所示 -只需要定义如下字段和执行函数,

    -
    AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
    -    {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False}
    -]
    -

    未来我们会也会进一步将 Agent Profile和Response Output Format的部分,实现可配置化操作,降低Prompt编写难度

    -

    自定义 Input Format

    -

    同时,我们也支持 用户自定义 Input Format 的操作

    -
    from coagent.connector.prompt_manager import PromptManager
    -
    -# 增加了两个新处理函数,用于prompt组装
    -class CodeRetrievalPM(PromptManager):
    -    def handle_code_packages(self, **kwargs) -> str:
    -        if 'previous_agent_message' not in kwargs:
    -            return ""
    -        previous_agent_message: Message = kwargs['previous_agent_message']
    -        # 由于两个agent共用了同一个manager,所以临时性处理
    -        vertices = previous_agent_message.customed_kargs.get("RelatedVerticesRetrivalRes", {}).get("vertices", [])
    -        return ", ".join([str(v) for v in vertices])
    -
    -    def handle_retrieval_codes(self, **kwargs) -> str:
    -        if 'previous_agent_message' not in kwargs:
    -            return ""
    -        previous_agent_message: Message = kwargs['previous_agent_message']
    -        return '\n'.join(previous_agent_message.customed_kargs["Retrieval_Codes"])
    -
    -
    -# Design your personal PROMPT INPPUT FORMAT 
    -CODE_RETRIEVAL_PROMPT_CONFIGS = [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'reference_documents', "function_name": 'handle_doc_info'},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'retrieval_codes', "function_name": 'handle_retrieval_codes'},
    -    {"field_name": 'code_packages', "function_name": 'handle_code_packages'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
    -    {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False}
    -    ]
    -
    -# 进行注册
    -import importlib
    -prompt_manager_module = importlib.import_module("coagent.connector.prompt_manager")
    -setattr(prompt_manager_module, 'CodeRetrievalPM', CodeRetrievalPM)
    -
    -# 更新配置
    -from coagent.connector.configs import AGETN_CONFIGS
    -AGETN_CONFIGS.update({
    -    "codeRetrievalJudger": {
    -        "role": {
    -            "role_prompt": codeRetrievalJudger_PROMPT,
    -            "role_type": "assistant",
    -            "role_name": "codeRetrievalJudger",
    -            "role_desc": "",
    -            "agent_type": "CodeRetrievalJudger"
    -            # "agent_type": "BaseAgent"
    -        },
    -        "prompt_config": CODE_RETRIEVAL_PROMPT_CONFIGS,
    -        "prompt_manager_type": "CodeRetrievalPM",
    -        "chat_turn": 1,
    -        "focus_agents": [],
    -        "focus_message_keys": [],
    -    },
    -    })
    -

    在我们构建phase、chain或者agent之后,可以通过函数的预打印功能,实现agents链路确认,避免在执行后才发现问题,可提前进行debug

    -
    llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -phase.pre_print(query)
    -
    -## 完整信息确认 coagent.connector.configs中进行确认
    -##########################
    -<<<<baseGroup's prompt>>>>
    -##########################
    -
    -### Agent Profile
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -ATTENTION: response carefully referenced "Response Output Format" in format.
    -
    -### Tool Information
    -
    -### Agent Infomation
    -        Please ensure your selection is one of the listed roles. Available roles for selection:
    -        "role name: tool_react
    -role description:  Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.,"
    -"role name: code_react
    -role description:  Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.,"
    -        Please ensure select the Role from agent names, such as tool_react, code_react
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Current Plan
    -
    -### Response Output Format
    -**Thoughts:** think the reason step by step about why you selecte one role
    -**Role:** Select the role from agent names.
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Role:**
    -
    -
    -###########################
    -<<<<tool_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.
    -Please note that all the tools you can use are listed below. You can only choose from these tools for use.
    -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.
    -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.
    -
    -### Tool Information
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Task Records
    -
    -### Response Output Format
    -**Thoughts:** According the previous observations, plan the approach for using the tool effectively.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -###########################
    -<<<<code_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When users need help with coding, your role is to provide precise and effective guidance.
    -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -### Response Output Format
    -
    -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem,
    -outline the plan for executing this step.
    -
    -**Action Status:** Set to 'stopped' or 'code_executing'.
    -If it's 'stopped', the action is to provide the final answer to the session records and executed steps.
    -If it's 'code_executing', the action is to write the code.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/coagent/customed-examples-zh/index.html b/docs/zh/coagent/customed-examples-zh/index.html deleted file mode 100644 index d260e06..0000000 --- a/docs/zh/coagent/customed-examples-zh/index.html +++ /dev/null @@ -1,551 +0,0 @@ - - - - - - - - -Customed Examples · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Customed Examples

    -
    -
    - - -

    如何创建你个性化的 agent phase 场景

    -

    下面通过 autogen 的 auto_feedback_from_code_execution 构建过来,来详细演示如何自定义一个 agent phase 的构建

    -

    设计你的prompt结构

    -
    import os, sys, requests
    -
    -# from configs.model_config import *
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.chains import BaseChain
    -from coagent.connector.schema import Message
    -from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS
    -import importlib
    -
    -
    -# update new agent configs
    -auto_feedback_from_code_execution_PROMPT = """#### Agent Profile
    -
    -You are a helpful AI assistant. Solve tasks using your coding and language skills.
    -In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
    -    1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
    -    2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
    -Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
    -When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
    -If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
    -When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
    -Reply "stopped" in the end when everything is done.
    -
    -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.
    -
    -#### Response Output Format
    -
    -**Thoughts:** Based on the question and observations above, provide the plan for executing this step.
    -
    -**Action Status:** Set to 'stopped' or 'code_executing'. If it's 'stopped', the action is to provide the final answer to the original question. If it's 'code_executing', the action is to write the code.
    -
    -**Action:** 
    -# Write your code here
    -import os
    -...
    -
    -
    -**Observation:** Check the results and effects of the executed code.
    -
    -... (Repeat this Thoughts/Action/Observation cycle as needed)
    -
    -**Thoughts:** I now know the final answer
    -
    -**Action Status:** stopped
    -
    -**Action:** The final answer to the original input question
    -"""
    -

    开始配置 Prompt Configs

    -
    AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS = [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
    -    {"field_name": 'begin!!!', "function_name": 'handle_response', "is_context": False, "omit_if_empty": False}
    -]
    -

    更新完整的agent、chain、phase配置,以便后续更读取执行

    -
    from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS
    -import os
    -
    -## set a 
    -AGETN_CONFIGS.update({
    -    "auto_feedback_from_code_execution": {
    -        "role": {
    -            "role_prompt": auto_feedback_from_code_execution_PROMPT,
    -            "role_type": "assistant",
    -            "role_name": "auto_feedback_from_code_execution",
    -            "role_desc": "",
    -            "agent_type": "ReactAgent"
    -        },
    -        "prompt_config": AUTO_FEEDBACK_FROM_CODE_EXECUTION_PROMPT_CONFIGS,
    -        "chat_turn": 5,
    -        "stop": "\n**Observation:**",
    -        "focus_agents": [],
    -        "focus_message_keys": [],
    -    },
    -})
    -# update new chain configs
    -CHAIN_CONFIGS.update({
    -    "auto_feedback_from_code_executionChain": {
    -        "chain_name": "auto_feedback_from_code_executionChain",
    -        "chain_type": "BaseChain",
    -        "agents": ["auto_feedback_from_code_execution"],
    -        "chat_turn": 1,
    -        "do_checker": False,
    -        "chain_prompt": ""
    -    }
    -})
    -
    -# update phase configs
    -PHASE_CONFIGS.update({
    -    "auto_feedback_from_code_executionPhase": {
    -        "phase_name": "auto_feedback_from_code_executionPhase",
    -        "phase_type": "BasePhase",
    -        "chains": ["auto_feedback_from_code_executionChain"],
    -        "do_summary": False,
    -        "do_search": False,
    -        "do_doc_retrieval": False,
    -        "do_code_retrieval": False,
    -        "do_tool_retrieval": False,
    -        "do_using_tool": False
    -    },
    -})
    -

    接下来就构建 phase 实例,开始执行

    -
    from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.schema import Message
    -import base64, openai
    -
    -#
    -os.environ["API_BASE_URL"] = "http://openai.com/v1/chat/completions"
    -os.environ["OPENAI_API_KEY"] = "sk-xxxx"
    -openai.api_key = "sk-xxxx"
    -
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -# 
    -phase_name = "auto_feedback_from_code_executionPhase"
    -phase = BasePhase(
    -    phase_name,
    -    embed_config=embed_config, llm_config=llm_config, 
    -    base_phase_config = PHASE_CONFIGS,
    -    base_chain_config = CHAIN_CONFIGS,
    -    base_role_config = AGETN_CONFIGS,
    -)
    -
    -
    -# round-1
    -query_content = """Plot a chart of META and TESLA's stock prices for the past year and save it as stock_price_ytd.png."""
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/coagent/index.xml b/docs/zh/coagent/index.xml deleted file mode 100644 index e4985e8..0000000 --- a/docs/zh/coagent/index.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - Coagents on CodeFuse-AI - /zh/coagent/ - Recent content in Coagents on CodeFuse-AI - Hugo -- gohugo.io - en-CN - - - Agent 编排 - /zh/coagent/agent-%E7%BC%96%E6%8E%92/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/agent-%E7%BC%96%E6%8E%92/ - 核心Connector介绍 为了便于大家理解整个 CoAgent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建 下面,我们先介绍相关的核心组件 Agent 在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用 BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 =&gt; 输出 ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 ReactAgent:提供标准React的功能,根据问题实现当前任务 SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. 输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理 Chain 基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理 Phase 基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理 Prompt Manager Mutli-Agent链路中每一个agent的prompt创建 通过对promtp_input_keys和promtp_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt Memory Manager 主要用于 chat history 的管理,暂未完成 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 - - - CoAgent 概览 - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - 简介 为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。 但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。 经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。 
因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。 本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。 以下模块将从5个方面介绍Multi Agent框架所需要素: Agent Communication在Multi Agent框架中,确保Agent可以有效地进行信息交流对于管理上下文以及提高问答效率至关重要。 a. 遵循简洁直观易于理解的链式对话原则,将Agent以线性方式排列串连成一个执行链路。 b. 借鉴metaGPT中的Message Pool框架,允许Agent对Message Pool进行推送和订阅,使链路更加灵活。有利于精细化Prompt工程的场景,但难以把握复杂链路的关系分析。 Standard Operation Process(SOP):对LLM的生成结果进行标准化解析和处理。 a. 定义Agent的 Input 和 Output 范围,能够组装和解析相关Action和Status,保证框架运行的稳定性 b. 封装多种基础Action执行模块,如Tool Using、Planning、Coding、Direct Answering、final answer等SOP标识,以满足Agent的基本工作需求。 Plan and Executor:增加LLM的Tool使用、Agent调度、代码的生成。设置了几种基本链路,例如: a. 单轮问答,也可以扩展到CoT、ToT、GoT等形式。 b. ReAct,基础的响应决策过程,模型设置SOP 状态以终止循环 c. TaskPlaning - Executor,任务完成即可结束 Long-short term memory Management:Multi-Agent与单Agent的关键区别在于,Multi-Agent需要处理大量的交流信息,类似人类团队协作的过程。增加一个专门负责内容总结(类似于会议助理)的Agent,对长期记忆进行总结并提更有效信息传递给下一位Agent,而非传递所有内容给下一位Agent。 Human-agent interaction:面对复杂场景时,需要人类介入Agent交互过程并提供反馈。通过上述 Long-short term memory Management 和 Agent Communication 过程,使LLM能准确理解人类的意图,从而更有效地完成任务。 总的来说,这五个要素共同构建了一个Multi Agent框架,确保Agent之间的协作更加紧密和高效,同时也能够适应更复杂的任务需求和更多样的交互场景。通过组合多个Agent链路来实现一个完整且复杂的项目上线场景(Dev Phase),如Demand Chain(CEO)、Product Arguement Chain(CPO、CFO、CTO)、Engineer Group Chain(Selector、Developer1~N)、QA Engineer Chain(Developer、Tester)、Deploy Chain(Developer、Deploer)。 - - - Connector Agent - /zh/coagent/connector-agent-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-agent-zh/ - 快速构建一个Agent 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.agents import BaseAgent from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = 
&#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选一个role来做示例 # 从已有的配置中选择一个config,具体参数细节见下面 role_configs = load_role_configs(AGETN_CONFIGS) agent_config = role_configs[&#34;general_planner&#34;] # 生成agent实例 base_agent = BaseAgent( role=agent_config. - - - Connector Chain - /zh/coagent/connector-chain-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-chain-zh/ - 快速构建一个 agent chain 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) # 设置openai的api-key import os, sys import openai import importlib os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxxx&#34; openai.api_key = &#34;sk-xxxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, 
embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选多个role组合成 agent chain from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent. - - - Connector Memory - /zh/coagent/connector-memory-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-memory-zh/ - Memory Manager 主要用于 chat history 的管理,暂未完成 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 使用示例 创建 memory manager 实例 import os import openai from coagent.base_configs.env_config import KB_ROOT_PATH from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.schema import Message os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os. 
- - - Connector Phase - /zh/coagent/connector-phase-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-phase-zh/ - 快速构建一个 agent phase 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.phase import BasePhase from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的 phase 配置中选一个 phase 来做示例 # log-level,print prompt和llm predict os. - - - Connector Prompt - /zh/coagent/connector-prompt-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-prompt-zh/ - Prompt 的标准结构 在整个Prompt的整个结构中,我们需要去定义三个部分 Agent Profil Input Format Response Output Format #### Agent Profile Agent Description ... #### Input Format **Origin Query:** the initial question or objective that the user wanted to achieve **Context:** the current status and history of the tasks to determine if Origin Query has been achieved. #### Response Output Format **Action Status:** finished or continued If it&#39;s &#39;finished&#39;, the context can answer the origin query. 
If it&#39;s &#39;continued&#39;, the context cant answer the origin query. - - - Customed Examples - /zh/coagent/customed-examples-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/customed-examples-zh/ - 如何创建你个性化的 agent phase 场景 下面通过 autogen 的 auto_feedback_from_code_execution 构建过来,来详细演示如何自定义一个 agent phase 的构建 设计你的prompt结构 import os, sys, requests # from configs.model_config import * from coagent.connector.phase import BasePhase from coagent.connector.chains import BaseChain from coagent.connector.schema import Message from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS import importlib # update new agent configs auto_feedback_from_code_execution_PROMPT = &#34;&#34;&#34;#### Agent Profile You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. - - - Prompt 管理器 - /zh/coagent/prompt-%E7%AE%A1%E7%90%86%E5%99%A8/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/prompt-%E7%AE%A1%E7%90%86%E5%99%A8/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 Prompt自定义配置 Prompt模块参数 field_name:唯一的字段名称标识,必须提供。 function:指定如何处理输入数据的函数,必须提供。 title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 
is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 Prompt配置示例 Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 [ {&#34;field_name&#34;: &#39;agent_profile&#39;, &#34;function_name&#34;: &#39;handle_agent_profile&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;context_placeholder&#39;, &#34;function_name&#34;: &#39;&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;tool_information&#39;,&#34;function_name&#34;: &#39;handle_tool_data&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;reference_documents&#39;, &#34;function_name&#34;: &#39;handle_doc_info&#39;}, {&#34;field_name&#34;: &#39;session_records&#39;, &#34;function_name&#34;: &#39;handle_session_records&#39;}, {&#34;field_name&#34;: &#39;task_records&#39;, &#34;function_name&#34;: &#39;handle_task_records&#39;}, {&#34;field_name&#34;: &#39;output_format&#39;, &#34;function_name&#34;: &#39;handle_output_format&#39;, &#39;title&#39;: &#39;Response Output Format&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;response&#39;, &#34;function_name&#34;: &#39;handle_response&#39;, &#34;title&#34;=&#34;begin! 
- - - 快速开始 - /zh/coagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - 快速使用 首先,填写LLM配置 import os, sys import openai # llm config os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 from coagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 最后选择一个已有场景进行执行 from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from coagent.connector.phase import BasePhase from coagent.connector.schema import Message # 选择一个已实现得场景进行执行 # 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录) import shutil source_file = &#39;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv&#39; shutil.copy(source_file, JUPYTER_WORK_PATH) # 选择一个场景 phase_name = &#34;baseGroupPhase&#34; phase = BasePhase( phase_name, embed_config=embed_config, llm_config=llm_config, ) # round-1 需要通过代码解释器来完成 query_content = &#34;确认本地是否存在employee_data. - - - diff --git "a/docs/zh/coagent/prompt-\347\256\241\347\220\206\345\231\250/index.html" "b/docs/zh/coagent/prompt-\347\256\241\347\220\206\345\231\250/index.html" deleted file mode 100644 index a0355d3..0000000 --- "a/docs/zh/coagent/prompt-\347\256\241\347\220\206\345\231\250/index.html" +++ /dev/null @@ -1,410 +0,0 @@ - - - - - - - - -Prompt 管理器 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Prompt 管理器

    -
    -
    - - -

    提示管理器(Prompt Manager)

    -

    管理多智能体链路中的prompt创建

    -
      -
    • 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。
    • -
    • 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。
    • -
    -

    Prompt预设模板结构

    -
      -
    • Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。
    • -
    • Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 -
        -
      • Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。
      • -
      • Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。
      • -
      • Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。
      • -
      -
    • -
    • Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。
    • -
    • Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。
    • -
    -

    Prompt自定义配置

    -

    Prompt模块参数

    -
      -
    • field_name:唯一的字段名称标识,必须提供。
    • -
    • function:指定如何处理输入数据的函数,必须提供。
    • -
    • title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。
    • -
    • description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。
    • -
    • is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。
    • -
    • omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。
    • -
    -

    Prompt配置示例

    -

    Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。

    -

    在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。

    -

    context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。

    -
    [
    -    {"field_name": 'agent_profile', "function_name": 'handle_agent_profile', "is_context": False},
    -    {"field_name": 'context_placeholder', "function_name": '', "is_context": True},
    -    {"field_name": 'tool_information',"function_name": 'handle_tool_data', "is_context": True},
    -    {"field_name": 'reference_documents', "function_name": 'handle_doc_info'},
    -    {"field_name": 'session_records', "function_name": 'handle_session_records'},
    -    {"field_name": 'task_records', "function_name": 'handle_task_records'},
    -    {"field_name": 'output_format', "function_name": 'handle_output_format', 'title': 'Response Output Format', "is_context": False},
    -    {"field_name": 'response', "function_name": 'handle_response', "title"="begin!!!", "is_context": False, "omit_if_empty": False}
    -]
    -

    未来规划

    -

    Prompt配置简化

    -

    未来的Prompt配置简化旨在降低用户面对复杂配置的难度。通过引入更直观的配置方法,我们计划使得Prompt配置不仅对高级用户友好,还能让初学者轻松上手。简化计划可能包括:

    -
      -
    • 预设配置短语:将复杂的配置字典转换为简洁的短语,每个短语都预定义了一个Prompt模块。用户将能够使用简单的字符串指令来快速配置Prompt,而无需深入了解所有参数。
    • -
    • 配置校验和建议:增加配置的即时校验,如果检测到配置错误或不一致性,自动提供修改建议,帮助用户优化Prompt结构。
    • -
    -

    动作(Action)注册的改进计划

    -

    在现行系统中,智能体必须在其角色提示(role prompt)内定义所有的动作(actions)。这意味着智能体需要同时处理动作的意图识别和生成动作所需的输入数据,这一过程对语言模型的理解和推理能力提出了更高要求。

    -

    为了优化这一流程,我们打算在后续版本中对动作的输入生成和执行进行模块化。这将使智能体的工作重点转移至判断当前情境下应执行哪些动作,而不必负责具体的操作指令。在这种新的架构下,当需要执行某个动作时,将有专门的机制负责生成相应动作的具体输入指令。

    -

    这种分离将显著降低单个模块的复杂性,使得整个系统更加灵活、易于扩展,同时也提升了动作执行的效率和准确性。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/coagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" "b/docs/zh/coagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" deleted file mode 100644 index 96da039..0000000 --- "a/docs/zh/coagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" +++ /dev/null @@ -1,701 +0,0 @@ - - - - - - - - -快速开始 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速开始

    -
    -
    - - -

    快速使用

    -

    首先,填写LLM配置

    -
    import os, sys
    -import openai
    -
    -# llm config
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -openai.api_key = "sk-xxx"
    -# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
    -

    然后设置LLM配置和向量模型配置

    -
    from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -llm_config = LLMConfig(
    -    model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -

    最后选择一个已有场景进行执行

    -
    from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -from coagent.connector.phase import BasePhase
    -from coagent.connector.schema import Message
    -
    -# 选择一个已实现得场景进行执行
    -
    -# 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# 选择一个场景
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1 需要通过代码解释器来完成
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user", tools=[],
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -# phase.pre_print(query)  # 该功能用于预打印 Agents 执行链路的Prompt
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2 需要执行工具
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -
    -query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下"
    -query = Message(
    -    role_name="human", role_type="user", tools=tools,
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -# phase.pre_print(query)  # 该功能用于预打印 Agents 执行链路的Prompt
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    场景自定义

    -

    如何自定义场景

    -

    场景介绍和使用

    -

    下面是一些具体的场景介绍和使用。

    -

    欢迎大家开脑洞构造一些有趣的case。

    -

    baseGroupPhase

    -

    autogen的group使用场景

    -
    # 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# 设置日志级别,控制打印prompt或者llm 输出或其它信息
    -os.environ["log_verbose"] = "0"
    -
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1
    -query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -
    -query = Message(
    -    role_name="human", role_type="user", tools=[],
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -# phase.pre_print(query) # 该功能用于预打印 Agents 执行链路的Prompt
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    baseTaskPhase

    -

    xAgents的任务拆分及多步骤执行场景

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "baseTaskPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -# round-1
    -query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeReactPhase

    -

    基于 React 的代码解释器场景

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# then, create a data analyze phase
    -phase_name = "codeReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -    jupyter_work_path=JUPYTER_WORK_PATH,
    -)
    -
    -# round-1
    -query_content = "确认本地是否存在book_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user",
    -    role_content=query_content, input_query=query_content, origin_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeToolReactPhase

    -

    基于 React 模板的工具调用和代码解释器场景

    -
    TOOL_SETS = [
    -     "StockName", "StockInfo", 
    -    ]
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "codeToolReactPhase"
    -
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -query_content = "查询贵州茅台的股票代码,并查询截止到当前日期(2023年12月24日)的最近10天的每日时序数据,然后用代码画出折线图并分析"
    -
    -query = Message(
    -  role_name="human", role_type="user", 
    -  input_query=query_content, role_content=query_content, 
    -  origin_query=query_content, tools=tools
    -  )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    docChatPhase

    -

    知识库检索问答链路

    -
    # create your knowledge base
    -from io import BytesIO
    -from pathlib import Path
    -
    -from coagent.service.kb_api import create_kb, upload_doc
    -from coagent.service.service_factory import get_kb_details
    -from coagent.utils.server_utils import run_async
    -kb_list = {x["kb_name"]: x for x in get_kb_details(KB_ROOT_PATH)}
    -
    -
    -# create a knowledge base
    -kb_name = "example_test"
    -data = {
    -    "knowledge_base_name": kb_name,
    -    "vector_store_type": "faiss", # default
    -    "kb_root_path": KB_ROOT_PATH, 
    -    "embed_model": embed_config.embed_model,
    -    "embed_engine": embed_config.embed_engine, 
    -    "embed_model_path": embed_config.embed_model_path,
    -    "model_device": embed_config.model_device,
    -}
    -run_async(create_kb(**data))
    -
    -# add doc to knowledge base
    -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl")
    -files = [file]
    -# if embedding init failed, you can use override = True
    -data = [{"override": True, "file": f, 
    -         "knowledge_base_name": kb_name, "not_refresh_vs_cache": False,
    -         "kb_root_path": KB_ROOT_PATH, "embed_model": embed_config.embed_model,
    -         "embed_engine": embed_config.embed_engine, "embed_model_path": embed_config.embed_model_path,
    -         "model_device": embed_config.model_device,
    -         } 
    -         for f in files]
    -
    -for k in data:
    -    file = Path(file).absolute().open("rb")
    -    filename = file.name
    -
    -    from fastapi import UploadFile
    -    from tempfile import SpooledTemporaryFile
    -
    -    temp_file = SpooledTemporaryFile(max_size=10 * 1024 * 1024)
    -    temp_file.write(file.read())
    -    temp_file.seek(0)
    -    
    -    k.update({"file": UploadFile(file=temp_file, filename=filename),})
    -    run_async(upload_doc(**k))
    -
    -
    -# start to chat with knowledge base
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -# round-1
    -query_content = "langchain有哪些模块"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    origin_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content = "提示(prompts)有什么用?"
    -query = Message(
    -    role_name="human", role_type="user",
    -    origin_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    metagpt_code_devlop

    -

    metagpt的代码构造链路

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "metagpt_code_devlop"
    -llm_config = LLMConfig(
    -    model_name="gpt-4", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -    )
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model="text2vec-base-chinese", 
    -    embed_model_path="D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese"
    -    )
    -
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -
    -query_content = "create a snake game by pygame"
    -query = Message(role_name="human", role_type="user", input_query=query_content, role_content=query_content, origin_query=query_content)
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    searchChatPhase

    -

    固定场景链路,先搜索后基于LLM直接回答

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "searchChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -# round-1
    -query_content1 = "美国当前总统是谁?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content1, input_query=query_content1, origin_query=query_content1,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content2 = "美国上一任总统是谁,两个人有什么关系没?"
    -query = Message(
    -    role_name="human", role_type="user", 
    -    role_content=query_content2, input_query=query_content2, origin_query=query_content2,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    toolReactPhase

    -

    基于 React 模板的工具调用场景

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "toolReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config,
    -)
    -
    -# round-1
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下"
    -query = Message(
    -    role_name="human", role_type="user", tools=tools,
    -    role_content=query_content, input_query=query_content, origin_query=query_content
    -    )
    -
    -# phase.pre_print(query)  # 该功能用于预打印 Agents 执行链路的Prompt
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/contribution/index.xml b/docs/zh/contribution/index.xml deleted file mode 100644 index 0d0bb1c..0000000 --- a/docs/zh/contribution/index.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - Contributions on CodeFuse-AI - /zh/contribution/ - Recent content in Contributions on CodeFuse-AI - Hugo -- gohugo.io - en-CN - - - 贡献指南 - /zh/contribution/%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97/ - 中文&nbsp | &nbspEnglish&nbsp 非常感谢您对 Codefuse 项目感兴趣,我们非常欢迎您对 Codefuse 项目的各种建议、意见(包括批评)、评论和贡献。 您对 Codefuse 的各种建议、意见、评论可以直接通过 GitHub 的 Issues 提出。 参与 Codefuse 项目并为其作出贡献的方法有很多:代码实现、测试编写、流程工具改进、文档完善等等。任何贡献我们都会非常欢迎,并将您加入贡献者列表. 进一步,有了足够的贡献后,您还可以有机会成为 Codefuse 的 Committer。 任何问题,您都可以联系我们得到及时解答,联系方式包括微信、Gitter(GitHub提供的即时聊天工具)、邮件等等。 初次接触 初次来到 Codefuse 社区,您可以: 关注 Codefuse Github 代码库 加入 Codefuse 相关的微信群 随时提问; 通过以上方式及时了解 Codefuse 项目的开发动态并为您关注的话题发表意见。 贡献方式 这份贡献指南并不仅仅关于编写代码。我们重视并感激在各个领域的帮助。以下是一些您可以贡献的方式 文档 Issue PR 改进文档 文档是您了解 Codefuse 的最主要的方式,也是我们最需要帮助的地方! 浏览文档,可以加深您对 Codefuse 的了解,也可以帮助您理解 Codefuse 的功能和技术细节,如果您发现文档有问题,请及时联系我们; 如果您对改进文档的质量感兴趣,不论是修订一个页面的地址、更正一个链接、以及写一篇更优秀的入门文档,我们都非常欢迎! 
我们的文档大多数是使用 markdown 格式编写的,您可以直接通过在 GitHub 中的 docs/ 中修改并提交文档变更。如果提交代码变更,可以参阅 Pull Request。 如果发现了一个 Bug 或问题 如果发现了一个 Bug 或问题,您可以直接通过 GitHub 的 Issues 提一个新的 Issue,我们会有人定期处理。详情见Issue模板 - - - 如何提交Issue - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4issue/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4issue/ - 中文&nbsp | &nbspEnglish&nbsp Issue Type Issue分为三种类型 Bug: 代码或者执行示例存在bug或缺少依赖导致无法正确执行 Documentation:文档表述存在争议、文档内容与代码不一致等 Feature:在当前代码基础继续演进的新功能 Issue Template Issue: Bug Template 提交Issue前的确认清单 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 我搜索了Codefuse相关的所有文档。 我使用GitHub搜索寻找了一个类似的问题,但没有找到。 我为这个问题添加了一个非常描述性的标题。 系统信息 确认系统,如 mac -xx 、windwos-xx、linux-xx 代码版本 确认代码版本或者分支,master、release等 问题描述 描述您碰到的问题,想要实现的事情、或代码执行Bug 代码示例 附上你的执行代码和相关配置,以便能够快速介入进行复现 报错信息、日志 执行上述代码示例后的报错日志和相关信息 相关依赖的模块 以chatbot项目为例 connector codechat sandbox &hellip; Issue: Documentation Template Issue with current documentation: 请帮忙指出当前文档中的问题、错别字或者令人困惑的地方 Idea or request for content 您觉得合理的文档表述方式应该是什么样的 Issue: Feature Template 提交Issue前的确认清单 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 我搜索了Codefuse相关的所有文档。 我使用GitHub Issue搜索寻找了一个类似的问题,但没有找到。 我为这个问题添加了一个非常描述性的标题。 功能描述 描述这个功能作何用途 - - - 如何提交PR - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4pr/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4pr/ - 中文&nbsp | &nbspEnglish&nbsp Contribution Pre-Checklist 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 找到你想处理的GitHub问题。如果不存在,创建一个问题或草案PR,并请求维护者进行检查。 检查相关的、相似的或重复的拉取请求。 创建一个草案拉取请求。 完成PR模板中的描述。 链接任何被你的PR解决的GitHub问题。 Description PR的描述信息,用简洁的语言表达PR完成的事情,具体规范见Commit 格式规范 Related Issue #xx if has Test Code with Result 请提供相关的测试代码如果有必要的话 Commit 格式规范 Commit 分为“标题”和“内容”。原则上标题全部小写。内容首字母大写。 标题 commit message的标题:[&lt;type&gt;](&lt;scope&gt;) &lt;subject&gt; (#pr) type 可选值 本次提交的类型,限定在以下类型(全小写) fix:bug修复 feature:新增功能 feature-wip:开发中的功能,比如某功能的部分代码。 improvement:原有功能的优化和改进 style:代码风格调整 typo:代码或文档勘误 
refactor:代码重构(不涉及功能变动) performance/optimize:性能优化 test:单元测试的添加或修复 deps:第三方依赖库的修改 community:社区相关的修改,如修改 Github Issue 模板等。 几点说明: 如在一次提交中出现多种类型,需增加多个类型。 如代码重构带来了性能提升,可以同时添加 [refactor][optimize] 不得出现如上所列类型之外的其他类型。如有必要,需要将新增类型添加到这个文档中。 scope 可选值 本次提交涉及的模块范围。因为功能模块繁多,在此仅罗列部分,后续根据需求不断完善。 以 chatbot的框架为例 connector codechat sandbox &hellip; 几点说明: 尽量使用列表中已存在的选项。如需添加,请及时更新本文档。 - - - 致谢 - /zh/contribution/%E8%87%B4%E8%B0%A2/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E8%87%B4%E8%B0%A2/ - CodeFuse-ai 文档主页基于docura构建! ChatBot 项目基于langchain-chatchat和codebox-api! &hellip;&hellip; 在此深深感谢他们的开源贡献! - - - diff --git "a/docs/zh/contribution/\345\246\202\344\275\225\346\217\220\344\272\244issue/index.html" "b/docs/zh/contribution/\345\246\202\344\275\225\346\217\220\344\272\244issue/index.html" deleted file mode 100644 index 9f18307..0000000 --- "a/docs/zh/contribution/\345\246\202\344\275\225\346\217\220\344\272\244issue/index.html" +++ /dev/null @@ -1,358 +0,0 @@ - - - - - - - - -如何提交Issue · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    如何提交Issue

    -
    -
    - - -

    - 中文  |  English  -

    -

    Issue Type

    -

    Issue分为三种类型

    -
      -
    • Bug: 代码或者执行示例存在bug或缺少依赖导致无法正确执行
    • -
    • Documentation:文档表述存在争议、文档内容与代码不一致等
    • -
    • Feature:在当前代码基础继续演进的新功能
    • -
    -

    Issue Template

    -

    Issue: Bug Template

    -

    提交Issue前的确认清单 -
    要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息

    -
      -
    • 我搜索了Codefuse相关的所有文档。
    • -
    • 我使用GitHub搜索寻找了一个类似的问题,但没有找到。
    • -
    • 我为这个问题添加了一个非常描述性的标题。
    • -
    -

    系统信息 -
確认系统,如 mac -xx 、windows-xx、linux-xx

    -

    代码版本 -
    确认代码版本或者分支,master、release等

    -

    问题描述 -
    描述您碰到的问题,想要实现的事情、或代码执行Bug

    -

    代码示例 -
    附上你的执行代码和相关配置,以便能够快速介入进行复现

    -

    报错信息、日志 -
    执行上述代码示例后的报错日志和相关信息

    -

    相关依赖的模块 -
    以chatbot项目为例

    -
      -
    • connector
    • -
    • codechat
    • -
    • sandbox
    • -
    • -
    -

    Issue: Documentation Template

    -

    Issue with current documentation: -
    请帮忙指出当前文档中的问题、错别字或者令人困惑的地方

    -

    Idea or request for content -
    您觉得合理的文档表述方式应该是什么样的

    -

    Issue: Feature Template

    -

    提交Issue前的确认清单 -
    要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息

    -
      -
    • 我搜索了Codefuse相关的所有文档。
    • -
    • 我使用GitHub Issue搜索寻找了一个类似的问题,但没有找到。
    • -
    • 我为这个问题添加了一个非常描述性的标题。
    • -
    -

    功能描述 -
    描述这个功能作何用途

    -

    相关示例 -
    提供参考的文档、仓库等信息,Please provide links to any relevant GitHub repos, papers, or other resources if relevant.

    -

    动机 -
    描述下这个feature的动机,为什么需要这个功能,提供足够的上下文信息帮助理解这个feature的诉求

    -

    Contribution -
    你如何参与到这个feature的构建(如果参与的话)

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/contribution/\345\246\202\344\275\225\346\217\220\344\272\244pr/index.html" "b/docs/zh/contribution/\345\246\202\344\275\225\346\217\220\344\272\244pr/index.html" deleted file mode 100644 index d5813c9..0000000 --- "a/docs/zh/contribution/\345\246\202\344\275\225\346\217\220\344\272\244pr/index.html" +++ /dev/null @@ -1,378 +0,0 @@ - - - - - - - - -如何提交PR · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    如何提交PR

    -
    -
    - - -

    - 中文  |  English  -

    -

    Contribution

    -

    Pre-Checklist

    -
      -
    • 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息
    • -
    • 找到你想处理的GitHub问题。如果不存在,创建一个问题或草案PR,并请求维护者进行检查。
    • -
    • 检查相关的、相似的或重复的拉取请求。
    • -
    • 创建一个草案拉取请求。
    • -
    • 完成PR模板中的描述。
    • -
    • 链接任何被你的PR解决的GitHub问题。
    • -
    -

    Description

    -

    PR的描述信息,用简洁的语言表达PR完成的事情,具体规范见Commit 格式规范

    - -

    #xx if has

    -

    Test Code with Result

    -

    请提供相关的测试代码如果有必要的话

    -

    Commit 格式规范

    -

    Commit 分为“标题”和“内容”。原则上标题全部小写。内容首字母大写。

    -

    标题

    -

    commit message的标题:[<type>](<scope>) <subject> (#pr)

    -

    type 可选值

    -

    本次提交的类型,限定在以下类型(全小写)

    -
      -
    • fix:bug修复
    • -
    • feature:新增功能
    • -
    • feature-wip:开发中的功能,比如某功能的部分代码。
    • -
    • improvement:原有功能的优化和改进
    • -
    • style:代码风格调整
    • -
    • typo:代码或文档勘误
    • -
    • refactor:代码重构(不涉及功能变动)
    • -
    • performance/optimize:性能优化
    • -
    • test:单元测试的添加或修复
    • -
    • deps:第三方依赖库的修改
    • -
    • community:社区相关的修改,如修改 Github Issue 模板等。
    • -
    -

    几点说明:

    -

    如在一次提交中出现多种类型,需增加多个类型。 -如代码重构带来了性能提升,可以同时添加 [refactor][optimize] -不得出现如上所列类型之外的其他类型。如有必要,需要将新增类型添加到这个文档中。

    -

    scope 可选值

    -

    本次提交涉及的模块范围。因为功能模块繁多,在此仅罗列部分,后续根据需求不断完善。 -
    以 chatbot的框架为例

    -
      -
    • connector
    • -
    • codechat
    • -
    • sandbox
    • -
    • -
    -

    几点说明:

    -

    尽量使用列表中已存在的选项。如需添加,请及时更新本文档。

    -

    subject 内容

    -

    标题需尽量清晰表明本次提交的主要内容。

    -

    例: -[feature](coagent)<增加antflow兼容和增加coagent demo>

    -

    示例

    -

coming soon

    -

    Reference

    -

    doris-commit-format

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/contribution/\350\207\264\350\260\242/index.html" "b/docs/zh/contribution/\350\207\264\350\260\242/index.html" deleted file mode 100644 index 1cd85f2..0000000 --- "a/docs/zh/contribution/\350\207\264\350\260\242/index.html" +++ /dev/null @@ -1,298 +0,0 @@ - - - - - - - - -致谢 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    致谢

    -
    -
    - - -

    CodeFuse-ai 文档主页基于docura构建!

    -

    ChatBot 项目基于langchain-chatchatcodebox-api!

    -

    ……

    -

    在此深深感谢他们的开源贡献!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/contribution/\350\264\241\347\214\256\346\214\207\345\215\227/index.html" "b/docs/zh/contribution/\350\264\241\347\214\256\346\214\207\345\215\227/index.html" deleted file mode 100644 index 428f5cf..0000000 --- "a/docs/zh/contribution/\350\264\241\347\214\256\346\214\207\345\215\227/index.html" +++ /dev/null @@ -1,323 +0,0 @@ - - - - - - - - -贡献指南 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    贡献指南

    -
    -
    - - -

    - 中文  |  English  -

    -

    非常感谢您对 Codefuse 项目感兴趣,我们非常欢迎您对 Codefuse 项目的各种建议、意见(包括批评)、评论和贡献。

    -

    您对 Codefuse 的各种建议、意见、评论可以直接通过 GitHub 的 Issues 提出。

    -

    参与 Codefuse 项目并为其作出贡献的方法有很多:代码实现、测试编写、流程工具改进、文档完善等等。任何贡献我们都会非常欢迎,并将您加入贡献者列表.

    -

    进一步,有了足够的贡献后,您还可以有机会成为 Codefuse 的 Committer。

    -

    任何问题,您都可以联系我们得到及时解答,联系方式包括微信、Gitter(GitHub提供的即时聊天工具)、邮件等等。

    -

    初次接触

    -

    初次来到 Codefuse 社区,您可以:

    -
      -
    • 关注 Codefuse Github 代码库
    • -
    • 加入 Codefuse 相关的微信群 随时提问; -通过以上方式及时了解 Codefuse 项目的开发动态并为您关注的话题发表意见。
    • -
    -

    贡献方式

    -

    这份贡献指南并不仅仅关于编写代码。我们重视并感激在各个领域的帮助。以下是一些您可以贡献的方式

    -
      -
    • 文档
    • -
    • Issue
    • -
    • PR
    • -
    -

    改进文档

    -

    文档是您了解 Codefuse 的最主要的方式,也是我们最需要帮助的地方!

    -

    浏览文档,可以加深您对 Codefuse 的了解,也可以帮助您理解 Codefuse 的功能和技术细节,如果您发现文档有问题,请及时联系我们;

    -

    如果您对改进文档的质量感兴趣,不论是修订一个页面的地址、更正一个链接、以及写一篇更优秀的入门文档,我们都非常欢迎!

    -

    我们的文档大多数是使用 markdown 格式编写的,您可以直接通过在 GitHub 中的 docs/ 中修改并提交文档变更。如果提交代码变更,可以参阅 Pull Request。

    -

    如果发现了一个 Bug 或问题

    -

    如果发现了一个 Bug 或问题,您可以直接通过 GitHub 的 Issues 提一个新的 Issue,我们会有人定期处理。详情见Issue模板

    -

    您也可以通过阅读分析代码自己修复(当然在这之前最好能和我们交流下,或许已经有人在修复同样的问题了),然后提交一个 Pull Request。

    -

    修改代码和提交PR(Pull Request)

    -

    您可以下载代码,编译安装,部署运行试一试(可以参考编译文档,看看是否与您预想的一样工作。如果有问题,您可以直接联系我们,提 Issue 或者通过阅读和分析源代码自己修复。详情见如何提交pr

    -

无论是修复 Bug 还是增加 Feature,我们都非常欢迎。如果您希望给 Codefuse 提交代码,您需要从 GitHub 上 fork 代码库至您的项目空间下,为您提交的代码创建一个新的分支,添加源项目为upstream,并提交PR。 提交PR的方式可以参考文档 Pull Request。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/chatbot-\346\212\200\346\234\257\350\267\257\347\272\277/index.html" "b/docs/zh/docs/chatbot-\346\212\200\346\234\257\350\267\257\347\272\277/index.html" deleted file mode 100644 index 0ae3d63..0000000 --- "a/docs/zh/docs/chatbot-\346\212\200\346\234\257\350\267\257\347\272\277/index.html" +++ /dev/null @@ -1,865 +0,0 @@ - - - - - - - - -ChatBot 技术路线 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    ChatBot 技术路线

    -
    -
    - - -

    - 中文  |  English  -

    -

    RoadMap

    -
    - 图片 -
    -
    -

    完整路线

    -
      -
    • Sandbox 环境 ✅ -
        -
      • 环境隔离的sandbox环境与代码执行 ✅
      • -
      • 上传、下载文件 ✅
      • -
      • 支持java执行环境
      • -
      -
    • -
    • Vector Database & Retrieval -
        -
      • task retrieval ✅
      • -
      • tool retrieval ✅
      • -
      -
    • -
    • Prompt Management ✅
    • -
    • memory Management ✅
    • -
    • Multi Agent ✅ -
        -
      • PRD需求文档、系分、接口设计 ⬜
      • -
      • 根据需求文档、系分、接口设计生产代码 ⬜
      • -
      • 自动测试、自动debugger ⬜
      • -
      • 运维流程接入(ToolLearning)⬜
      • -
      • 全流程自动 ⬜
      • -
      -
    • -
    • 基于fastchat接入LLM ✅
    • -
    • 基于sentencebert接入Text Embedding ✅ -
        -
      • 向量加载速度提升 ✅
      • -
      -
    • -
    • Connector ✅ -
        -
      • 基于langchain的react模式 ✅
      • -
      • 基于langchain完成tool检索 ✅
      • -
      -
    • -
    • Web Crawl 通用能力 ✅ -
        -
      • 技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅
      • -
      • issue document ⬜
      • -
      • SDK Library Document ⬜
      • -
      -
    • -
    -



    -
      -
    • v0.0
    • -
    • Sandbox 环境 ✅ -
        -
      • 环境隔离的sandbox环境与代码执行 ✅
      • -
      -
    • -
    • 基于fastchat接入LLM ✅
    • -
    • 基于sentencebert接入Text Embedding ✅
    • -
    • Web Crawl 通用能力:技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅ -
    • -
    • v0.1
    • -
    • Sandbox 环境: 上传、下载文件 ✅
    • -
    • Vector Database & Retrieval ✅ -
        -
      • task retrieval ✅
      • -
      • tool retrieval ✅
      • -
      -
    • -
    • Connector ✅ -
        -
      • 基于langchain的react模式 ✅
      • -
      -
    • -
    • 基于sentencebert接入Text Embedding: 向量加载速度提升 ✅
    • -
    -

    Done -

    -
      -
    • v0.2
    • -
    • Prompt Management ✅
    • -
    • memory Management ✅
    • -
    • Vector Database & Retrieval ✅
    • -
    -

    DDL: 2024.01.31 -

    -
      -
    • v0.3
    • -
    • Sandbox 环境 ✅ -
        -
      • 支持java执行环境 ⬜
      • -
      -
    • -
    • Multi Agent Framework ✅ -
        -
      • PRD需求文档、系分、接口设计 ⬜
      • -
      • 根据需求文档、系分、接口设计生产代码 ⬜
      • -
      • 自动测试、自动debugger ⬜
      • -
      • 运维流程接入(ToolLearning) ⬜
      • -
      • 全流程自动 ⬜
      • -
      -
    • -
    • Web Crawl 通用能力 ✅ -
        -
      • issue document ⬜
      • -
      • SDK Library Document ⬜
      • -
      -
    • -
    -

    DDL: 2024.12.31 -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-chatbot-quickstart-zh/index.html b/docs/zh/docs/codefuse-chatbot-quickstart-zh/index.html deleted file mode 100644 index 2ee8178..0000000 --- a/docs/zh/docs/codefuse-chatbot-quickstart-zh/index.html +++ /dev/null @@ -1,791 +0,0 @@ - - - - - - - - -快速开始 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速开始

    -
    -
    - - -

    - 中文  |  English  -

    -

    🚀 快速使用

    -

    如需使用私有化模型部署,请自行安装 nvidia 驱动程序,本项目已在 Python 3.9.18,CUDA 11.7 环境下,Windows、X86 架构的 macOS 系统中完成测试。

    -

    Docker安装、私有化LLM接入及相关启动问题见:快速使用明细

    -

    python 环境准备

    -
      -
    • 推荐采用 conda 对 python 环境进行管理(可选)
    • -
    -
    # 准备 conda 环境
    -conda create --name devopsgpt python=3.9
    -conda activate devopsgpt
    -
      -
    • 安装相关依赖
    • -
    -
    cd codefuse-chatbot
    -pip install -r requirements.txt
    -

    基础配置

    -
    # 修改服务启动的基础配置
    -cd configs
    -cp model_config.py.example model_config.py
    -cp server_config.py.example server_config.py
    -
    -# model_config#11~12 若需要使用openai接口,openai接口key
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -# 可自行替换自己需要的api_base_url
    -os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
    -
    -# vi model_config#LLM_MODEL 你需要选择的语言模型
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -
    -# vi model_config#EMBEDDING_MODEL 你需要选择的私有化向量模型
    -EMBEDDING_ENGINE = 'model'
    -EMBEDDING_MODEL = "text2vec-base"
    -
    -# 向量模型接入示例,修改 model_config#embedding_model_dict
    -# 若模型地址为:
    -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese
    -# 配置如下
    -"text2vec-base": "shibing624/text2vec-base-chinese"
    -
    -# vi server_config#8~14, 推荐采用容器启动服务,避免使用codeInterpreter功能时安装其它依赖导致环境冲突
    -DOCKER_SERVICE = True
    -# 是否采用容器沙箱
    -SANDBOX_DO_REMOTE = True
    -

    启动服务

    -

    默认只启动webui相关服务,未启动fastchat(可选)。

    -
    # 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁
    -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# examples/llm_api.py#258 修改为 kwargs={"gptq_wbits": 4},
    -
    -# start llm-service(可选)
    -python examples/llm_api.py
    -

    更多LLM接入方法见详情… -

    -
    # 完成server_config.py配置后,可一键启动
    -cd examples
    -python start.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-chatbot-zh/index.html b/docs/zh/docs/codefuse-chatbot-zh/index.html deleted file mode 100644 index 74cc99f..0000000 --- a/docs/zh/docs/codefuse-chatbot-zh/index.html +++ /dev/null @@ -1,623 +0,0 @@ - - - - - - - - -CodeFuse-ChatBot Development by Private Knowledge Augmentation · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-ChatBot Development by Private Knowledge Augmentation

    -
    -
    - - -

    - 中文  |  English  -

    -

    DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。

    -

    📜 目录

    - -

    🤝 介绍

    -

    💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。

    -

    本项目核心差异技术、功能点:

    -
      -
    • 🧠 智能调度核心: 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 使用说明
    • -
    • 💻 代码整库分析: 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。
    • -
    • 📄 文档分析增强: 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。
    • -
    • 🔧 垂类专属知识: 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。
    • -
    • 🤖 垂类模型兼容: 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。
    • -
    -

    🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。接入Demo

    -

    👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。

    -
    - 图片 -
    -

    🎥 演示视频

    -

    为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。

    - -

    🧭 技术路线

    -
    - Image -
    -
      -
    • 🧠 Multi-Agent Schedule Core: 多智能体调度核心,简易配置即可打造交互式智能体。
    • -
    • 🕷️ Multi Source Web Crawl: 多源网络爬虫,提供对指定 URL 的爬取功能,以搜集所需信息。
    • -
    • 🗂️ Data Processor: 数据处理器,轻松完成文档载入、数据清洗,及文本切分,整合不同来源的数据。
    • -
    • 🔤 Text Embedding & Index::文本嵌入索引,用户可以轻松上传文件进行文档检索,优化文档分析过程。
    • -
    • 🗄️ Vector Database & Graph Database: 向量与图数据库,提供灵活强大的数据管理解决方案。
    • -
    • 📝 Prompt Control & Management::Prompt 控制与管理,精确定义智能体的上下文环境。
    • -
    • 🚧 SandBox::沙盒环境,安全地执行代码编译和动作。
    • -
    • 💬 LLM::智能体大脑,支持多种开源模型和 LLM 接口。
    • -
    • 🛠️ API Management:: API 管理工具,实现对开源组件和运维平台的快速集成。
    • -
    -

    具体实现明细见:技术路线明细

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-devops-eval-quickstart-zh/index.html b/docs/zh/docs/codefuse-devops-eval-quickstart-zh/index.html deleted file mode 100644 index 7dd2c9b..0000000 --- a/docs/zh/docs/codefuse-devops-eval-quickstart-zh/index.html +++ /dev/null @@ -1,840 +0,0 @@ - - - - - - - - -评测 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    评测

    -
    -
    - - -

    🚀 如何进行测试

    -

    如果需要在自己的 HuggingFace 格式的模型上进行测试的话,总的步骤分为如下几步:

    -
      -
    1. 编写 Model 的 loader 函数
    2. -
    3. 编写 Model 的 context_builder 函数
    4. -
    5. 注册模型到配置文件中
    6. -
    7. 执行测试脚本 -如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。
    8. -
    -

    1. 编写 loader 函数

    -

    模型加载时还需要做一些额外的处理(e.g. tokenizer 调整),需要继承 ModelAndTokenizerLoader 类来覆写对应的 load_modelload_tokenizer 函数, 如下所示:

    -
    class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader):
    -    def __init__(self):
    -        super().__init__()
    -        pass
    -    
    -    @override
    -    def load_model(self, model_path: str):
    -    # Implementation of the method
    -        pass
    -    
    -    @override
    -    def load_tokenizer(self, model_path: str):
    -    # Implementation of the method
    -        pass
    -

    2. 编写 Model 的 context_builder 函数

    -

    如果输入需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),则需要继承 ContextBuilder 类来覆写 make_context 函数,如下所示:

    -
    class QwenChatContextBuilder(ContextBuilder):
    -    def __init__(self):
    -        super().__init__()
    -        
    -    @override
    -    def make_context(self, model, tokenizer, query: str, system: str = "hello!"):
    -    # Implementation of the method
    -        pass
    -

    3. 注册模型到配置文件中

    -

    去 conf 中的 model_conf.json,注册对应的模型名和这个模型将要使用的 loader 和 context_builder,示例如下:

    -
    {
    -  "Qwen-Chat": {
    -  "loader": "QwenModelAndTokenizerLoader",
    -  "context_builder": "QwenChatContextBuilder"
    -  }
    -}
    -

    4. 执行测试脚本

    -

    直接运行以下代码发起测试

    -
    python src/run_eval.py \
    ---model_path path_to_model \
    ---model_name model_name_in_conf \
    ---model_conf_path path_to_model_conf \
    ---eval_dataset_list all \
    ---eval_dataset_fp_conf_path path_to_dataset_conf \
    ---eval_dataset_type test \
    ---data_path path_to_downloaded_devops_eval_data \
    ---k_shot 0
    -

    👀 👀 具体评测流程见📖 数据集评测教程 -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-devops-eval-zh/index.html b/docs/zh/docs/codefuse-devops-eval-zh/index.html deleted file mode 100644 index 3fac429..0000000 --- a/docs/zh/docs/codefuse-devops-eval-zh/index.html +++ /dev/null @@ -1,583 +0,0 @@ - - - - - - - - -CodeFuse-DevOps-Eval · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-DevOps-Eval

    -
    -
    - - -

    codefuse-devops-eval

    -

    codefuse-devops-eval

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-devops-model-zh/index.html b/docs/zh/docs/codefuse-devops-model-zh/index.html deleted file mode 100644 index 44fe0d7..0000000 --- a/docs/zh/docs/codefuse-devops-model-zh/index.html +++ /dev/null @@ -1,591 +0,0 @@ - - - - - - - - -CodeFuse-DevOps-Model · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-DevOps-Model

    -
    -
    - - -

    codeFuse-devops-model

    -

    codeFuse-devops-model

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-devops/index.html b/docs/zh/docs/codefuse-devops/index.html deleted file mode 100644 index 97c05d3..0000000 --- a/docs/zh/docs/codefuse-devops/index.html +++ /dev/null @@ -1,780 +0,0 @@ - - - - - - - - -CodeFuse-DevOps · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-DevOps

    -
    -
    - - -

    CodeFuse-DevOps

    -

    CodeFuse-DevOps

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-evalution-quickstart-zh/index.html b/docs/zh/docs/codefuse-evalution-quickstart-zh/index.html deleted file mode 100644 index 9cf71bd..0000000 --- a/docs/zh/docs/codefuse-evalution-quickstart-zh/index.html +++ /dev/null @@ -1,1035 +0,0 @@ - - - - - - - - -快速使用 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速使用

    -
    -
    - - -

    推理环境:

    -

    CodeFuse-13B: python 3.8及以上版本,pytorch 2.0及以上版本,transformers 4.24.0及以上版本,CUDA 11.4及以上;

    -

    CodeFuse-CodeLlama-34B: python 3.8及以上版本,pytorch2.0及以上版本,transformers==4.32.0 ,Sentencepiece,CUDA 11.4及以上。

    -

    评测执行环境

    -

    评测生成的代码需要使用多种语言编译、运行。我们使用的各编程语言依赖及所用包的版本如下:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    依赖版本
    Python3.10.9
    JDK18.0.2.1
    Node.js16.14.0
    js-md50.7.3
    C++11
    g++7.5.0
    Boost1.75.0
    OpenSSL3.0.0
    go1.18.4
    cargo1.71.1
    -

    为了省去使用者配置这些语言环境的麻烦,我们构建了一个Docker镜像,并在其中配置了所需要的环境,你可以按照下面的指令拉取使用

    -
    docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest
    -

    如果您熟悉Dockerfile,也可以从codefuseEval/docker/Dockerfile构建镜像,或者修改之以定制自己的配置:

    -
    cd codefuseEval/docker
    -docker build [OPTIONS] .
    -

    获取镜像后,使用如下命令创建容器:

    -
    docker run -it --gpus all --mount type=bind,source=<LOCAL PATH>,target=<PATH IN CONTAINER> [OPTIONS] <IMAGE NAME:TAG>
    -

    检查推理结果指令

    -

    我们提供脚本来检查所提供代码 LLM 的结果。请使用以下脚本检查相应的推理结果。

    -
    bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python
    -bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-13B/humaneval_result_python.jsonl humaneval_python 
    -

    如何使用CodeFuseEval

    -
      -
    1. 下载模型并更新 ckpt config.json 中的当前模型信息。 主要更新对应型号和版本中的「path」参数。
    2. -
    3. 运行以下生成命令以生成结果。
    4. -
    -
    bash codefuseEval/script/generation.sh MODELNAME MODELVERSION EVALDATASET OUTFILE 
    -
    -eg:
    -bash codefuseEval/script/generation.sh CodeFuse-13B v1 humaneval_python result/test.jsonl
    -
      -
    1. 运行以下评估命令来评估相应模型版本的生成结果。
    2. -
    -
    bash codefuseEval/script/evaluation.sh <RESULT_FILE> <METRIC> <PROBLEM_FILE>
    -eg: 
    -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python
    -

    评测说明

    -

    我们推荐使用给定的评测环境进行评测。在评测前,将生成的代码以如下JSON列表形式存储:

    -
    {"task_id": "../..", "generation: "..."}
    -{"task_id": "../..", "generation: "..."}
    -...
    -

    评测数据集

    -

    样本使用JSON列表格式存储在codefuseEval/data中,根据用户所需的下游任务情况,每条样本包含

    -
      -
    • task_id: 题目的目标语言与ID。语言为[“Python”, “Java”, “JavaScript”, “CPP”, “Go”]中之一。
    • -
    • prompt: 函数声明与描述,用于代码生成。
    • -
    • declaration: 仅有函数声明,用于代码翻译。
    • -
    • canonical_solution: 手写的示例解答。
    • -
    • test: 隐藏测例,用于评测。
    • -
    • example_test: 公共测试样本,用于评估生成代码。
    • -
    • prompt_text: prompt文本情况。
    • -
    • prompt_explain: prompt信息说明。
    • -
    • func_title: 生成函数头信息。
    • -
    • prompt_text_chinese: 中文prompt信息。
    • -
    -

    评测指标

    -

    除了目前提供的Codex 中提出的无偏 pass@k 指标之外,我们还将huggingface开源的相关指标与CodeBLEU提出的相似性指标进行集成。 -目前建议用户主要使用的指标如下:

    -
      -
    • codebleu: codebleu相似性评测指标。
    • -
    • pass@k: 无偏pass@k的评测指标。
    • -
    • bleu: 文本相似性指标bleu
    • -
    • bleurt: 文本语义相似性指标bleurt
    • -
    • total_time_cost: 基于被评数据集、模型推理总耗时
    • -
    • Average time cost: 基于被评数据集单个任务、模型推理平均耗时
    • -
    -

    评测命令:

    -
    bash codefuseEval/script/evaluation.sh <RESULT_FILE> <METRIC> <PROBLEM_FILE> <TEST_GROUDTRUTH>
    -eg: 
    -bash codefuseEval/script/evaluation.sh codefuseEval/result/test.jsonl pass@k humaneval_python 
    -

    并在本仓库的根目录下使用如下指令(请谨慎执行,生成的代码可能有极低概率产生意外行为。在execution.py中查看警告并取消执行代码的注释,风险自负):

    -

    同时我们当前提供如下的标志位,可以直接将测试数据集中的示例解答作为生成答案带入进行测试。

    -
      -
    • TEST_GROUDTRUTH 取值为True或False
    • -
    -

    当TEST_GROUDTRUTH为True时,开启self-test模式,将读取PROBLEM_FILE,将示例解答作为生成答案代入进行测试。 -TEST_GROUDTRUTH为False时,开启评测模式,读取RESULT_FILE和将读取PROBLEM_FILE,将生成答案代入进行测试

    -

    更多信息

    -

    使用自己的数据集评估自己的模型

    -

    如果你想用自己的数据集评估自己的模型,可以参考以下步骤:

    -
      -
    1. 注册自己的数据集
    2. -
    -
      -
    • 下载评估数据集并存储在codefuseEval/data或其他目录中。 数据集必须是jsonl格式。
    • -
    • 针对于数据集路径、数据集任务模式task_mode和使用数据集后生成结果的代码语言情况,需要在codefuseEval/util.py中的EVAL_DATASETDATASET_SUPPORTDATASET_LANGUAGE变量中进行设置。
    • -
    -
      -
    1. 注册你的评测模型
    2. -
    -
      -
    • 下载评估模型并存储在codefuseEval/model或其他目录中。
    • -
    • codefuseEval/processor包中编写评估模型处理器代码。
    • -
    -

    处理适配器

    -

    我们设计了一个名为Processor的基础结构,用户可以自己根据推理模型的情况创建自己需要的处理器, 主要目的是为了处理不同模型的区别情况进行处理,主要需要完成3个抽象函数:

    -
    load_model_tokenizer: 由于模型加载参数的区别以及tokenizer的终止符的区别,模型需要使用不同的参数进行适配加载,当前函数主要是为了帮助用户加载适配不同的模型
    -process_before:由于prompt根据用户不同的选择评测任务的类型或不同模型来适配不同的prompt样式,因此抽取出process_before函数主要用来帮助用户处理prompt
    -process_after:由于模型生成结果多样性,为了适配评测框架,方便生成结果数据可以拼接成合适的用例进行自动化运行,当前函数主要是根据任务类型和数据集情况,处理生成结果适配评测数据集和结果进行评测
    -

    您可以在codefuseEval/processor/base.py中查看BaseProcessor情况,创建自己模型的处理器,并实现上述函数功能

    -
      -
    • ckpt_config.json中设置信息模型。 举例如下
    • -
    -
    {
    -  "CodeFuse-13B": {     //模型名称
    -    "v1": {             //模型版本
    -      "path": "/mnt/model/CodeFuse13B-evol-instruction-4K/",       // 模型路径
    -      "processor_class": "codefuseEval.process.codefuse13b.Codefuse13BProcessor",  // 模型处理器路径
    -      "tokenizer": {                 // 将prompt token化时tokenizer传入的参数
    -        "truncation": true,
    -        "padding": true,
    -        "max_length": 600
    -      },
    -      "generation_config": {        //生成配置参数
    -        "greedy": {                 //如果是JsonObject,当前配置的是解码策略,可以通过设置下方「decode_mode」参数来加载生成配置参数中定义的不同的解码策略。
    -          "do_sample": false,
    -          "num_beams": 1,
    -          "max_new_tokens": 512
    -        },
    -        "beams": {
    -          "do_sample": false,
    -          "num_beams": 5,
    -          "max_new_tokens": 600,
    -          "num_return_sequences": 1
    -        },
    -        "dosample": {
    -          "da_sample": true
    -        },
    -        "temperature": 0.2,          //如果不是 JsonObject,它是一个默认参数,我们将在 Generation_config 中设置默认值。 你可以通过读取解码策略中同名参数的方式覆盖当前参数的默认值。
    -        "max_new_tokens": 600,
    -        "num_return_sequences": 1,
    -        "top_p": 0.9,
    -        "num_beams": 1,
    -        "do_sample": true         
    -      },
    -      "batch_size": 1,            // 单次生成的batch size大小
    -      "sample_num": 1,            // 单条评测数据生成的样本数
    -      "decode_mode": "beams"      // 选择在 Generation_config 中定义的解码模式
    -    }
    -  }
    -

    检查数据集命令

    -

    为了检查评估数据集提供的参考值是否正确,我们提供以下命令来检查数据集,针对于已经集成的数据集情况,检查数据集的命令如下所示

    -

    代码补全

    -
    bash codefuseEval/script/check_dataset.sh humaneval_python
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_java
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_js
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_rust
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_go
    -
    -bash codefuseEval/script/check_dataset.sh humaneval_cpp
    -

    自然语言生成代码

    -
    bash codefuseEval/script/check_dataset.sh mbpp
    -

    代码翻译

    -
    bash codefuseEval/script/check_dataset.sh codeTrans_python_to_java
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_python_to_cpp
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_java
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_cpp_to_python
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_python
    -
    -bash codefuseEval/script/check_dataset.sh codeTrans_java_to_cpp
    -

    科学计算

    -
    bash codefuseEval/script/check_dataset.sh codeCompletion_matplotlib
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_numpy
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_pandas
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_pytorch
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_scipy
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_sklearn
    -
    -bash codefuseEval/script/check_dataset.sh codeCompletion_tensorflow
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_matplotlib
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_numpy
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_pandas
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_pytorch
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_scipy
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_sklearn
    -
    -bash codefuseEval/script/check_dataset.sh codeInsertion_tensorflow
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/codefuse-mft-vlm/\345\277\253\351\200\237\344\275\277\347\224\250/index.html" "b/docs/zh/docs/codefuse-mft-vlm/\345\277\253\351\200\237\344\275\277\347\224\250/index.html" deleted file mode 100644 index e2ba7b5..0000000 --- "a/docs/zh/docs/codefuse-mft-vlm/\345\277\253\351\200\237\344\275\277\347\224\250/index.html" +++ /dev/null @@ -1,875 +0,0 @@ - - - - - - - - -快速使用 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速使用

    -
    -
    - - -

    Contents

    - -

    Install

    -

    请执行 sh init_env.sh

    -

    Datasets

    -

    使用了以下数据集训练模型:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    数据集任务种类样本量
    synthdog-enOCR800,000
    synthdog-zhOCR800,000
    cc3m(downsampled)Image Caption600,000
    cc3m(downsampled)Image Caption600,000
    SBUImage Caption850,000
    Visual Genome VQA (Downsampled)Visual Question Answer(VQA)500,000
    Visual Genome Region descriptions (Downsampled)Reference Grouding500,000
    Visual Genome objects (Downsampled)Grounded Caption500,000
    OCR VQA (Downsampled)OCR and VQA500,000
    -

    请到各个数据集的官网上下载这些数据。

    -

    Multimodal Alignment

    -

    请执行 sh scripts/pretrain.sh 或者 sh scripts/pretrain_multinode.sh

    -

    Visual Instruction Tuning

    -

    请执行 sh scripts/finetune.sh 或者 sh scripts/finetune_multinode.sh

    -

    Evaluation

    -

    请执行 llava/eval/ 当中的python脚本. 可以通过下面的代码来加载我们预训练的CodeFuse-VLM-14B:

    -
    import os
    -from llava.model.builder import load_mixed_pretrained_model
    -
    -model_path = '/pretrained/model/path'
    -tokenizer, model, image_processor, context_len = load_mixed_pretrained_model(model_path, None, 'qwen-vl-14b', os.path.join(model_path, 'Qwen-VL-visual'), 'cross_attn', os.path.join(model_path, 'mm_projector/mm_projector.bin'))
    -

    您也可以先运行下面的脚本来合并各个模型组件:scripts/merge_qwen_vl_weights.sh,然后通过下面的代码加载合并后的模型:

    -
    from llava.model import LlavaQWenForCausalLM
    -
    -model = LlavaQWenForCausalLM.from_pretrained('/path/to/our/pretrained/model')
    -

    CodeFuse-VLM 产品视频

    -

    这是我们模型支持的产品的视频

    -

    https://private-user-images.githubusercontent.com/22836551/300398424-201f667d-6b6b-4548-b3e6-724afc4b3071.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjE5MTIsIm5iZiI6MTcwNjUyMTYxMiwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzOTg0MjQtMjAxZjY2N2QtNmI2Yi00NTQ4LWIzZTYtNzI0YWZjNGIzMDcxLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5NDY1MlomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWI0ZmJmZWNlNDZmNWM3NzA0OThlMmY1ODY4MDkxNWY5ZWNiNzRiYjJkYmE4NjEzM2EwYWRiNWY2ODc3N2ViYjEmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.BIvWGNx0XV7RoauxB0c2noEdbfZfu8-16LPHtCaCJ9k

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-modelcache-zh/index.html b/docs/zh/docs/codefuse-modelcache-zh/index.html deleted file mode 100644 index 60b1e80..0000000 --- a/docs/zh/docs/codefuse-modelcache-zh/index.html +++ /dev/null @@ -1,567 +0,0 @@ - - - - - - - - -CodeFuse-ModelCache · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-ModelCache

    -
    -
    - - -

    CodeFuse-ModelCache

    -

    CodeFuse-ModelCache

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-query-introduction-zh/index.html b/docs/zh/docs/codefuse-query-introduction-zh/index.html deleted file mode 100644 index 8d73a67..0000000 --- a/docs/zh/docs/codefuse-query-introduction-zh/index.html +++ /dev/null @@ -1,786 +0,0 @@ - - - - - - - - -CodeFuse-Query 介绍 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-Query 介绍

    -
    -
    - - -

    概述

    -

    CodeFuse-Query 是一个支持对 各种编程语言 进行 结构化分析代码数据平台。核心思想是利用各种语言解析器将所有代码转化为数据,并将其结构化存储到代码数据库中。通过使用自定义查询语言,按照业务需求进行数据分析。如下图所示: -image.png

    -

    2.1 CodeFuse-Query的架构

    -

    从整体上来说,CodeFuse-Query代码数据平台分为三大部分:代码数据模型、代码查询DSL、平台产品化服务。主要工作流程如下图所示:

    -

    image.png

    -

    代码数据化和标准化:COREF

    -

    我们定义了一种代码数据化和标准化的模型:COREF,要求所有代码都要能通过各种语言抽取器转化到该模型。 -COREF主要包含以下几种信息: -COREF = AST (抽象语法树) + ASG(抽象语义图) + CFG(控制流图) + PDG(程序依赖图)+ Call Graph(函数调用图) + Class Hierarchy (类继承关系)+ Documentation(文档/注释信息) -注:由于每种信息的计算难度不一,所以并不是所有语言的COREF信息均包含以上全部信息,基础信息主要有AST、ASG、Call Graph、Class Hierarchy和Documentation,其他信息( CFG 和 PDG )仍在建设中,后续会逐步支持。

    -

    代码查询DSL

    -

    基于生成的COREF代码数据,CodeFuse-Query 使用一种自定义的DSL语言 Gödel 来进行查询,从而完成代码分析需求。 -Gödel是一种逻辑推理语言,它的底层实现是基于逻辑推理语言Datalog,通过描述“事实”和“规则”, 程序可以不断地推导出新的事实。Gödel也是一个声明式语言,相较于命令式编程,声明式编程更加着重描述“要什么”,而把如何实现交给计算引擎。 -既然代码已经转化为关系型数据(COREF数据以关系型数据表的形式存储),相信大家会有疑问,为什么不直接用SQL,或者是直接使用SDK,而是又要专门去学习一个新的DSL语言呢?因为Datalog的计算具备单调性和终止性,简单理解就是,Datalog是在牺牲了表达能力的前提下获得了更高的性能,而Gödel继承了这个特点。

    -
      -
    • 相比较SDK,Gödel的主要优点是易学易用,声明式的描述,用户不需要关注中间的运算过程,只需要像SQL一样简单描述清楚需求即可。
    • -
    • 相比较SQL,Gödel的优点主要是描述能力更强、计算速度更快,例如描述递归算法和多表联合查询,而这些对于SQL来说都是比较困难的。
    • -
    -

    平台化、产品化

    -

    CodeFuse-Query 包括Sparrow CLI 和CodeFuse-Query在线服务Query中心。Sparrow CLI包含了所有组件和依赖,例如抽取器,数据模型,编译器等,用户完全可以通过使用Sparrow CLI在本地进行代码数据生成和查询(Sparrow CLI的使用方式请见 第3节 安装、配置、运行)。如果用户有在线查询的需求,可以使用Query中心进行实验。

    -

    2.2 CodeFuse-Query支持的分析语言

    -

    截至2023-10-31为止,CodeFuse-Query支持对11种编程语言进行数据分析。其中对5种编程语言( Java、JavaScript、TypeScript、XML、Go )的支持度非常成熟,对剩余6种编程语言(Object-C、C++、Python3、Swift、SQL、Properties )的支持度处于beta阶段,还有进一步提升和完善的空间,具体的支持情况见下表:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    语言状态COREF模型节点数
    Java成熟162
    XML成熟12
    TS/JS成熟392
    Go成熟40
    OC/C++beta53/397
    Python3beta93
    Swiftbeta248
    SQLbeta750
    Propertiesbeta9
    -

    注:以上语言状态的成熟程度判断标准是根据COREF包含的信息种类和实际落地情况来进行判定,除了OC/C++外,所有语言均支持了完整的AST信息和Documentation信息,以Java为例,COREF for Java还支持了ASG、Call Graph、Class Hierarchy、以及部分CFG信息。

    -

    2.3 CodeFuse-Query的使用场景

    -

    查询代码特征

    -

    小开发同学想知道 Repo A 里面使用了哪些 String 型的变量,所以他写了一个 Godel 如下,交给 CodeFuse-Query 系统给他返回了结果。

    -
    // script
    -use coref::java::*
    -
    -fn out(var: string) -> bool {
    -  for(v in Variable(JavaDB::load("coref_java_src.db"))) {
    -    if (v.getType().getName() = "String" && var = v.getName()) {
    -      return true
    -    }
    -  }
    -}
    -
    -fn main() {
    -  output(out())
    -}
    -

    类似需求:查询:类,函数,变量,返回值,调用图,类继承等等。

    -

    输出静态分析能力

    -

    小安全是 XX 团队的安全同学,他做了一套系统交叉验证日志数据和代码数据是否一致。为了完成某个分析任务,他计划通过写 Godel 查询出来静态数据 D1,合并动态数据 D2,联合分析得出结论 C。小安全通过在 CodeFuse-Query 上面编写 Godel Query 测试技术上可行之后,使用 CodeFuse-Query 提供的标准 API 将系统对接了起来。 -类似需求:通过静态分析进行系统的卡点,提高测试的效率,通过分析出来的数据合并成说明文档。

    -

    代码规则检查器

    -

    小 TL 同学发现团队总是写出很多类似的 Bug A,他想针对 Bug A 制定一个代码规则和其检查器,并在 CodeReview 阶段做个卡点。小 TL 通过在 CodeFuse-Query 平台上面编写了一段分析 Query,在平台上面测试符合要求,把这段分析 Query 固化下来作为一个代码规则,并上线到了 CodeReview/CI 阶段。从此这个 Bug 再也没发生过了。 -类似需求:编写静态缺陷扫描规则进行代码风险拦截。

    -

    分析代码特性

    -

    研发部同学小框架想知道目前代码仓库中Spring工程和Spring Boot工程比例。 好量化新框架的推广情况。小架构通过编写 Godel Query 描述不同项目分析特征,然后一次性 Query 了 11 万个代码仓库,过了几十分钟后就拿到了所有代码的数据,开开心心做 KPI 去了。 -类似需求:应用画像,代码画像,架构分析。

    -

    获取统计数据

    -

    小研究发现传统的代码复杂度指标很难准确地衡量代码的复杂情况,通过学习国际先进经验加上自我灵光一闪,设计了一套复杂度指标和算法。通过 Godel 实现出来以后,发现不怎么优化就已经性能非常高了,很快就应用到了 10 几种语言,11+万个仓库当中去了。马上就对代码仓库整体的复杂度有了深入的了解。相比较以前需要自己解析代码,分析语法树,对接系统,不知道方便了多少。 -类似需求:代码统计,代码度量,算法设计,学术研究。

    -

    架构分析

    -

    小架构同学最近推行了一种新的基于 txt 文件的消息中间件,目前已有的分析平台都不能支持分析此类系统的上下游依赖。小架构通过 Godel快速建模了该消息格式,并马上获取到了目前系统中不同组件的依赖关系。 -类似需求:系统 Overview,架构治理,血缘分析。

    -

    模型验证

    -

    小促销设计的系统里面要求用户一定是先玩游戏再领券。他通过 Godel 描述了该模型的验证逻辑,然后通过 CodeFuse-Query 系统保障当前以及未来系统的代码实现,都是完全符合该模型的。从此再不担心游戏出资损~ -类似需求:系统验证,网络验证,权限验证

    -

    2.4 CodeFuse-Query的应用领域

    -

    目前,CodeFuse-Query在蚂蚁集团已经支持 CodeFuse大语言模型数据清洗代码度量评估研发风险控制隐私安全分析代码智能、**终端包大小治理 **等多个场景的落地应用,服务月均调用量超过百万。 -image.png

    -

    高质量代码数据清洗 - CodeFuse代码大模型

    -

    CodeFuse代码大模型是蚂蚁集团对外开源的处理代码相关问题的模型,对于CodeFuse大语言模型而言,训练的数据质量直接影响模型的推理结果。低质量的代码数据会直接污染语言模型的输出,例如:模型可能会学习到错误的代码模式,从而生成错误的代码;数据中只包含某种编程语言的代码,模型可能无法很好地适应其他编程语言的代码。 -为了把控进入模型的代码数据质量,进而提升模型的推理能力。我们基于蚂蚁程序分析团队多年的实践积累结合业界共识,梳理了高质量代码的定义方式,并利用已有程序分析技术实现了自动化、大规模的代码数据清洗。 -CodeFuse-Query为CodeFuse代码大模型提供了以下数据清洗能力:

    -
      -
    • 高质量代码数据清洗:对代码数据进行清洗,包括对 Python,Java,JavaScript,TypeScript,Go,C,C++ 7 种语言进行漏洞扫描,对语言种类 / star 数进行筛选,过滤有效代码行数为 0 的数据等。目前已沉淀清洗后的 GitHub 和蚂蚁内部代码数据总共约 2TB
    • -
    • 代码画像:实现对大规模代码进行高性能多维度的自动标注,支持 Java, Scala, Kotlin, JavaScript, JSX, TypeScript, TSX, Vue, Python, Go 等 10 种语言,77 种通用标签,40 种蚂蚁特有标签,共 117 种标签。目前自动标注性能能够达到 40MB/s
    • -
    • 其他原子能力 -
        -
      • 高级代码特征提取,包括提取 AST(抽象语法树),DFG(数据流图)数据等。目前 AST 信息已用于 SFT 训练,准确率 97% 左右。
      • -
      • 代码片段识别,用于针对文本数据中的代码进行提取,方便进行代码格式化或加上 Markdown 格式: -
          -
        • 文本提取代码:从文本中提取代码块信息,支持主流语言的解析,函数及类定义,仅验证二分类问题,就是说仅验证文本是否含有代码块准确率 83% 左右。
        • -
        • 识别代码片段的编程语言种类:识别任意代码片段的编程语言种类,支持 30+ 种语言,准确率80%左右。
        • -
        -
      • -
      • 代码注释对提取:支持提取方法级别的注释-代码对信息,覆盖 15 种 GitHub 最流行的语言,用于 Text To Code/Code To Text 的 SFT 训练。
      • -
      -
    • -
    -

    代码数据指标 - 广目

    -

    广目是蚂蚁内部一款面向不同职能的研发同学和团队管理者,对代码力进行评估、展示客观数据和分析结果的数据产品。 -广目提供了个人代码力评估报告、日常代码力指标数据分析、团队代码力管理、代码评优荣誉展示等功能,旨在帮助蚂蚁研发工程师不断提升代码品质、减少代码负债,更长远的提升研发效能。 -CodeFuse-Query为广目提供的能力分为两部分:

    -
      -
    • 代码评估指标:代码复杂度、代码注释率、标准开发量等
    • -
    • 代码评优指标:代码复用度
    • -
    -

    变更分析-优酷服务端研发效能

    -

    优酷质量保障团队从2023年开始针对服务端精准测试的探索,经过半年的技术沉淀和体系搭建,形成了具备变更内容识别、变更影响分析、测试能力推荐、测试覆盖评估的精准测试体系。 -在此过程中,CodeFuse-Query能提供的能力主要有:

    -
      -
    • 根据代码变更内容(文件+行号),分析出影响的对象:方法、入口(http入口、hsf入口)、调用链路(从入口到变更方法的所有调用链路)、数据库操作(表、操作类型)
    • -
    • 结合线上动态调用链路(方法链路)、CodeFuse-Query静态分析调用链路的影响面精准分析能力,提升变更分析影响面的有效性、准备率
    • -
    -

    到目前为止,优酷已通过CodeFuse-Query接入所有核心应用,并基于静态分析采集数据,构建了服务端完整的代码知识库和流量知识库。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-query-zh/index.html b/docs/zh/docs/codefuse-query-zh/index.html deleted file mode 100644 index 9d8363c..0000000 --- a/docs/zh/docs/codefuse-query-zh/index.html +++ /dev/null @@ -1,535 +0,0 @@ - - - - - - - - -CodeFuse-Query · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-Query

    -
    -
    - - -

    CodeFuse-Query

    -

    CodeFuse-Query

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/codefuse-query/1_abstract/index.html b/docs/zh/docs/codefuse-query/1_abstract/index.html deleted file mode 100644 index a481e60..0000000 --- a/docs/zh/docs/codefuse-query/1_abstract/index.html +++ /dev/null @@ -1,792 +0,0 @@ - - - - - - - - - · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    -
    -
    - - -

    引言

    -

    随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。

    -

    这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 -在 CodeQuery 的实现中,我们把源代码和分析结果看作数据,把执行过程看作大数据处理,这与传统的以工具为中心的方法有着显著的不同。我们利用大型组织中的常见系统,如数据仓库、MaxCompute 和 Hive 等数据计算设施、OSS 对象存储和 Kubernetes 等灵活计算资源,让 CodeQuery 能够无缝地融入这些系统中。这种方法使 CodeQuery 高度可维护和可扩展,能够支持多元化的需求,并有效应对不断变化的需求。此外,CodeQuery 的开放架构鼓励各种内部系统之间的互操作性,实现了无缝的交互和数据交换。这种集成和交互能力不仅提高了组织内部的自动化程度,也提高了效率,降低了手动错误的可能性。通过打破信息孤岛,推动更互联、更自动化的环境,CodeQuery 显著提高了软件开发过程的整体生产力和效率。 -此外,CodeQuery 的以数据为中心的方法在处理静态源代码分析的领域特定挑战时具有独特的优势。例如,源代码通常是一个高度结构化和互联的数据集,与其他代码和配置文件有强烈的信息和连接。将代码视为数据,CodeQuery 可以巧妙地处理这些问题,这使得它特别适合在大型组织中使用,其中代码库持续但逐步地进行演变,大部分代码在每天进行微小的改动同时保持稳定。 CodeQuery 还支持如基于代码数据的商业智能 (BI) 这类用例,能生成报告和仪表板,协助监控和决策过程。此外,CodeQuery 在分析大型语言模型 (LLM) 的训练数据方面发挥了重要作用,提供了增强这些模型整体效果的深入见解。

    -

    在当前的静态分析领域,CodeQuery 带来了一种新的范式。它不仅满足了大规模、复杂的代码库分析需求,还能适应不断变化和多元化的静态分析场景。CodeQuery 的以数据为中心的方法,使得其在处理大数据环境中的代码分析问题时具有独特优势。CodeQuery 的设计,旨在解决大规模软件开发环境中的静态分析问题。它能够将源代码和分析结果视作数据,使得其可以灵活地融入大型组织的各种系统中。这种方法不仅可以有效地处理大规模的代码库,还可以应对各种复杂的分析需求,从而使得静态分析工作变得更加高效和准确。

    -

    CodeQuery 的特点和优势可以概括为以下几点:

    -
      -
    • 高度可扩展:CodeQuery 可以处理大规模的代码库,且能够适应不同的分析需求。这种高度的可扩展性使得 CodeQuery 可以在大型组织中发挥重要作用。
    • -
    • 以数据为中心:CodeQuery 将源代码和分析结果视作数据,这种以数据为中心的方法使其在处理大数据环境中的代码分析问题时具有独特优势。
    • -
    • 高度集成:CodeQuery 能够无缝地融入大型组织的各种系统中,包括数据仓库、数据计算设施、对象存储和灵活计算资源等。这种高度的集成性使得 CodeQuery 在大型组织中的使用变得更加方便和高效。
    • -
    • 支持多元化的需求:CodeQuery 不仅可以处理大规模的代码库,还可以应对各种复杂的分析需求,包括服务质量分析需求、跨编程语言分析需求、算法需求和性能需求等。
    • -
    -

    CodeQuery 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。未来,随着静态代码分析技术的不断发展,CodeQuery 有望在这个领域中扮演更加重要的角色。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/devops_eval/tool_learning_evalution/index.html b/docs/zh/docs/devops_eval/tool_learning_evalution/index.html deleted file mode 100644 index 3d58eb7..0000000 --- a/docs/zh/docs/devops_eval/tool_learning_evalution/index.html +++ /dev/null @@ -1,981 +0,0 @@ - - - - - - - - - · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    -
    -
    - - -

    tool learning 数据集评测教程

    -

    chatml接入方式

    -

    如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步:

    -
      -
    1. 编写 ~/evals/FuncCallEvalution 的 create_prompts 函数
    2. -
    3. 编写 ~/models/base_model 的 相关函数
    4. -
    5. 注册模型和评估函数
    6. -
    7. 执行测试脚本 -如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。
    8. -
    -

    1. 编写 loader 函数

    -

    如果模型在加载进来还需要做一些额外的处理(e.g. tokenizer 调整),需要去 src.context_builder.context_builder_family.py 中继承 ModelAndTokenizerLoader 类来覆写对应的 load_modelload_tokenizer 函数,具体可以参照以下示例:

    -
    class FuncCallEvalution(ToolEvalution):
    -
    -    def create_prompts(self, func_call_datas):
    -        '''
    -        datas: [
    -            {
    -                "instruction": history[his_idx], 
    -                "input": "",
    -                "output": output, 
    -                "history": [(human_content, ai_content), (), ()],
    -                "functions": tools
    -            }
    -        ]
    -        '''
    -        system_content = '''CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。
    -        你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。'''
    -        function_format = '''You are ToolGPT, you have access to the following APIs:\n{tools}'''
    -
    -        func_call_train_datas = []
    -        history_error_cnt = 0
    -        funccall_error_cnt = 0
    -
    -        for data in func_call_datas:
    -            tools = data["functions"]
    -            chatrounds = data["chatrounds"]
    -
    -            function_content = ""
    -            if len(tools) > 0:
    -                function_content = function_format.format(tools=json.dumps(tools, ensure_ascii=False, sort_keys=True))
    -
    -            history = []
    -            for i in chatrounds:
    -                if i["role"]=="system":
    -                    continue
    -
    -                if i["role"]=="user":
    -                    history.append(("user", i["content"]))
    -
    -                if i["role"] == "assistant":
    -                    if "function_call" in i:
    -                        if not isinstance(i["function_call"], dict):
    -                            funccall_error_cnt+=1
    -                            continue
    -                        content  = "#function" + json.dumps({**{"content": i["content"]}, **i["function_call"]}, ensure_ascii=False)
    -                    else:
    -                        content = i["content"]
    -                    history.append(("assistant", content))
    -                                            
    -                    
    -                if i["role"] == "function":
    -                    content  = json.dumps({**{"content": i["content"]}, **{"name": i["name"]}}, ensure_ascii=False)
    -                    history.append(("user", content))
    -                
    -            
    -            history = [i[1] for i in history]
    -            history[0] = "\n".join([system_content,function_content, history[0]])
    -            
    -            for his_idx in range(0, len(history), 2):
    -                output = history[his_idx+1]
    -
    -                if "#function" in output:
    -                    output = output.split("#function")[-1]
    -
    -                try:
    -                    output = json.loads(output)
    -                except:
    -                    output = {"content": output}
    -
    -
    -                func_call_train_datas.append(
    -                    {
    -                        "instruction": history[his_idx], 
    -                        "input": "",
    -                        "output": output, 
    -                        "history": [history[:his_idx+2][i:i+2] for i in range(0, len(history[:his_idx]), 2)],
    -                        "functions": tools
    -                    },
    -                )
    -        return func_call_train_datas
    -

    2. 编写 Model 的 context_builder 函数

    -

    如果输入需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),则需要去 src.context_builder.context_builder_family 中继承 ContextBuilder 类来覆写 make_context 函数,这个函数是用来将输入转换格式为对应需要的输出的,一个示例如下:

    -
    class ToolModel:
    -    def __init__(self, model_path: str, template: str, trust_remote_code=True, tensor_parallel_size=1, gpu_memory_utilization=0.25):
    -        self.model_path = model_path
    -        self.trust_remote_code = trust_remote_code
    -        self.tensor_parallel_size = tensor_parallel_size
    -        self.gpu_memory_utilization = gpu_memory_utilization
    -        self.load_model(self.model_path, self.trust_remote_code, self.tensor_parallel_size, self.gpu_memory_utilization)
    -
    -    def generate(self, prompts: str, template: str = None, generate_configs: GenerateConfigs = None) -> list:
    -        '''产出对应结果'''
    -        pass
    -
    -    def generate_params(
    -        self, generate_configs: GenerateConfigs,
    -    ):
    -        '''generate param'''
    -        kargs = generate_configs.dict()
    -        return kargs
    -        
    -    def load_model(self, model_path, trust_remote_code=True, tensor_parallel_size=1, gpu_memory_utilization=0.25):
    -        '''加载模型'''
    -        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=trust_remote_code)
    -        self.model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map="auto", trust_remote_code=trust_remote_code).eval()
    -
    -        # self.model = LLM(model=model_path, trust_remote_code=trust_remote_code, tensor_parallel_size=tensor_parallel_size, gpu_memory_utilization=gpu_memory_utilization)
    -

    3. 注册模型和eval函数即可

    -

    在 ~/models/init.py 中注册即可

    -
    from .base_model import ToolModel
    -
    -__all__ = [
    -    "ToolModel", 
    -]
    -

    在 ~/evasl/init.py 中注册即可

    -
    from .base_evalution import ToolEvalution
    -from .toolfill_evalution import ToolFillEvalution
    -from .toolparser_evalution import ToolParserEvalution
    -from .toolsummary_evalution import ToolSummaryEvalution
    -from .func_call_evalution import FuncCallEvalution
    -
    -
    -__all__ = [
    -    "ToolEvalution", "ToolFillEvalution", "ToolParserEvalution", "ToolSummaryEvalution", "FuncCallEvalution"
    -]
    -

    4. 执行测试脚本

    -

    修改 ~/src/qwen_eval_main.py# datainfos和model_infos

    -
    model_infos = [
    -    {"model_name": "", "template": "chatml", "model_path": "",
    -     "peft_path": "", "model_class": QwenModel}]
    -
    -datainfos = [
    -    {"dataset_path": "~/fcdata_luban_zh_test.jsonl", "dataset_name": "fcdata_luban_zh", "tool_task": "func_call"},
    -    {"dataset_path": "~/test_datas/fcdata_zh_test_v1.jsonl", "dataset_name": "fcdata_zh", "tool_task": "func_call"},
    -]
    -

    运行下述命令即可

    -
    python qwen_eval_main.py
    -

    -

    非chatml接入

    -

    如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步:

    -
      -
    1. 编写 ~/getAssistantAns.py 相关代码
    2. -
    3. 执行测试脚本
    4. -
    -

    1、编写 getAssistantAns 示例

    -
    class GetAssistantAns():
    -    # 按照自己推理需求自己修改代码
    -
    -    def __init__(self, gpu_num=1):
    -        model = AutoModelForCausalLM.from_pretrained(model_name)
    -        device_list = []
    -        for gpu_idx in range(gpu_num):
    -            device_list.append(torch.device("cuda:0"))
    -
    -        # 将模型移动到指定的GPU设备
    -        model.to(device)
    -
    -
    -    def gen_answer(self, chat_dict, gpu_index):
    -        # 这里实际根据自己推理逻辑 然后转为标准格式返回
    -        # 以下仅仅是样例
    -        import time
    -        print(os.environ["CUDA_VISIBLE_DEVICES"])
    -        time.sleep(1)
    -        rtn_dict1 = {
    -                "role": "assistant",
    -                "content": None,
    -                "function_call":
    -                {
    -                    "name": "get_fudan_university_scoreline",
    -                    "arguments": "{\n  \"year\": \"2020\"\n}"
    -                }
    -            }
    -
    -        rtn_dict2 =  {
    -                "role": "assistant",
    -                "content": "2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分"
    -            }
    -
    -        return random.choice([rtn_dict1, rtn_dict2])
    -

    2、执行测试脚本

    -

    修改 ~/src/opensource_functioncall_evalution.py # test_ans_file_list

    -
    test_ans_file_list = [
    -        "fcdata_zh_test.jsonl"
    -        ]
    -

    运行下述命令即可

    -
    python opensource_functioncall_evalution.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/devops_eval/tool_learning_info_zh/index.html b/docs/zh/docs/devops_eval/tool_learning_info_zh/index.html deleted file mode 100644 index bc588d2..0000000 --- a/docs/zh/docs/devops_eval/tool_learning_info_zh/index.html +++ /dev/null @@ -1,899 +0,0 @@ - - - - - - - - - · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    -
    -
    - - -

    数据样例

    -

    在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下:

    -

    Function Call的数据格式

    - - - - - - - - - - - - - - - - - - - - -
    Input KeyInput TypeInput Description
    functionsList[Swagger]工具集合
    chatroundsList[chatround]多轮对话数据
    -

    chatrounds的数据格式

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Input KeyInput TypeInput Description
    rolestring角色名称,包含三种类别,user、assistant、function
    namestring若role为function,则存在name字段,为function的名称
    contentstringrole的返回内容
    function_calldict工具调用
    -
    {
    -    "functions":
    -    [
    -        {
    -            "name": "get_fudan_university_scoreline",
    -            "description": "查询复旦大学往年分数线,例如:查询2020年复旦大学的分数线",
    -            "parameters":
    -            {
    -                "type": "object",
    -                "properties":
    -                {
    -                    "year":
    -                    {
    -                        "type": "string",
    -                        "description": "年份,例如:2020,2019,2018"
    -                    }
    -                },
    -                "required":
    -                [
    -                    "year"
    -                ]
    -            }
    -        }
    -    ],
    -    "chatrounds":
    -    [
    -        {
    -            "role": "system",
    -            "content": "CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。\n你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。"
    -        },
    -        {
    -            "role": "user",
    -            "content": "查询2020年复旦大学的分数线"
    -        },
    -        {
    -            "role": "assistant",
    -            "content": null,
    -            "function_call":
    -            {
    -                "name": "get_fudan_university_scoreline",
    -                "arguments": "{\n  \"year\": \"2020\"\n}"
    -            }
    -        },
    -        {
    -            "role": "function",
    -            "name": "get_fudan_university_scoreline",
    -            "content": "{\n    \"scoreline\":{\n        \"文科一批\": 630,    \n        \"文科二批\": 610,  \n        \"理科一批\": 650,  \n        \"理科二批\": 630  \n    }\n}"
    -        },
    -        {
    -            "role": "assistant",
    -            "content": "2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分"
    -        }
    -    ]
    -}
    -

    上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。

    -

    评测指标

    -

    由于一般通用模型无法具备工具调用的能力,因此在进行Tool Learn-Eval评测之前需要对通用模型进行微调,先让模型学会工具使用的基本范式

    -

    下面,我们定义了几种评估工具使用的指标:

    - -

    ②③④⑤的和为1,代表工具调用失败的总数,⑤工具幻觉是工具名识别失败的一种特殊情况

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/devops_eval/tutorial_zh/index.html b/docs/zh/docs/devops_eval/tutorial_zh/index.html deleted file mode 100644 index ecaebf1..0000000 --- a/docs/zh/docs/devops_eval/tutorial_zh/index.html +++ /dev/null @@ -1,902 +0,0 @@ - - - - - - - - - · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    -
    -
    - - -

    数据集评测教程

    -

    🚀 如何进行测试

    -

    如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步:

    -
      -
    1. 编写 Model 的 loader 函数
    2. -
    3. 编写 Model 的 context_builder 函数
    4. -
    5. 注册模型到配置文件中
    6. -
    7. 执行测试脚本 -如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。
    8. -
    -

    1. 编写 loader 函数

    -

    如果模型在加载进来还需要做一些额外的处理(e.g. tokenizer 调整),需要去 src.context_builder.context_builder_family.py 中继承 ModelAndTokenizerLoader 类来覆写对应的 load_modelload_tokenizer 函数,具体可以参照以下示例:

    -
    class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader):
    -    def __init__(self):
    -        super().__init__()
    -        pass
    -      
    -    def load_model(self, model_path: str):
    -        model = super().load_model(model_path)
    -        model.generation_config = GenerationConfig.from_pretrained(model_path)
    -        return model
    -    
    -    def load_tokenizer(self, model_path: str):
    -        tokenizer = super().load_tokenizer(model_path)
    -    
    -        # read generation config
    -        with open(model_path + '/generation_config.json', 'r') as f:
    -        generation_config = json.load(f)
    -        tokenizer.pad_token_id = generation_config['pad_token_id']
    -        tokenizer.eos_token_id = generation_config['eos_token_id']
    -        return tokenizer
    -

    2. 编写 Model 的 context_builder 函数

    -

    如果输入需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),则需要去 src.context_builder.context_builder_family 中继承 ContextBuilder 类来覆写 make_context 函数,这个函数是用来将输入转换格式为对应需要的输出的,一个示例如下:

    -
    class QwenChatContextBuilder(ContextBuilder):
    -    def __init__(self):
    -        super().__init__()
    -    
    -    def make_context(
    -        self,
    -        model,
    -        tokenizer, 
    -        query: str,
    -        system: str = "you are a helpful assistant"
    -    ):
    -      '''
    -  model: PretrainedModel
    -  tokenizer: PretrainedTokenzier
    -  query: Input string
    -  system: System prompt if needed
    -  '''
    -        im_start, im_end = "<|im_start|>", "<|im_end|>"
    -        im_start_tokens = [tokenizer.im_start_id]
    -        im_end_tokens = [tokenizer.im_end_id]
    -        nl_tokens = tokenizer.encode("\n")
    -
    -        def _tokenize_str(role, content):
    -            return f"{role}\n{content}", tokenizer.encode(
    -                role, allowed_special=set()
    -            ) + nl_tokens + tokenizer.encode(content, allowed_special=set())
    -
    -        system_text, system_tokens_part = _tokenize_str("system", system)
    -        system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
    -
    -        raw_text = ""
    -        context_tokens = []
    -
    -        context_tokens = system_tokens + context_tokens
    -        raw_text = f"{im_start}{system_text}{im_end}" + raw_text
    -        context_tokens += (
    -            nl_tokens
    -            + im_start_tokens
    -            + _tokenize_str("user", query)[1]
    -            + im_end_tokens
    -            + nl_tokens
    -            + im_start_tokens
    -            + tokenizer.encode("assistant")
    -            + nl_tokens
    -        )
    -        raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
    -        return raw_text, context_tokens
    -

    3. 注册模型到配置文件中

    -

    去 conf 中的 model_conf.json,注册对应的模型名和这个模型将要使用的 loader 和 context_builder,其中 loader 和 context_builder 写第一步和第二步中自定义的类名就可以,示例如下:

    -
    {
    -  "Qwen-Chat": {
    -  "loader": "QwenModelAndTokenizerLoader",
    -  "context_builder": "QwenChatContextBuilder"
    -  }
    -}
    -

    4. 执行测试脚本

    -

    直接运行以下代码发起测试

    -
    # model_path: 要测试的模型路径
    -# model_name: 模型配置文件对应的模型命名,默认为 Default ,代表走默认的 loader 和 context_builder
    -# model_conf_path: 模型配置文件的地址,一般就为 conf 路径下的 devopseval_dataset_fp.json
    -# eval_dataset_list: 要测试的数据集名称,默认 all,全部测试,如果需要测试单个或者多个,用 # 符号链接,示例:dataset1#dataset2
    -# eval_dataset_fp_conf_path: 数据集配置地址
    -# eval_dataset_type: 测试哪种类型,只支持默认 test 类型的测试集
    -# data_path: 评测数据集地址,填写下载数据集后的地址就可以
    -# k_shot: 支持 0-5,代表 few-shot 会给模型前缀加的示例数量
    -
    -  
    -python src/run_eval.py \
    ---model_path path_to_model \
    ---model_name model_name_in_conf \
    ---model_conf_path path_to_model_conf \
    ---eval_dataset_list all \
    ---eval_dataset_fp_conf_path path_to_dataset_conf \
    ---eval_dataset_type test \
    ---data_path path_to_downloaded_devops_eval_data \
    ---k_shot 0
    -

    举个🌰:比如评测数据集下载到了 folder1,代码放在了 folder2,模型在 folder3,模型不需要自定义 loader 和 context_builder,需要测试所有的数据集的 zero-shot 得分,那可以按照以下脚本发起测试:

    -
    python folder2/src/run_eval.py \
    ---model_path folder3 \
    ---model_name Default \
    ---model_conf_path folder1/conf/model_conf.json \
    ---eval_dataset_list all \
    ---eval_dataset_fp_conf_path folder1/conf/devopseval_dataset_fp.json \
    ---eval_dataset_type test \
    ---data_path folder2 \
    ---k_shot 0
    -

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/fastertransformer4codefuse-zh/index.html b/docs/zh/docs/fastertransformer4codefuse-zh/index.html deleted file mode 100644 index 9f31d2b..0000000 --- a/docs/zh/docs/fastertransformer4codefuse-zh/index.html +++ /dev/null @@ -1,784 +0,0 @@ - - - - - - - - -FasterTransformer4CodeFuse · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    FasterTransformer4CodeFuse

    -
    -
    - - -

    FasterTransformer4CodeFuse

    -

    FasterTransformer4CodeFuse

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/index.xml b/docs/zh/docs/index.xml deleted file mode 100644 index ae33fd0..0000000 --- a/docs/zh/docs/index.xml +++ /dev/null @@ -1,347 +0,0 @@ - - - - Docs on CodeFuse-AI - /zh/docs/ - Recent content in Docs on CodeFuse-AI - Hugo -- gohugo.io - en-CN - - - - /zh/docs/codefuse-query/1_abstract/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-query/1_abstract/ - 引言 随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。 这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 在 CodeQuery 的实现中,我们把源代码和分析结果看作数据,把执行过程看作大数据处理,这与传统的以工具为中心的方法有着显著的不同。我们利用大型组织中的常见系统,如数据仓库、MaxCompute 和 Hive 等数据计算设施、OSS 对象存储和 Kubernetes 等灵活计算资源,让 CodeQuery 能够无缝地融入这些系统中。这种方法使 CodeQuery 高度可维护和可扩展,能够支持多元化的需求,并有效应对不断变化的需求。此外,CodeQuery 的开放架构鼓励各种内部系统之间的互操作性,实现了无缝的交互和数据交换。这种集成和交互能力不仅提高了组织内部的自动化程度,也提高了效率,降低了手动错误的可能性。通过打破信息孤岛,推动更互联、更自动化的环境,CodeQuery 显著提高了软件开发过程的整体生产力和效率。 此外,CodeQuery 的以数据为中心的方法在处理静态源代码分析的领域特定挑战时具有独特的优势。例如,源代码通常是一个高度结构化和互联的数据集,与其他代码和配置文件有强烈的信息和连接。将代码视为数据,CodeQuery 可以巧妙地处理这些问题,这使得它特别适合在大型组织中使用,其中代码库持续但逐步地进行演变,大部分代码在每天进行微小的改动同时保持稳定。 CodeQuery 还支持如基于代码数据的商业智能 (BI) 这类用例,能生成报告和仪表板,协助监控和决策过程。此外,CodeQuery 在分析大型语言模型 (LLM) 的训练数据方面发挥了重要作用,提供了增强这些模型整体效果的深入见解。 在当前的静态分析领域,CodeQuery 带来了一种新的范式。它不仅满足了大规模、复杂的代码库分析需求,还能适应不断变化和多元化的静态分析场景。CodeQuery 的以数据为中心的方法,使得其在处理大数据环境中的代码分析问题时具有独特优势。CodeQuery 的设计,旨在解决大规模软件开发环境中的静态分析问题。它能够将源代码和分析结果视作数据,使得其可以灵活地融入大型组织的各种系统中。这种方法不仅可以有效地处理大规模的代码库,还可以应对各种复杂的分析需求,从而使得静态分析工作变得更加高效和准确。 CodeQuery 的特点和优势可以概括为以下几点: 高度可扩展:CodeQuery 可以处理大规模的代码库,且能够适应不同的分析需求。这种高度的可扩展性使得 CodeQuery 可以在大型组织中发挥重要作用。 以数据为中心:CodeQuery 将源代码和分析结果视作数据,这种以数据为中心的方法使其在处理大数据环境中的代码分析问题时具有独特优势。 高度集成:CodeQuery 
能够无缝地融入大型组织的各种系统中,包括数据仓库、数据计算设施、对象存储和灵活计算资源等。这种高度的集成性使得 CodeQuery 在大型组织中的使用变得更加方便和高效。 支持多元化的需求:CodeQuery 不仅可以处理大规模的代码库,还可以应对各种复杂的分析需求,包括服务质量分析需求、跨编程语言分析需求、算法需求和性能需求等。 CodeQuery 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。未来,随着静态代码分析技术的不断发展,CodeQuery 有望在这个领域中扮演更加重要的角色。 - - - - /zh/docs/devops_eval/tool_learning_evalution/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/devops_eval/tool_learning_evalution/ - tool learning 数据集评测教程 chatml接入方式 如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步: 编写 ~/evals/FuncCallEvalution 的 create_prompts 函数 编写 ~/models/base_model 的 相关函数 注册模型和评估函数 执行测试脚本 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 1. 编写 loader 函数 如果模型在加载进来还需要做一些额外的处理(e.g. tokenizer 调整),需要去 src.context_builder.context_builder_family.py 中继承 ModelAndTokenizerLoader 类来覆写对应的 load_model 和 load_tokenizer 函数,具体可以参照以下示例: class FuncCallEvalution(ToolEvalution): def create_prompts(self, func_call_datas): &#39;&#39;&#39; datas: [ { &#34;instruction&#34;: history[his_idx], &#34;input&#34;: &#34;&#34;, &#34;output&#34;: output, &#34;history&#34;: [(human_content, ai_content), (), ()], &#34;functions&#34;: tools } ] &#39;&#39;&#39; system_content = &#39;&#39;&#39;CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。 你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。&#39;&#39;&#39; function_format = &#39;&#39;&#39;You are ToolGPT, you have access to the following APIs:\n{tools}&#39;&#39;&#39; func_call_train_datas = [] history_error_cnt = 0 funccall_error_cnt = 0 for data in func_call_datas: tools = data[&#34;functions&#34;] chatrounds = data[&#34;chatrounds&#34;] function_content = &#34;&#34; if len(tools) &gt; 0: function_content = function_format. 
- - - - /zh/docs/devops_eval/tool_learning_info_zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/devops_eval/tool_learning_info_zh/ - 数据样例 在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下: Function Call的数据格式 Input Key Input Type Input Description functions List[Swagger] 工具集合 chatrounds List[chatround] 多轮对话数据 chatrounds的数据格式 Input Key Input Type Input Description role string 角色名称,包含三种类别,user、assistant、function name string 若role为function,则存在name字段,为function的名称 content string role的返回内容 function_call dict 工具调用 { &#34;functions&#34;: [ { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;description&#34;: &#34;查询复旦大学往年分数线,例如:查询2020年复旦大学的分数线&#34;, &#34;parameters&#34;: { &#34;type&#34;: &#34;object&#34;, &#34;properties&#34;: { &#34;year&#34;: { &#34;type&#34;: &#34;string&#34;, &#34;description&#34;: &#34;年份,例如:2020,2019,2018&#34; } }, &#34;required&#34;: [ &#34;year&#34; ] } } ], &#34;chatrounds&#34;: [ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。\n你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。&#34; }, { &#34;role&#34;: &#34;user&#34;, &#34;content&#34;: &#34;查询2020年复旦大学的分数线&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: null, &#34;function_call&#34;: { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;arguments&#34;: &#34;{\n \&#34;year\&#34;: \&#34;2020\&#34;\n}&#34; } }, { &#34;role&#34;: &#34;function&#34;, &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;content&#34;: &#34;{\n \&#34;scoreline\&#34;:{\n \&#34;文科一批\&#34;: 630, \n \&#34;文科二批\&#34;: 610, \n \&#34;理科一批\&#34;: 650, \n \&#34;理科二批\&#34;: 630 \n }\n}&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: &#34;2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分&#34; } ] } 上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。 - - - - 
/zh/docs/devops_eval/tutorial_zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/devops_eval/tutorial_zh/ - 数据集评测教程 🚀 如何进行测试 如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步: 编写 Model 的 loader 函数 编写 Model 的 context_builder 函数 注册模型到配置文件中 执行测试脚本 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 1. 编写 loader 函数 如果模型在加载进来还需要做一些额外的处理(e.g. tokenizer 调整),需要去 src.context_builder.context_builder_family.py 中继承 ModelAndTokenizerLoader 类来覆写对应的 load_model 和 load_tokenizer 函数,具体可以参照以下示例: class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass def load_model(self, model_path: str): model = super().load_model(model_path) model.generation_config = GenerationConfig.from_pretrained(model_path) return model def load_tokenizer(self, model_path: str): tokenizer = super().load_tokenizer(model_path) # read generation config with open(model_path + &#39;/generation_config. - - - ChatBot 技术路线 - /zh/docs/chatbot-%E6%8A%80%E6%9C%AF%E8%B7%AF%E7%BA%BF/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/chatbot-%E6%8A%80%E6%9C%AF%E8%B7%AF%E7%BA%BF/ - 中文&nbsp | &nbspEnglish&nbsp RoadMap 完整路线 Sandbox 环境 ✅ 环境隔离的sandbox环境与代码执行 ✅ 上传、下载文件 ✅ 支持java执行环境 Vector Database &amp; Retrieval task retrieval ✅ tool retrieval ✅ Prompt Management ✅ memory Management ✅ Multi Agent ✅ PRD需求文档、系分、接口设计 ⬜ 根据需求文档、系分、接口设计生产代码 ⬜ 自动测试、自动debugger ⬜ 运维流程接入(ToolLearning)⬜ 全流程自动 ⬜ 基于fastchat接入LLM ✅ 基于sentencebert接入Text Embedding ✅ 向量加载速度提升 ✅ Connector ✅ 基于langchain的react模式 ✅ 基于langchain完成tool检索 ✅ Web Crawl 通用能力 ✅ 技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅ issue document ⬜ SDK Library Document ⬜ v0.0 Sandbox 环境 ✅ 环境隔离的sandbox环境与代码执行 ✅ 基于fastchat接入LLM ✅ 基于sentencebert接入Text Embedding ✅ Web Crawl 通用能力:技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅ v0. 
- - - CodeFuse-ChatBot Development by Private Knowledge Augmentation - /zh/docs/codefuse-chatbot-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-chatbot-zh/ - 中文&nbsp | &nbspEnglish&nbsp DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。 📜 目录 🤝 介绍 🎥 演示视频 🧭 技术路线 🤝 介绍 💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。 本项目核心差异技术、功能点: 🧠 智能调度核心: 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 使用说明 💻 代码整库分析: 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。 📄 文档分析增强: 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。 🔧 垂类专属知识: 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。 🤖 垂类模型兼容: 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。 🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。接入Demo 👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。 🎥 演示视频 为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。 知识库导入和问答:演示视频 本地代码库导入和问答:演示视频 🧭 技术路线 🧠 Multi-Agent Schedule Core: 多智能体调度核心,简易配置即可打造交互式智能体。 🕷️ Multi Source Web Crawl: 多源网络爬虫,提供对指定 URL 的爬取功能,以搜集所需信息。 🗂️ Data Processor: 数据处理器,轻松完成文档载入、数据清洗,及文本切分,整合不同来源的数据。 🔤 Text Embedding &amp; Index::文本嵌入索引,用户可以轻松上传文件进行文档检索,优化文档分析过程。 🗄️ Vector Database &amp; Graph Database: 向量与图数据库,提供灵活强大的数据管理解决方案。 📝 Prompt Control &amp; Management::Prompt 控制与管理,精确定义智能体的上下文环境。 🚧 SandBox::沙盒环境,安全地执行代码编译和动作。 💬 LLM::智能体大脑,支持多种开源模型和 LLM 接口。 🛠️ API Management:: API 管理工具,实现对开源组件和运维平台的快速集成。 具体实现明细见:技术路线明细 - - - CodeFuse-ChatBot Development by Private Knowledge Augmentation - /zh/docs/overview/codefuse-chatbot-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-chatbot-zh/ - 中文&nbsp | &nbspEnglish&nbsp DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。 📜 目录 🤝 介绍 🎥 演示视频 🧭 技术路线 🤝 介绍 
💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。 本项目核心差异技术、功能点: 🧠 智能调度核心: 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 使用说明 💻 代码整库分析: 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。 📄 文档分析增强: 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。 🔧 垂类专属知识: 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。 🤖 垂类模型兼容: 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。 🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。接入Demo 👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。 🎥 演示视频 为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。 知识库导入和问答:演示视频 本地代码库导入和问答:演示视频 🧭 技术路线 🧠 Multi-Agent Schedule Core: 多智能体调度核心,简易配置即可打造交互式智能体。 🕷️ Multi Source Web Crawl: 多源网络爬虫,提供对指定 URL 的爬取功能,以搜集所需信息。 🗂️ Data Processor: 数据处理器,轻松完成文档载入、数据清洗,及文本切分,整合不同来源的数据。 🔤 Text Embedding &amp; Index::文本嵌入索引,用户可以轻松上传文件进行文档检索,优化文档分析过程。 🗄️ Vector Database &amp; Graph Database: 向量与图数据库,提供灵活强大的数据管理解决方案。 📝 Prompt Control &amp; Management::Prompt 控制与管理,精确定义智能体的上下文环境。 🚧 SandBox::沙盒环境,安全地执行代码编译和动作。 💬 LLM::智能体大脑,支持多种开源模型和 LLM 接口。 🛠️ API Management:: API 管理工具,实现对开源组件和运维平台的快速集成。 具体实现明细见:技术路线明细 - - - CodeFuse-DevOps - /zh/docs/codefuse-devops/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops/ - CodeFuse-DevOps CodeFuse-DevOps - - - CodeFuse-DevOps-Eval - /zh/docs/codefuse-devops-eval-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops-eval-zh/ - codefuse-devops-eval codefuse-devops-eval - - - CodeFuse-DevOps-Eval - /zh/docs/overview/codefuse-devops-eval-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-devops-eval-zh/ - DevOps-Eval是一个专门为DevOps领域大模型设计的综合评估数据集。我们希望DevOps-Eval能够帮助开发者,尤其是DevOps领域的开发者,追踪进展并分析他们拥有的DevOps大模型的优势和不足之处。 📚 该仓库包含与DevOps和AIOps相关的问题和练习, 还添加了关于ToolLearning相关的样本。 💥 目前有 7486 个多项选择题,根据DevOps的通用流程将其归纳未8个模块,如下图所示。 🔥 AIOps样本总计 2840 个,覆盖的场景包括日志解析、时序异常检测、时序分类、时序预测和根因分析。 🔧 ToolLearning样本 
1509 个,涵盖59个领域,总计 239 种工具类别。 🏆 排行榜 以下是我们获得的初版评测结果,包括多个开源模型的zero-shot和five-shot准确率。我们注意到,对于大多数指令模型来说,five-shot的准确率要优于zero-shot。 👀 DevOps Zero Shot 模型 plan code build test release deploy operate monitor 平均分 DevOpsPal-14B-Chat 60.61 78.35 84.86 84.65 87.26 82.75 69.89 79.17 78.23 DevOpsPal-14B-Base 54.55 77.82 83.49 85.96 86.32 81.96 71.18 82.41 78.23 Qwen-14B-Chat 60.61 75.4 85.32 84.21 89.62 82.75 69.57 80.56 77.18 Qwen-14B-Base 57.58 73.81 84.4 85. - - - CodeFuse-DevOps-Model - /zh/docs/codefuse-devops-model-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops-model-zh/ - codeFuse-devops-model codeFuse-devops-model - - - CodeFuse-DevOps-Model - /zh/docs/overview/codefuse-devops-model-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-devops-model-zh/ - codeFuse-devops-model DevOps-Model 是蚂蚁集团联合北京大学发布面向中文 DevOps 领域的大语言模型,通过收集 DevOps 领域相关的专业数据,再针对模型进行语言模型的加训和对齐训练,产出可以帮助工程师在整个开发运维生命周期提效的大模型。弥补当前大模型在 DevOps 领域的缺失,旨在做到有问题,问 DevOps-Model ! 当前我们已经开源了 7B 和 14B 两种规格的经过加训得 Base 模型和经过对齐后的 Chat 模型,同时还开源了对应的训练代码,欢迎大家一起合作建设! 
项目地址 Github 地址:https://github.com/codefuse-ai/CodeFuse-DevOps-Model/tree/main ModelScope 地址: DevOps-Model-7B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Base/summary DevOps-Model-7B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Chat/summary DevOps-Model-14B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Base/summary DevOps-Model-14B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Chat/summary 评测考题 针对模型评测,最初并没有这样的一个 benchmark 用来 DevOps 领域进行测试,所以我们首先选用了一些通用开源测试中和 DevOps 领域相关的选择题进行测试,具体测试数据如下: 数据集 考试科目 题目总数 CMMLU Computer science 204 Computer security 171 Machine learning 122 CEval college programming 37 CEval computer_architecture 21 CEval computer_network 19 总计 总计题目数 574 评测方式 由于都是单选题,我们采用的是选取模型产出的第一个 Token 中四个选项 Token 中得分最高的作为模型对于问题的回答。同时我们还测试了 Zero-shot 和 Five-shot 的结果。 - - - CodeFuse-MFT-VLM - /zh/docs/overview/codefuse-mft-vlm/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-mft-vlm/ - CodeFuse-VLM CodeFuse-VLM 是一个多模态大语言模型框架,该框架为用户提供多种视觉编码器,模态对齐模块和大语言模型的选择,以适配用户对不同任务的需求。 随着huggingface开源社区的不断更新,会有更多的vision encoder 和 LLM 底座发布,这些vision encoder 和 LLM底座都有各自的强项,例如 code-llama 适合生成代码类任务,但是不适合生成中文类的任务;因此我们搭建了CodeFuse-VLM 框架,支持多种视觉模型和语言大模型,使得CodeFuse-VLM可以适应不同种类的任务。 我们在CodeFuse-VLM 框架下, 使用Qwen-VL的视觉编码器, cross attention模态对齐模块, 和 Qwen-14B 模型训练了 CodeFuse-VLM-14B CodeFuse-VLM-14B 在多个benchmarks 上的性能超过了Qwen-VL和LLAVA-1.5 各个模型得分如下表所示: 模型 MMBench MMBench-CN VqaV2 GQA TextVQA Vizwiz LLAVA-1.5 67.7 63.6 80.0 63.3 61.3 53.6 Qwen-VL 60.6 56.7 78.2 57.5 63.8 38.9 CodeFuse-VLM-14B 75.7 69.8 79.3 59.4 63.9 45.3 我们的模型在MMBenchmark 多模态大模型榜单上取得了很高的排名: https://mmbench.opencompass.org.cn/leaderboard 这是我们模型的展示视频 
https://private-user-images.githubusercontent.com/22836551/300386230-8e64f615-ac0e-447e-9695-c96b254d484f.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjExODksIm5iZiI6MTcwNjUyMDg4OSwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzODYyMzAtOGU2NGY2MTUtYWMwZS00NDdlLTk2OTUtYzk2YjI1NGQ0ODRmLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5MzQ0OVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQ5NzNjM2U1ZWU4NDU0Yzc5NmE4ZTM1NzY2ZjU4YjRjY2ZhNjMzODk0ZDgzMDg4N2FjYjZhYTllM2E3NTAyMWQmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.pr-ad7rKYBgk26DTItj2q2q9I5dRWnBNHbV9M7GSVCo - - - CodeFuse-ModelCache - /zh/docs/codefuse-modelcache-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-modelcache-zh/ - CodeFuse-ModelCache CodeFuse-ModelCache - - - CodeFuse-ModelCache - /zh/docs/overview/codefuse-modelcache-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-modelcache-zh/ - 中文 | English Contents 新闻 项目简介 架构大图 致谢 Contributing 新闻 🔥🔥[2023.12.10] 增加llmEmb、onnx、paddlenlp、fasttext等LLM embedding框架,并增加timm 图片embedding框架,用于提供更丰富的embedding能力。 🔥🔥[2023.11.20] codefuse-ModelCache增加本地存储能力, 适配了嵌入式数据库sqlite、faiss,方便用户快速启动测试。 [2023.10.31] codefuse-ModelCache&hellip; 项目简介 Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义Cache的相关技术。 架构大图 致谢 本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。 GPTCache Contributing ModelCache是一个非常有趣且有用的项目,我们相信这个项目有很大的潜力,无论你是经验丰富的开发者,还是刚刚入门的新手,都欢迎你为这个项目做出一些贡献,包括但不限于:提交问题和建议,参与代码编写,完善文档和示例。你的参与将会使这个项目变得更好,同时也会为开源社区做出贡献。 - - - CodeFuse-Query - /zh/docs/codefuse-query-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-query-zh/ - CodeFuse-Query CodeFuse-Query - - - CodeFuse-Query - 
/zh/docs/overview/codefuse-query-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-query-zh/ - CodeFuse-Query 随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。 这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 在 CodeQuery 的实现中,我们把源代码和分析结果看作数据,把执行过程看作大数据处理,这与传统的以工具为中心的方法有着显著的不同。我们利用大型组织中的常见系统,如数据仓库、MaxCompute 和 Hive 等数据计算设施、OSS 对象存储和 Kubernetes 等灵活计算资源,让 CodeQuery 能够无缝地融入这些系统中。这种方法使 CodeQuery 高度可维护和可扩展,能够支持多元化的需求,并有效应对不断变化的需求。此外,CodeQuery 的开放架构鼓励各种内部系统之间的互操作性,实现了无缝的交互和数据交换。这种集成和交互能力不仅提高了组织内部的自动化程度,也提高了效率,降低了手动错误的可能性。通过打破信息孤岛,推动更互联、更自动化的环境,CodeQuery 显著提高了软件开发过程的整体生产力和效率。 此外,CodeQuery 的以数据为中心的方法在处理静态源代码分析的领域特定挑战时具有独特的优势。例如,源代码通常是一个高度结构化和互联的数据集,与其他代码和配置文件有强烈的信息和连接。将代码视为数据,CodeQuery 可以巧妙地处理这些问题,这使得它特别适合在大型组织中使用,其中代码库持续但逐步地进行演变,大部分代码在每天进行微小的改动同时保持稳定。 CodeQuery 还支持如基于代码数据的商业智能 (BI) 这类用例,能生成报告和仪表板,协助监控和决策过程。此外,CodeQuery 在分析大型语言模型 (LLM) 的训练数据方面发挥了重要作用,提供了增强这些模型整体效果的深入见解。 在当前的静态分析领域,CodeQuery 带来了一种新的范式。它不仅满足了大规模、复杂的代码库分析需求,还能适应不断变化和多元化的静态分析场景。CodeQuery 的以数据为中心的方法,使得其在处理大数据环境中的代码分析问题时具有独特优势。CodeQuery 的设计,旨在解决大规模软件开发环境中的静态分析问题。它能够将源代码和分析结果视作数据,使得其可以灵活地融入大型组织的各种系统中。这种方法不仅可以有效地处理大规模的代码库,还可以应对各种复杂的分析需求,从而使得静态分析工作变得更加高效和准确。 CodeQuery 的特点和优势可以概括为以下几点: 高度可扩展:CodeQuery 可以处理大规模的代码库,且能够适应不同的分析需求。这种高度的可扩展性使得 CodeQuery 可以在大型组织中发挥重要作用。 以数据为中心:CodeQuery 将源代码和分析结果视作数据,这种以数据为中心的方法使其在处理大数据环境中的代码分析问题时具有独特优势。 高度集成:CodeQuery 能够无缝地融入大型组织的各种系统中,包括数据仓库、数据计算设施、对象存储和灵活计算资源等。这种高度的集成性使得 CodeQuery 在大型组织中的使用变得更加方便和高效。 支持多元化的需求:CodeQuery 不仅可以处理大规模的代码库,还可以应对各种复杂的分析需求,包括服务质量分析需求、跨编程语言分析需求、算法需求和性能需求等。 CodeQuery 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。未来,随着静态代码分析技术的不断发展,CodeQuery 有望在这个领域中扮演更加重要的角色。 - - - CodeFuse-Query 介绍 - 
/zh/docs/codefuse-query-introduction-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-query-introduction-zh/ - 概述 CodeFuse-Query 是一个支持对 各种编程语言 进行 结构化分析 的 代码数据平台。核心思想是利用各种语言解析器将所有代码转化为数据,并将其结构化存储到代码数据库中。通过使用自定义查询语言,按照业务需求进行数据分析。如下图所示: 2.1 CodeFuse-Query的架构 从整体上来说,CodeFuse-Query代码数据平台分为三大部分:代码数据模型、代码查询DSL、平台产品化服务。主要工作流程如下图所示: 代码数据化和标准化:COREF 我们定义了一种代码数据化和标准化的模型:COREF,要求所有代码都要能通过各种语言抽取器转化到该模型。 COREF主要包含以下几种信息: COREF = AST (抽象语法树) + ASG(抽象语义图) + CFG(控制流图) + PDG(程序依赖图)+ Call Graph(函数调用图) + Class Hierarchy (类继承关系)+ Documentation(文档/注释信息) 注:由于每种信息的计算难度不一,所以并不是所有语言的COREF信息均包含以上全部信息,基础信息主要有AST、ASG、Call Graph、Class Hierarchy和Documentation,其他信息( CFG 和 PDG )仍在建设中,后续会逐步支持。 代码查询DSL 基于生成的COREF代码数据,CodeFuse-Query 使用一种自定义的DSL语言 Gödel 来进行查询,从而完成代码分析需求。 Gödel是一种逻辑推理语言,它的底层实现是基于逻辑推理语言Datalog,通过描述“事实”和“规则”, 程序可以不断地推导出新的事实。Gödel也是一个声明式语言,相较于命令式编程,声明式编程更加着重描述“要什么”,而把如何实现交给计算引擎。 既然代码已经转化为关系型数据(COREF数据以关系型数据表的形式存储),相信大家会有疑问,为什么不直接用SQL,或者是直接使用SDK,而是又要专门去学习一个新的DSL语言呢?因为Datalog的计算具备单调性和终止性,简单理解就是,Datalog是在牺牲了表达能力的前提下获得了更高的性能,而Gödel继承了这个特点。 相比较SDK,Gödel的主要优点是易学易用,声明式的描述,用户不需要关注中间的运算过程,只需要像SQL一样简单描述清楚需求即可。 相比较SQL,Gödel的优点主要是描述能力更强、计算速度更快,例如描述递归算法和多表联合查询,而这些对于SQL来说都是比较困难的。 平台化、产品化 CodeFuse-Query 包括Sparrow CLI 和CodeFuse-Query在线服务Query中心。Sparrow CLI包含了所有组件和依赖,例如抽取器,数据模型,编译器等,用户完全可以通过使用Sparrow CLI在本地进行代码数据生成和查询(Sparrow CLI的使用方式请见 第3节 安装、配置、运行)。如果用户有在线查询的需求,可以使用Query中心进行实验。 2.2 CodeFuse-Query支持的分析语言 截至2023-10-31为止,CodeFuse-Query支持对11种编程语言进行数据分析。其中对5种编程语言( Java、JavaScript、TypeScript、XML、Go )的支持度非常成熟,对剩余6种编程语言(Object-C、C++、Python3、Swift、SQL、Properties )的支持度处于beta阶段,还有进一步提升和完善的空间,具体的支持情况见下表: 语言 状态 COREF模型节点数 Java 成熟 162 XML 成熟 12 TS/JS 成熟 392 Go 成熟 40 OC/C++ beta 53/397 Python3 beta 93 Swift beta 248 SQL beta 750 Properties beta 9 注:以上语言状态的成熟程度判断标准是根据COREF包含的信息种类和实际落地情况来进行判定,除了OC/C++外,所有语言均支持了完整的AST信息和Documentation信息,以Java为例,COREF for Java还支持了ASG、Call Graph、Class Hierarchy、以及部分CFG信息。 - - - CodeFuseEval: 代码大语言模型的多任务评估基准 - /zh/docs/overview/b10.codefuse-evalution/ - Mon, 01 Jan 0001 00:00:00 
+0000 - /zh/docs/overview/b10.codefuse-evalution/ - English| CodeFuseEval on ModelScope| CodeFuseEval on Hugging Face CodeFuseEval在HumanEval-x、MBPP的基准上,结合CodeFuse大模型多任务场景,开发的编程领域多任务的评测基准, 可用于评估模型在代码补全,自然语言生成代码,测试用例生成、跨语言代码翻译,中文指令生成代码等多类任务的性能。持续开放中,敬请期待! - - - FasterTransformer4CodeFuse - /zh/docs/fastertransformer4codefuse-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/fastertransformer4codefuse-zh/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - FasterTransformer4CodeFuse - /zh/docs/overview/fastertransformer4codefuse-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/fastertransformer4codefuse-zh/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - MFTCoder - /zh/docs/mftcoder-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/mftcoder-zh/ - MFTCoder MFTCoder - - - MFTCoder 介绍 - /docs/mftcoder-introduction-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-introduction-zh/ - 项目简介 国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架; Codefuse-MFTCoder 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。 项目框架 项目优势 :white_check_mark: 多任务:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去; :white_check_mark: 多模型:支持最新的多个开源模型,包括gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2等; :white_check_mark: 多框架:既支持主流开源的Accelerate+DeepSpeed/FSDP,也支持新开源的ATorch 框架; :white_check_mark: 高效微调:支持LoRA和QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景; 本项目主要内容如下: 同时支持单任务SFT(Supervised FineTuning)和MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等 支持QLoRA低成本高效指令微调、LoRA高效指令微调、全量参数高精度微调。 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如DeepSeek-coder, Mistral, Mistral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA等。 支持lora与base model进行权重合并,推理更便捷。 整理并开源2个指令微调数据集:Evol-instruction-66k和CodeExercise-Python-27k。 开源多个[Codefuse系列指令微调模型权重],具体参见我们的huggingface组织和modelscope组织下的模型:codefuse-ai huggingface or codefuse-ai 魔搭。 - - - MFTCoder: Accelerate + DeepSpeed/FSDP 框架篇 - /docs/mftcoder-accelerate-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-accelerate-zh/ - [中文] 
[English] 1. 更新 🔥 MFTCoder-accelerate 新增支持accelerate + FSDP框架, 支持全量微调和LoRA; 🔥 MFTCoder-accelerate 支持最新更多主流开源模型: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; 🔥 MFTCoder-accelerate 新增self-paced Loss, 用于收敛均衡; 🔥 MFTCoder-accelerate 支持使用accelerate + DeepSpeed框架下支持 全量参数/QLoRA/LoRA微调; 🔥 MFTCoder-accelerate 在训练中支持了多任务微调MFT, 可以同时平衡多个任务的训练,训练的模型支持多任务推理; 🔥 MFTCoder-accelerate 在训练中支持多种模型基座: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen等 2. 数据格式 2.1 训练数据格式 训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 可以参考项目中的xxx.jsonl文件。 { &#34;id&#34;:0, &#34;data_name&#34;:&#34;code-helper&#34;, &#34;chat_rounds&#34;:[ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;你是一个智能代码助手,可以回复用户与代码相关的问题&#34; }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;写一个快速排序&#34; }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;以下是一个快速排序算法xxxxxx&#34; }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;解释一下这段代码&#34; }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;好的,这段代码xxx&#34; } ] } 2. 
- - - MFTCoder: 高效准确的多任务大模型微调框架 - /zh/docs/overview/mftcoder-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/mftcoder-zh/ - 🤗 HuggingFace • 🤖 魔搭 [中文] [English] 目录 新闻 文章 项目简介 环境 训练 模型 数据集 新闻 🔥🔥🔥 [2024/01/17] MFTCoder-v0.3.0发布。新增对Mixtral(MoE), DeepSeek等模型的支持;新增支持FSDP(Fully Sharded Data Parallel);新增Self-paced Loss, 支持多任务收敛均衡。 感兴趣详见微信公众号CodeFuse的文章MFTCoder 重磅升级v0.3.0发布 🔥🔥🔥 [2024/01/17] 开源了CodeFuse-DeepSeek-33B模型,在HumanEval pass@1(greedy decoding)上可以达到78.7%。该模型在Big Code榜单的结果近期发布,请关注公众号获取最新信息。 🔥🔥🔥 [2024/01/17] 开源了CodeFuse-Mixtral-8x7B模型,在HumanEval pass@1(greedy decoding)上可以达到56.1%。感兴趣详见微信公众号CodeFuse的文章MFTCoder提升Mixtral-8x7B混合专家模型的代码能力实践 🔥🔥 [2023/11/07] MFTCoder论文在Arxiv公布,介绍了多任务微调的技术细节。 🔥🔥 [2023/10/20] 开源了CodeFuse-QWen-14B模型,在HumanEval pass@1(greedy decoding)上可以达到48.8%。相比较与基座模型Qwen-14b提升16%。感兴趣详见微信公众号CodeFuse文章 🔥🔥 [2023/09/27] 开源了CodeFuse-StarCoder-15B模型,在HumanEval pass@1(greedy decoding)上可以达到54.9%。 🔥🔥 [2023/09/26] CodeFuse-CodeLlama-34B-4bits量化版本发布,量化后模型在HumanEval pass@1指标为73.8% (贪婪解码)。 🔥🔥 [2023/09/07]MFTCoder微调的模型CodeFuse-CodeLlama-34B在HumanEval Benchmarks的Python Pass@1 取得了74.4%(greedy decoding)的开源SOTA成绩。 🔥🔥 [2023/08/26]MFTCoder-v0.1.0 支持使用LoRA/QLoRA对Code Llama、Llama、Llama2、StarCoder、ChatGLM2、CodeGeeX2、Qwen和GPT-NeoX模型进行微调。 HumanEval表现 模型 HumanEval(Pass@1) 日期 CodeFuse-DeepSeek-33B 78. - - - MFTCoder训练: Atorch框架篇 - /docs/mftcoder-atorch-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-atorch-zh/ - [中文] [English] 1. 更新 🔥 MFTCoder在Atorch框架下支持GPTNeoX模型的微调; 🔥 MFTCoder支持全量的有监督微调; 🔥 MFTCoder支持LoRA微调; 2. 
数据格式 2.1 训练数据格式 训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 可以参考项目中的xxx.jsonl文件。 { &#34;id&#34;:0, &#34;data_name&#34;:&#34;code-helper&#34;, &#34;chat_rounds&#34;:[ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;你是一个智能代码助手,可以回复用户与代码相关的问题&#34;, &#34;chat_round_id&#34;: 0 }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;写一个快速排序&#34;, &#34;chat_round_id&#34;: 1 }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;以下是一个快速排序算法xxxxxx&#34;, &#34;chat_round_id&#34;: 1 }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;解释一下这段代码&#34;, &#34;chat_round_id&#34;: 2 }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;好的,这段代码xxx&#34;, &#34;chat_round_id&#34;: 2 } ] } 2.2 推理数据格式 推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入prompt拼接的方式: &#34;&#34;&#34; &lt;|role_start|&gt;system&lt;|role_end|&gt;这是System指令 &lt;|role_start|&gt;human&lt;|role_end|&gt;这是第1轮用户输入的问题 &lt;|role_start|&gt;bot&lt;|role_end|&gt;这是第1轮模型生成的内容&lt;/s&gt; &lt;|role_start|&gt;human&lt;|role_end|&gt;这是第2轮用户输入的问题 &lt;|role_start|&gt;bot&lt;|role_end|&gt;这是第2轮模型生成的内容&lt;/s&gt; . 
- - - QuickStart - /docs/codefuse-modelcache-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-quickstart-zh/ - ModelCache易于使用,只需1步骤即可构建缓存测试Demo 快速开始 构建Cache Cache的默认接口如下所示: class Cache: # it should be called when start the cache system def __init__(self): self.has_init = False self.cache_enable_func = None self.embedding_func = None self.post_process_messages_func = None self.config = Config() 在创建ModelCache之前,请考虑以下问题: 你将如何为查询生成嵌入向量?(embedding_func) 该函数将文本嵌入到一个用于上下文相似性搜索的密集向量中。ModelCache可以支持多种嵌入上下文的方法:Huggingface、ONNX和SentenceTransformers。默认逻辑中,使用了在中文领域表现更好的huggingface中的text2vec模型。只需将你的嵌入函数初始化为:text2vec.to_embeddings data_manager = get_data_manager(CacheBase(&#34;mysql&#34;, config=mysql_config), VectorBase(&#34;milvus&#34;, dimension=data2vec.dimension, milvus_config=milvus_config)) cache.init( embedding_func=data2vec.to_embeddings, data_manager=data_manager, similarity_evaluation=SearchDistanceEvaluation(), query_pre_embedding_func=query_multi_splicing, insert_pre_embedding_func=insert_multi_splicing, ) 你将在哪里缓存数据?(data_manager缓存存储) 缓存存储用于存储所有标量数据,例如原始问题、提示、答案和访问时间。ModelCache支持多种缓存存储选项,如SQLite、MySQL和OceanBase。未来还将添加更多的NoSQL数据库选项。 你将在哪里存储和搜索向量嵌入?(data_manager向量存储) 向量存储组件用于存储和搜索所有嵌入向量,以便在语义上找到最相似的结果。ModelCache支持使用FAISS等向量搜索库或Milvus等向量数据库。未来还将添加更多的向量数据库和云服务选项。 以下是一些示例: data_manager = get_data_manager(CacheBase(&#34;sqlite&#34;), VectorBase(&#34;faiss&#34;, dimension=data2vec.dimension)) data_manager = get_data_manager(CacheBase(&#34;oceanbase&#34;), VectorBase(&#34;milvus&#34;, dimension=data2vec.dimension)) - - - QuickStart - /docs/mftcoder-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-quickstart-zh/ - 环境 首先, 你需要将CUDA(&gt;=11.4, 推荐11.7)及其相关驱动安装成功,并确保其工作正常, 并且安装基本的torch(&gt;=2.0.0) 在requirements.txt下固定了几个主要的python包的版本,执行如下脚本即可: sh init_env.sh 我们强烈建议您安装flash attention(&gt;=2.1.0, 推荐2.3.6), 安装请参考 https://github.com/Dao-AILab/flash-attention 训练 如果你熟悉大模型训练的各种主流开源资源,例如 transformers, DeepSpeed, FSDP等, 为了用开源项目快速上手高性能微调,我们建议您尝试: 
🚀🚀 MFTCoder-accelerate: Accelerate + DeepSpeed/FSDP Codebase for MFT(Multi-task Finetuning) 如果你想探索一些新兴的训练框架,可以尝试: 🚀 MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning) 模型 使用本项目的训练代码,以及上述训练数据,我们训练并在huggingface, modelscope开源了以下模型。 模型 HuggingFace链接 魔搭 链接 基座模型 训练数据 Batch Size Seq Length 🔥🔥🔥 CodeFuse-DeepSeek-33B h-link m-link DeepSeek-coder-33B 60万 80 4096 🔥🔥🔥 CodeFuse-Mixtral-8x7B h-link m-link Mixtral-8x7B 60万 80 4096 🔥🔥🔥 CodeFuse-CodeLlama-34B h-link m-link CodeLlama-34b-Python 60万 80 4096 🔥🔥🔥 CodeFuse-CodeLlama-34B-4bits h-link m-link CodeLlama-34b-Python 4096 🔥🔥🔥 CodeFuse-StarCoder-15B h-link m-link StarCoder-15B 60万 80 4096 🔥🔥🔥 CodeFuse-QWen-14B h-link m-link Qwen-14b 110万 256 4096 🔥🔥🔥 CodeFuse-CodeGeex2-6B h-link m-link CodeGeex2-6B 110万 256 4096 数据集 目前本项目主要整理了如下指令数据集,并将其整理成统一的数据格式,这两个指令微调数据集是我们多任务训练中数十个任务中的2个,未来我们会陆续开源更多的代码任务指令微调数据集: - - - Test-Agent - /zh/docs/test-agent-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/test-agent-zh/ - Test-Agent Test-Agent - - - Test-Agent: 您的智能测试助理 - /zh/docs/overview/test-agent-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/test-agent-zh/ - 本地Mac M1体验效果 魔搭体验效果 魔搭模型访问链接:ModelScope TestGPT-7B 什么是Test Agent?(Introduction) Test Agent 旨在构建测试领域的“智能体”,融合大模型和质量领域工程化技术,促进质量技术代系升级。我们期望和社区成员一起合作,打造创新的测试领域解决方案,构建24小时在线的测试助理服务,让测试如丝般顺滑。 本期特性(Features) 模型 本期我们开源了测试领域模型TestGPT-7B。模型以CodeLlama-7B为基座,进行了相关下游任务的微调: 多语言测试用例生成(Java/Python/Javascript) 一直以来都是学术界和工业界非常关注的领域,近年来不断有新产品或工具孵化出来,如EvoSuite、Randoop、SmartUnit等。然而传统的用例生成存在其难以解决的痛点问题,基于大模型的测试用例生成在测试用例可读性、测试场景完整度、多语言支持方面都优于传统用例生成工具。本次重点支持了多语言测试用例生成,在我们本次开源的版本中首先包含了Java、Python、Javascript的测试用例生成能力,下一版本中逐步开放Go、C++等语言。 测试用例Assert补全 对当前测试用例现状的分析与探查时,我们发现代码仓库中存在一定比例的存量测试用例中未包含Assert。没有Assert的测试用例虽然能够在回归过程中执行通过,却无法发现问题。因此我们拓展了测试用例Assert自动补全这一场景。通过该模型能力,结合一定的工程化配套,可以实现对全库测试用例的批量自动补全,智能提升项目质量水位。 工程框架 本地模型快速发布和体验工程化框架 ChatBot页面 模型快速启动 私有化部署,本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险,100%安全 后续我们会持续迭代模型和工程化能力: 不断加入更多令人激动的测试域应用场景,如领域知识问答、测试场景分析等 支撑面向测试场景的copilot 
工程框架开放,如测试领域知识智能embedding、测试通用工具API体系、智能测试Agent等,敬请期待! 以7B为基础,逐步扩展至13B、34B模型。欢迎关注! 性能最强的7B测试领域大模型(Model) 目前在TestAgent中,我们默认使用了TestGPT-7B模型。与当前已有开源模型相比,TestGPT-7B模型在用例执行通过率(pass@1)、用例场景覆盖(平均测试场景数)上都处于业界领先水平。 TestGPT-7B模型核心能力的评测结果如下: 多语言测试用例生成 针对模型支持的三种语言:Java、Python、Javascript,Pass@1评测结果如下: Model Java pass@1 Java Average number of test scenarios Python pass@1 Python Average number of test scenarios Javascript pass@1 Javascript Average number of test scenarios TestGPT-7B 48.6% 4.37 35.67% 3.56 36% 2.76 CodeLlama-13B-Instruct 40.54% 1.08 30.57% 1.65 31.7% 3. - - - VSCode插件 - /docs/codefuse-query-toolchain-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-toolchain-zh/ - 开发插件(VSCode) 安装 从VSCode官方插件市场安装(推荐) 插件地址 使用VSIX安装包安装 下载插件 手动从 vsix 安装: 或者使用指令直接从终端安装: code --install-extension [扩展vsix文件路径] 环境准备 Sparrow CLI ,参照 3 安装、配置、运行 扩展特性 本扩展提供了以下功能模块: COREF AST Viewer Gödel Language Server Gödel Language Runner COREF AST Viewer 以下功能需要在扩展设置中设置相关项后启用。目前仅支持于Java语言 Java 文件转成树状的 COREF Node Node 与代码位置的相互定位 在Lib API Viewer 查看 Node 的API,Node 复制 Lib API Viewer:查询与复制使用 Gödel Language Server Features 以下功能均需要在设置扩展后启用。不设置相关项的情况下,语法高亮仍然可用。 错误信息提示 错误信息会随着代码的更新而自动更新。 符号信息提示和补全 包含local变量和全局符号信息的补全提示,关键字等信息会提供对应的使用样例,全局符号信息会提供更详细的内部信息,如包含的成员变量、成员方法、静态方法。 关键字补全和使用样例提示 local 变量类型信息和符号补全 . 
跟随的符号信息和补全 :: 跟随的符号信息和补全 注解使用样例提示 全局符号类型信息 (内部结构,成员方法,静态方法) 跳转到定义 可以通过右键跳转定义或者ctrl/command+left click直接跳转到准确的符号定义位置。 - - - 本地私有化&大模型接口接入 - /zh/docs/%E6%9C%AC%E5%9C%B0%E7%A7%81%E6%9C%89%E5%8C%96%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%8F%A3%E6%8E%A5%E5%85%A5/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/%E6%9C%AC%E5%9C%B0%E7%A7%81%E6%9C%89%E5%8C%96%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%8F%A3%E6%8E%A5%E5%85%A5/ - 中文&nbsp | &nbspEnglish&nbsp 本地私有化/大模型接口接入 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。 本地私有化模型接入 模型地址配置示例,model_config.py配置修改 # 建议:走huggingface接入,尽量使用chat模型,不要使用base,无法获取正确输出 # 注意:当llm_model_dict和VLLM_MODEL_DICT同时存在时,优先启动VLLM_MODEL_DICT中的模型配置 # llm_model_dict 配置接入示例如下 # 1、若把模型放到 ~/codefuse-chatbot/llm_models 路径下 # 若模型地址如下 model_dir: ~/codefuse-chatbot/llm_models/THUDM/chatglm-6b # 参考配置如下 llm_model_dict = { &#34;chatglm-6b&#34;: { &#34;local_model_path&#34;: &#34;THUDM/chatglm-6b&#34;, &#34;api_base_url&#34;: &#34;http://localhost:8888/v1&#34;, # &#34;name&#34;修改为fastchat服务中的&#34;api_base_url&#34; &#34;api_key&#34;: &#34;EMPTY&#34; } } VLLM_MODEL_DICT = { &#39;chatglm2-6b&#39;: &#34;THUDM/chatglm-6b&#34;, } # or 若模型地址如下 model_dir: ~/codefuse-chatbot/llm_models/chatglm-6b llm_model_dict = { &#34;chatglm-6b&#34;: { &#34;local_model_path&#34;: &#34;chatglm-6b&#34;, &#34;api_base_url&#34;: &#34;http://localhost:8888/v1&#34;, # &#34;name&#34;修改为fastchat服务中的&#34;api_base_url&#34; &#34;api_key&#34;: &#34;EMPTY&#34; } } VLLM_MODEL_DICT = { &#39;chatglm2-6b&#39;: &#34;chatglm-6b&#34;, } # 2、若不想移动相关模型到 ~/codefuse-chatbot/llm_models # 同时删除 `模型路径重置` 以下的相关代码,具体见model_config. 
- - - 查询语言介绍 - /docs/codefuse-query-godellanguage-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-godellanguage-zh/ - GödelScript 查询语言 目录 GödelScript 基本概念和语法 简介 基本程序构成 基础类型和编译器内建函数 函数 语句 Schema 数据库 Trait Import Query Ungrounded Error: 未赋值/未绑定错误 查询示例 Java Python JavaScript XML Go 查询调试和优化技巧 Schema 传参导致笛卡尔积过大 多层 for 导致笛卡尔积过大 不要滥用@inline 在本机使用查询脚本流程 GödelScript 基本概念和语法 简介 // script fn hello(greeting: string) -&gt; bool { return greeting = &#34;hello world!&#34; } fn main() { output(hello()) } GödelScript 即 Gödel 查询语言。GödelScript 是 CodeQuery 用于查询和数据处理的领域专用语言 (DSL)。GödelScript 使用了类 Rust 的语法,提供了严格的类型检查、方便快捷的类型推导、智能友好的错误提示信息,使用户能够快速上手。 GödelScript 编译器主要应用场景为: 面向用户编写简单或复杂查询,提供更便捷的写法,提高编写查询的效率; 提供严格类型检查与类型推导,给予更智能的代码修改提示; 提供严格的 ungrounded(未赋值/未绑定) 检测,避免触发 Soufflé Ungrounded Error; Language Server 以及 IDE Extension 支持。 基本程序构成 程序结构 GödelScript 程序可能包含: - - - 概览 - /zh/docs/zh_overview/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/zh_overview/ - HuggingFace | 魔搭社区 | 产品主页 Hello World! This is CodeFuse! 
CodeFuse的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs),涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。 我们非常有激情去构建创新的解决方案来支持全生命周期AI驱动的软件开发,如上图所示。同时,我们也诚邀志同道合的工程师和研究人员加入这个社区,共同构建和增强CodeFuse。 - - - 功能特性 - /docs/codefuse-modelcache-feature-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-feature-zh/ - 功能方面,为了解决huggingface网络问题并提升推理速度,增加了embedding本地推理能力。鉴于SqlAlchemy框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在ModelCache中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 模块 功能 ModelCache GPTCache 基础接口 数据查询接口 &#9745; &#9745; 数据写入接口 &#9745; &#9745; Embedding embedding模型配置 &#9745; &#9745; 大模型embedding层 &#9745; bert模型长文本处理 &#9745; Large model invocation 是否与大模型解耦 &#9745; embeddingg模型本地加载 &#9745; 数据隔离 模型数据隔离 &#9745; &#9745; 超参数隔离 数据库 MySQL &#9745; &#9745; Milvus &#9745; &#9745; OceanBase &#9745; 会话管理 单轮回话 &#9745; &#9745; system指令 &#9745; 多轮回话 &#9745; 数据管理 数据持久化 &#9745; &#9745; 一键清空缓存 &#9745; 租户管理 支持多租户(多模型) &#9745; milvus多表能力 &#9745; 其他 长短对话区分能力 &#9745; 核心功能 在ModelCache中,沿用了GPTCache的主要思想,包含了一系列核心模块:adapter、embedding、similarity和data_manager。adapter模块主要功能是处理各种任务的业务逻辑,并且能够将embedding、similarity、data_manager等模块串联起来;embedding模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank模块用于对召回的向量进行相似度排序和评估;data_manager模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下: 架构调整(轻量化集成):以类redis的缓存模式嵌入到大模型产品中,提供语义缓存能力,不会干扰LLM调用和安全审核等功能,适配所有大模型服务。 多种模型加载方案: 支持加载本地embedding模型,解决huggingface网络连通问题 支持加载多种预训练模型embeding层 数据隔离能力 环境隔离:可依据环境,拉取不同的数据库配置,实现环境隔离(开发、预发、生产) 多租户数据隔离:根据模型动态创建collection,进行数据隔离,用于大模型产品中多个模型/服务数据隔离问题 支持系统指令:采用拼接的方式,解决propmt范式中sys指令问题。 长短文本区分:长文本会给相似评估带来更多挑战,增加了长短文本的区分,可单独配置判断阈值。 milvus性能优化:milvus consistency_level调整为&quot;Session&quot;级别,可以得到更好的性能。 数据管理能力: 一键清空缓存的能力,用于模型升级后的数据管理。 召回hitquery,用于后续的数据分析和模型迭代参考。 异步日志回写能力,用于数据分析和统计 增加model字段和数据统计字段,用于功能拓展。 未来会持续建设的功能: - - - 快速开始 - /zh/docs/codefuse-chatbot-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-chatbot-quickstart-zh/ - 中文&nbsp | &nbspEnglish&nbsp 🚀 快速使用 如需使用私有化模型部署,请自行安装 nvidia 驱动程序,本项目已在 
Python 3.9.18,CUDA 11.7 环境下,Windows、X86 架构的 macOS 系统中完成测试。 Docker安装、私有化LLM接入及相关启动问题见:快速使用明细 python 环境准备 推荐采用 conda 对 python 环境进行管理(可选) # 准备 conda 环境 conda create --name devopsgpt python=3.9 conda activate devopsgpt 安装相关依赖 cd codefuse-chatbot pip install -r requirements.txt 基础配置 # 修改服务启动的基础配置 cd configs cp model_config.py.example model_config.py cp server_config.py.example server_config.py # model_config#11~12 若需要使用openai接口,openai接口key os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; # 可自行替换自己需要的api_base_url os.environ[&#34;API_BASE_URL&#34;] = &#34;https://api.openai.com/v1&#34; # vi model_config#LLM_MODEL 你需要选择的语言模型 LLM_MODEL = &#34;gpt-3. - - - 快速开始 - /docs/codefuse-query-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-quickstart-zh/ - 安装、配置、运行 硬件和软件要求 硬件:4C8G 环境要求:java 1.8 和 python3.8 以上执行环境, 请保证 java python 可执行环境 Sparrow 安装步骤和指导 CodeFuse-Query 下载包是一个 zip 存档,其中包含工具、脚本和各种特定于 CodeFuse-Query 的文件。如果您没有 CodeFuse-Query 许可证,那么下载此存档即表示您同意 CodeFuse-Query 条款和条件。 目前仅支持 mac,linux 系统下使用 CodeFuse-Query,下载地址为:(目前仅给出示例,开源后给出正式下载地址) mac: CodeFuse-Query 2.0.0 linux: CodeFuse-Query 2.0.0 您应该始终使用 CodeFuse-Query 捆绑包,确保版本兼容性 Tips: mac系统下直接下载软件包会提示需要验证开发者 可在安全性设置中进行修改验证 点击仍然允许 详细步骤可参照:Mac 官方文档: 如何在 Mac 上安全地打开 App 或使用xattr -d com.apple.quarantine命令,删除 CodeFuse-Query 被 macOS 赋予的外部属性 xattr -d com.apple.quarantine是一个命令行指令,用于删除文件的 com.apple.quarantine 扩展属性。该扩展属性是 macOS 系统用来标记从外部来源下载的文件或应用程序的属性,以确保安全性。 xattr -d com.apple.quarantine path/to/file 配置和初始化 CodeFuse-Query 开发环境 解压缩:命令行解压或者直接点一下解压缩即可 需要具备 java8 和 python3. 
- - - 快速使用 - /zh/docs/codefuse-evalution-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-evalution-quickstart-zh/ - 推理环境: CodeFuse-13B: python 3.8及以上版本,pytorch 2.0及以上版本,transformers 4.24.0及以上版本,CUDA 11.4及以上; CodeFuse-CodeLlama-34B: python 3.8及以上版本,pytorch2.0及以上版本,transformers==4.32.0 ,Sentencepiece,CUDA 11.4及以上。 评测执行环境 评测生成的代码需要使用多种语言编译、运行。我们使用的各编程语言依赖及所用包的版本如下: 依赖 版本 Python 3.10.9 JDK 18.0.2.1 Node.js 16.14.0 js-md5 0.7.3 C++ 11 g++ 7.5.0 Boost 1.75.0 OpenSSL 3.0.0 go 1.18.4 cargo 1.71.1 为了省去使用者配置这些语言环境的麻烦,我们构建了一个Docker镜像,并在其中配置了所需要的环境,你可以按照下面的指令拉取使用 docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest 如果您熟悉Dockerfile,也可以从codefuseEval/docker/Dockerfile构建镜像,或者修改之以定制自己的配置: cd codefuseEval/docker docker build [OPTIONS] . 获取镜像后,使用如下命令创建容器: docker run -it --gpus all --mount type=bind,source=&lt;LOCAL PATH&gt;,target=&lt;PATH IN CONTAINER&gt; [OPTIONS] &lt;IMAGE NAME:TAG&gt; 检查推理结果指令 我们提供脚本来检查所提供代码 LLM 的结果。请使用以下脚本检查相应的推理结果。 bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python bash codefuseEval/script/check_reference. - - - 快速使用 - /zh/docs/codefuse-mft-vlm/%E5%BF%AB%E9%80%9F%E4%BD%BF%E7%94%A8/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-mft-vlm/%E5%BF%AB%E9%80%9F%E4%BD%BF%E7%94%A8/ - Contents Install Datasets Multimodal Alignment Visual Instruction Tuning Evaluation Install 请执行 sh init_env.sh Datasets 使用了以下数据集训练模型: 数据集 任务种类 样本量 synthdog-en OCR 800,000 synthdog-zh OCR 800,000 cc3m(downsampled) Image Caption 600,000 cc3m(downsampled) Image Caption 600,000 SBU Image Caption 850,000 Visual Genome VQA (Downsampled) Visual Question Answer(VQA) 500,000 Visual Genome Region descriptions (Downsampled) Reference Grouding 500,000 Visual Genome objects (Downsampled) Grounded Caption 500,000 OCR VQA (Downsampled) OCR and VQA 500,000 请到各个数据集的官网上下载这些数据。 Multimodal Alignment 请执行 sh scripts/pretrain. 
- - - 快速使用 - /docs/codefuse-devops-model-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-quickstart-zh/ - 依赖安装 需要先 PIP 安装一下 Github 地址下的 requirement.txt 中的包,可以参考一下代码 pip install -r requirements.txt 模型下载 模型下载相关信息如下: 🤗 Huggingface 地址 - 基座模型 对齐模型 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat 🤖 ModelScope 地址 - 基座模型 对齐模型 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat 找到自己想要下载的 Chat 模型版本,当前提供了 7B 和 14B 的模型 模型使用 根据以下代码来和 Chat 模型进行交互 from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig tokenizer = AutoTokenizer.from_pretrained(&#34;path_to_DevOps-Model-Chat&#34;, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(&#34;path_to_DevOps-Model-Chat&#34;, device_map=&#34;auto&#34;, trust_remote_code=True, bf16=True).eval() # 指定 generation_config model. - - - 快速使用 - /docs/test-agent-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/test-agent-quickstart-zh/ - 快速使用(QuickStart) 前置准备 模型下载 您可在modelscope或huggingface上获取到模型的详细信息并下载模型文件。 需要注意的是: 1)如果您通过modelscope下载模型,下载方式可参考:下载说明; 2)如果您通过huggingface下载模型,请确保您可以正常访问huggingface。 环境安装 python&gt;=3.8 transformers==4.33.2 git clone https://github.com/codefuse-ai/Test-Agent cd Test-Agent pip install -r requirements.txt 在开始运行TestGPT-7B模型之前,请确保你的执行环境拥有大约14GB的显存。 启动服务 项目提供了网页端快速搭建UI的能力能够更直观的展示模型交互和效果,我们可以使用简单的几个命令把前端页面唤醒并实时调用模型能力。在项目目录下,依次启动以下服务: 1.启动controller python3 -m chat.server.controller 2.启动模型worker python3 -m chat.server.model_worker &ndash;model-path models/TestGPT-7B &ndash;device mps (models/TestGPT-7B 为实际模型文件路径) 对于启动方式,可以按需选择以下几种配置选项: &ndash;device mps 用于在Mac电脑上开启GPU加速的选项(Apple Silicon或AMD GPUs); &ndash;device xpu 用于在Intel XPU上开启加速的选项(Intel Data Center and Arc A-Series GPUs); 需安装Intel Extension for PyTorch 设置OneAPI环境变量:source /opt/intel/oneapi/setvars.sh &ndash;device npu 用于在华为AI处理器上开启加速的选项; 需安装Ascend PyTorch Adapter 
设置CANN环境变量:source /usr/local/Ascend/ascend-toolkit/set_env.sh &ndash;device cpu 单独使用CPU运行的选项,不需要GPU; &ndash;num-gpus 2 指定并发gpu运行的选项。 启动web服务 python3 -m chat. - - - 评测 - /zh/docs/codefuse-devops-eval-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops-eval-quickstart-zh/ - 🚀 如何进行测试 如果需要在自己的 HuggingFace 格式的模型上进行测试的话,总的步骤分为如下几步: 编写 Model 的 loader 函数 编写 Model 的 context_builder 函数 注册模型到配置文件中 执行测试脚本 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 1. 编写 loader 函数 模型加载时还需要做一些额外的处理(e.g. tokenizer 调整),需要继承 ModelAndTokenizerLoader 类来覆写对应的 load_model 和 load_tokenizer 函数, 如下所示: class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass @override def load_model(self, model_path: str): # Implementation of the method pass @override def load_tokenizer(self, model_path: str): # Implementation of the method pass 2. 编写 Model 的 context_builder 函数 如果输入需要转换为特定的格式(e. - - - 启动明细 - /zh/docs/%E5%90%AF%E5%8A%A8%E6%98%8E%E7%BB%86/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/%E5%90%AF%E5%8A%A8%E6%98%8E%E7%BB%86/ - 中文&nbsp | &nbspEnglish&nbsp 如需使用私有化模型部署,请自行安装 nvidia 驱动程序。。 python 环境准备 推荐采用 conda 对 python 环境进行管理(可选) # 准备 conda 环境 conda create --name devopsgpt python=3.9 conda activate devopsgpt 安装相关依赖 cd codefuse-chatbot # python=3.9,notebook用最新即可,python=3.8用notebook=6.5.6 pip install -r requirements.txt 沙盒环境准备 windows Docker 安装: Docker Desktop for Windows 支持 64 位版本的 Windows 10 Pro,且必须开启 Hyper-V(若版本为 v1903 及以上则无需开启 Hyper-V),或者 64 位版本的 Windows 10 Home v1903 及以上版本。 【全面详细】Windows10 Docker安装详细教程 Docker 从入门到实践 Docker Desktop requires the Server service to be enabled 处理 安装wsl或者等报错提示 Linux Docker 安装: Linux 安装相对比较简单,请自行 baidu/google 相关安装 - - - 数据 - /zh/docs/%E6%95%B0%E6%8D%AE%E4%BB%8B%E7%BB%8D/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/%E6%95%B0%E6%8D%AE%E4%BB%8B%E7%BB%8D/ - ⏬ 数据 下载 方法一:下载zip压缩文件(你也可以直接用浏览器打开下面的链接): wget 
https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip 然后可以使用 pandas加载数据: import os import pandas as pd File_Dir=&#34;devopseval-exam&#34; test_df=pd.read_csv(os.path.join(File_Dir,&#34;test&#34;,&#34;UnitTesting.csv&#34;)) 方法二:使用Hugging Face datasets直接加载数据集。示例如下: from datasets import load_dataset dataset=load_dataset(r&#34;DevOps-Eval/devopseval-exam&#34;,name=&#34;UnitTesting&#34;) print(dataset[&#39;val&#39;][0]) # {&#34;id&#34;: 1, &#34;question&#34;: &#34;单元测试应该覆盖以下哪些方面?&#34;, &#34;A&#34;: &#34;正常路径&#34;, &#34;B&#34;: &#34;异常路径&#34;, &#34;C&#34;: &#34;边界值条件&#34;,&#34;D&#34;: 所有以上,&#34;answer&#34;: &#34;D&#34;, &#34;explanation&#34;: &#34;&#34;} ``` 方法三:使用modelscope下载相关所有数据。示例如下: from modelscope.msdatasets import MsDataset MsDataset.clone_meta(dataset_work_dir=&#39;./xxx&#39;, dataset_id=&#39;codefuse-ai/devopseval-exam&#39;)``` 👀 说明 为了方便使用,我们已经整理出了 55 个细分类别以及它们的中英文名称。具体细节请查看 category_mapping.json 。格式如下: { &#34;UnitTesting.csv&#34;: [ &#34;unit testing&#34;, &#34;单元测试&#34;, {&#34;dev&#34;: 5, &#34;test&#34;: 32} &#34;TEST&#34; ], ... 
&#34;file_name&#34;:[ &#34;英文名称&#34;, &#34;中文名称&#34;, &#34;样本数量&#34;, &#34;类别(PLAN,CODE,BUILD,TEST,RELEASE,DEPOLY,OPERATE,MONITOR八选一)&#34; ] } 每个细分类别由两个部分组成:dev 和 test。每个细分类别的 dev 集包含五个示范实例以及为 few-shot 评估提供的解释。而 test 集则用于模型评估,并且test数据已包含准确标签。 - - - 训练解析 - /docs/codefuse-devops-model-train-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-train-zh/ - 训练流程 根据查阅文献可知,大部分领域模型都是在对话模型的基础上,通过SFT微调来进行知识注入。而SFT微调所需要QA预料基本都来自于ChatGPT生成。然而,该方案可能存在QA语料无法完全覆盖领域知识的情况。 因此,DevOps-Model采用的是预训练加训 + SFT微调的方案,如图2.1所示。我们认为针对领域大模型,预训练的加训是必要的,因为其可以将领域内的一些知识在预训练阶段注入到大模型,如果这些知识在通用大模型预训练时没有出现过,那会让大模型学习到新的知识;如果出现过,就可以让大模型进一步加深印象。第二步则是大模型对齐,目的是让大模型可以根据问题来回答最合适的内容。 训练数据 数据收集 模型的定位是中文 DevOps 领域大模型,因此收集与中文DevOps相关的预训练数据和QA数据。 预训练数据主要来自互联网技术博客、技术文档、技术书籍等,最终收集到了 50G+ 的预训练语料数据; 针对 QA 数据,我们的目的是想让模型不但对齐到通用的问答能力,而且针对 DevOps 领域也可以学会如何更好的回答问题,因此不但收集了通用领域的单轮和多轮对话数据,还针对 DevOps 领域,通过爬取和 ChatGPT 生成的方式产出了属于 DevOps 领域的问答数据。最终我们精心筛选了约 200K 的 QA 数据进行 SFT微调训练,具体数据量如下表所示。 数据类型 数据量级 通用单轮 QA 50K 通用多轮 QA 20K DevOps 领域 QA 130K 数据筛选 由于预训练数据大部分是从互联网上收集的数据,质量会参差不齐,而大模型训练中数据是最重要的一环,我们建立了如上图所示的清洗 Pipeline,来针对收集到的数据进行质量的全面过滤。 首先,由专家经验和人工筛选,总结出来了一批文档级别的 Heuristic 过滤规则,这一步主要用来过滤掉那些质量非常差的文档; 然后,即便是一篇质量稍差的文章中,也有可能还是含有一些有价值的领域知识,我们也需要尽可能的进行收集。此处,我们对文章进行段落拆分,将文章拆分成一个个段落; 然后,我们将拆分后的段落会再次通过步骤1进行过滤,便得到了一批经过规则过滤后的段落; 然后,我们摘取了其中 1000 个段落,由经验丰富的专业开发人员来进行打标,获得高质量的打标数据; 最后,我们根据打标后的结果来训练了一个打分模型来针对段落进行质量的打分,段落的向量模型选用了预训练好的中文版本的 Sentence-Bert,打分算法选用了逻辑回归,为了避免打分模型的误差,会再通过帕累托分布来根据段落的质量打分进行采样来决定要不要过滤这个段落。 经过这个 Pipeline 后,我们最终沉淀下 15G 左右的数据来进行大模型的预训练加训。 - - - 用户案例 - /docs/codefuse-query-usercase-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-usercase-zh/ - 使用场景 查询代码特征 小开发同学想知道 Repo A 里面使用了哪些 String 型的变量,所以他写了一个 Gödel 如下,交给 CodeFuse-Query 系统给他返回了结果。 // script use coref::java::* fn out(var: string) -&gt; bool { for(v in Variable(JavaDB::load(&#34;coref_java_src.db&#34;))) { if (v.getType().getName() = &#34;String&#34; &amp;&amp; var = v.getName()) { return true } } } fn main() { output(out()) } 
类似需求:查询:类,函数,变量,返回值,调用图,类继承等等。 代码规则检查器 小 TL 同学发现团队总是写出很多类似的 Bug A,他想针对 Bug A 制定一个代码规则和其检查器,并在 CodeReview 阶段做个卡点。小 TL 通过在 CodeFuse-Query 平台上面编写了一段分析 Query,在平台上面测试符合要求,把这段分析 Query 固化下来作为一个代码规则,并上线到了 CodeReview/CI 阶段。从此这个 Bug 再也没发生过了。 类似需求:编写静态缺陷扫描规则进行代码风险拦截。 获取统计数据 小研究发现传统的代码复杂度指标很难准确地衡量代码的复杂情况,通过学习国际先进经验加上自我灵光一闪,设计了一套复杂度指标和算法。通过 Gödel 实现出来以后,发现不怎么优化就已经性能非常高了,很快就应用到了 10 几种语言,11+万个仓库当中去了。马上就对代码仓库整体的复杂度有了深入的了解。相比较以前需要自己解析代码,分析语法树,对接系统,不知道方便了多少。 类似需求:代码统计,代码度量,算法设计,学术研究。 - - - 最佳配置 - /docs/codefuse-modelcache-config-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-config-zh/ - 环境依赖 python版本: 3.8及以上 依赖包安装: pip install requirements.txt 服务启动 在启动服务前,应该进行如下环境配置: 安装关系数据库 mysql, 导入sql创建数据表,sql文件: reference_doc/create_table.sql 安装向量数据库milvus 在配置文件中添加数据库访问信息,配置文件为: modelcache/config/milvus_config.ini modelcache/config/mysql_config.ini 离线模型bin文件下载, 参考地址:https://huggingface.co/shibing624/text2vec-base-chinese/tree/main,并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中 通过flask4modelcache.py脚本启动后端服务。 - - - 最佳配置 - /docs/codefuse-modelcache-release-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-release-zh/ - 时间 功能 版本号 20230430 完成GPTCache调研,开源流程在OpenAI接口上跑通,单节点形式 无 20230509 1、完成技术选型及上下游交互方案 2、重新开发数据库模块,替换SQLalchemy框架 3、重构llm_handler模块,兼容codegpt,适配codegpt模型参数 V0.1.0 20230519 1、根据环境动态选择codegpt服务模式 2、模型本地加载能力,以及预加载能力 3、增加本地路径依据环境动态加载能力 V0.1.1 20230522 1、架构优化,调整为类redis结构,解藕大模型调用 2、关系数据库由sqlite切换至OceanBase 3、向量数据库由faiss切换至milvus 4、模型数据隔离能力 5、增加核心模块adapter_query、adapter_insert V0.2.0 20230531 1、线上环境上线,动态感知能力 2、embedding模型评测及选型 3、增加预发环境及数据隔离能力 4、增加原始query字段透出能力 V0.2.1 20230607 1、优化关系数据库访问性能 2、优化环境和模型隔离能力 V0.2.2 20230630 1、在modelCache中增加大模型embedding层适配模块 2、增加采纳率统计能力 V0.2.3 20230730 1、增加缓存统计功能 2、增加数据删除功能接口 3、缓存一键清空能力上线 4、多轮会话能力研发,支持system指令和多轮对话 v0.3.0 20230830 1、增加异步处理能力,性能提升超20% 2、架构变更,解藕embedding推理和业务处理逻辑 3、黑名单过滤功能 V0.3.1 - - - diff --git a/docs/zh/docs/mftcoder-zh/index.html b/docs/zh/docs/mftcoder-zh/index.html deleted file mode 100644 index 
771d1ef..0000000 --- a/docs/zh/docs/mftcoder-zh/index.html +++ /dev/null @@ -1,543 +0,0 @@ - - - - - - - - -MFTCoder · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder

    -
    -
    - - -

    MFTCoder

    -

    MFTCoder

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/b10.codefuse-evalution/index.html b/docs/zh/docs/overview/b10.codefuse-evalution/index.html deleted file mode 100644 index 72efa1c..0000000 --- a/docs/zh/docs/overview/b10.codefuse-evalution/index.html +++ /dev/null @@ -1,787 +0,0 @@ - - - - - - - - -CodeFuseEval: 代码大语言模型的多任务评估基准 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuseEval: 代码大语言模型的多任务评估基准

    -
    -
    - - - -

    CodeFuseEval在HumanEval-x、MBPP的基准上,结合CodeFuse大模型多任务场景,开发的编程领域多任务的评测基准, 可用于评估模型在代码补全,自然语言生成代码,测试用例生成、跨语言代码翻译,中文指令生成代码等多类任务的性能。持续开放中,敬请期待!

    -

    img

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/codefuse-chatbot-zh/index.html b/docs/zh/docs/overview/codefuse-chatbot-zh/index.html deleted file mode 100644 index 3dfe6f9..0000000 --- a/docs/zh/docs/overview/codefuse-chatbot-zh/index.html +++ /dev/null @@ -1,623 +0,0 @@ - - - - - - - - -CodeFuse-ChatBot Development by Private Knowledge Augmentation · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-ChatBot Development by Private Knowledge Augmentation

    -
    -
    - - -

    - 中文  |  English  -

    -

    DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。

    -

    📜 目录

    - -

    🤝 介绍

    -

    💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。

    -

    本项目核心差异技术、功能点:

    -
      -
    • 🧠 智能调度核心: 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 使用说明
    • -
    • 💻 代码整库分析: 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。
    • -
    • 📄 文档分析增强: 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。
    • -
    • 🔧 垂类专属知识: 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。
    • -
    • 🤖 垂类模型兼容: 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。
    • -
    -

    🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。接入Demo

    -

    👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。

    -
    - 图片 -
    -

    🎥 演示视频

    -

    为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。

    - -

    🧭 技术路线

    -
    - Image -
    -
      -
    • 🧠 Multi-Agent Schedule Core: 多智能体调度核心,简易配置即可打造交互式智能体。
    • -
    • 🕷️ Multi Source Web Crawl: 多源网络爬虫,提供对指定 URL 的爬取功能,以搜集所需信息。
    • -
    • 🗂️ Data Processor: 数据处理器,轻松完成文档载入、数据清洗,及文本切分,整合不同来源的数据。
    • -
    • 🔤 Text Embedding & Index::文本嵌入索引,用户可以轻松上传文件进行文档检索,优化文档分析过程。
    • -
    • 🗄️ Vector Database & Graph Database: 向量与图数据库,提供灵活强大的数据管理解决方案。
    • -
    • 📝 Prompt Control & Management::Prompt 控制与管理,精确定义智能体的上下文环境。
    • -
    • 🚧 SandBox::沙盒环境,安全地执行代码编译和动作。
    • -
    • 💬 LLM::智能体大脑,支持多种开源模型和 LLM 接口。
    • -
    • 🛠️ API Management:: API 管理工具,实现对开源组件和运维平台的快速集成。
    • -
    -

    具体实现明细见:技术路线明细

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/codefuse-devops-eval-zh/index.html b/docs/zh/docs/overview/codefuse-devops-eval-zh/index.html deleted file mode 100644 index 8175f90..0000000 --- a/docs/zh/docs/overview/codefuse-devops-eval-zh/index.html +++ /dev/null @@ -1,1391 +0,0 @@ - - - - - - - - -CodeFuse-DevOps-Eval · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-DevOps-Eval

    -
    -
    - - -

    - -

    DevOps-Eval是一个专门为DevOps领域大模型设计的综合评估数据集。我们希望DevOps-Eval能够帮助开发者,尤其是DevOps领域的开发者,追踪进展并分析他们拥有的DevOps大模型的优势和不足之处。

    -

    📚 该仓库包含与DevOps和AIOps相关的问题和练习, 还添加了关于ToolLearning相关的样本。

    -

    💥 目前有 7486 个多项选择题,根据DevOps的通用流程将其归纳未8个模块,如下图所示。

    -

    🔥 AIOps样本总计 2840 个,覆盖的场景包括日志解析时序异常检测时序分类时序预测根因分析

    -

    🔧 ToolLearning样本 1509 个,涵盖59个领域,总计 239 种工具类别。

    -

    -

    🏆 排行榜

    -

    以下是我们获得的初版评测结果,包括多个开源模型的zero-shot和five-shot准确率。我们注意到,对于大多数指令模型来说,five-shot的准确率要优于zero-shot。

    -

    👀 DevOps

    -

    Zero Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模型plancodebuildtestreleasedeployoperatemonitor平均分
    DevOpsPal-14B-Chat60.6178.3584.8684.6587.2682.7569.8979.1778.23
    DevOpsPal-14B-Base54.5577.8283.4985.9686.3281.9671.1882.4178.23
    Qwen-14B-Chat60.6175.485.3284.2189.6282.7569.5780.5677.18
    Qwen-14B-Base57.5873.8184.485.5386.3281.1870.0580.0976.19
    Baichuan2-13B-Base60.6169.4279.8279.8282.5581.1870.3783.873.73
    Baichuan2-13B-Chat60.6168.4377.9880.781.683.5367.6384.7272.9
    DevOpsPal-7B-Chat54.5569.1183.9482.0276.898064.7377.7871.92
    DevOpsPal-7B-Base54.5568.9682.1178.9580.6676.4765.5478.771.69
    Qwen-7B-Base53.0368.1378.975.4480.198065.0680.0971.09
    Qwen-7B-Chat57.5866.0180.2879.8276.8977.6562.6479.1769.75
    Baichuan2-7B-Chat54.5563.6677.9876.3271.773.3359.4279.6366.97
    Internlm-7B-Chat60.6162.1577.0676.3266.9874.5160.3978.2466.27
    Baichuan2-7B-Base56.0662.4575.6970.6174.0669.861.6775.9366.21
    Internlm-7B-Base54.5558.2979.3678.9577.8370.5965.8675.9365.99
    -

    Five Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模型plancodebuildtestreleasedeployoperatemonitor平均分
    DevOpsPal-14B-Chat63.6479.4981.6585.9686.7986.6772.9581.4879.69
    DevOpsPal-14B-Base62.1280.5582.5785.5385.8584.7171.9880.0979.63
    Qwen-14B-Chat65.157682.5785.5384.9184.3170.8581.4877.81
    Qwen-14B-Base66.6776.1584.485.5386.3280.3972.4680.5677.56
    Baichuan2-13B-Base63.6471.3980.7382.4681.1384.3173.7585.1975.8
    Qwen-7B-Base75.7672.5278.981.1483.9681.1870.3781.9475.36
    Baichuan2-13B-Chat62.1269.9576.6184.2183.4979.6171.9880.5674.12
    DevOpsPal-7B-Chat66.6769.9583.9481.1480.1982.7568.676.8573.61
    DevOpsPal-7B-Base69.769.4982.1181.1482.5582.3567.1579.1773.35
    Qwen-7B-Chat65.1566.5482.5781.5881.681.1865.3881.0271.69
    Baichuan2-7B-Base60.6167.2276.617577.8378.4367.3179.6370.8
    Internlm-7B-Chat60.6163.0679.8280.2667.9275.6960.0677.3169.21
    Baichuan2-7B-Chat60.6164.9581.1975.8871.2375.6964.979.1769.05
    Internlm-7B-Base62.1265.2577.5280.774.0678.8263.4575.4667.17
    -

    🔥 AIOps

    -
    -

    Zero Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模型日志解析根因分析时序异常检测时序分类时序预测平均分
    Qwen-14B-Base66.2958.825.3343.562.552.25
    DevOpsPal-14B—Base63.1453.623.3343.564.0650.49
    Qwen-14B-Chat64.5751.622.673662.548.94
    DevOpsPal-14B—Chat6056244357.8148.8
    Qwen-7B-Base5039.222.675443.7541.48
    DevOpsPal-7B—Chat56.5730.425.334544.0640.92
    Baichuan2-13B-Chat641821.3337.546.8839.3
    Qwen-7B-Chat57.4338.822.3339.525.3136.97
    Internlm-7B—Chat58.868.822.3328.551.2536.34
    Baichuan2-7B-Chat60.86102834.539.0636.34
    Baichuan2-7B-Base53.4312.827.6736.540.3135.49
    Baichuan2-13B-Base5412.42334.542.8134.86
    DevOpsPal-7B—Base46.5720.8253438.7533.94
    Internlm-7B—Base48.5718.823.3337.533.7533.1
    -

    One Shot

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模型日志解析根因分析时序异常检测时序分类时序预测平均分
    DevOpsPal-14B—Chat66.2980.823.3344.556.2554.44
    DevOpsPal-14B—Base607425.3343.552.551.13
    Qwen-14B-Base64.2974.42848.540.3150.77
    Qwen-7B-Base5660.827.674457.1949.44
    Qwen-14B-Chat49.7165.628.674842.1946.13
    Baichuan2-13B-Base5643.224.334146.8842.89
    Baichuan2-7B-Chat58.5731.62731.551.8841.83
    DevOpsPal-7B—Base52.8644.42844.536.2541.2
    Baichuan2-7B-Base48.2940.4274240.9439.86
    Qwen-7B-Chat54.575229.6726.527.1938.73
    Baichuan2-13B-Chat57.4344.42525.530.6337.75
    DevOpsPal-7B—Chat56.5727.225.3341.533.4437.46
    Internlm-7B—Chat62.5712.822.332150.3136.69
    Internlm-7B—Base4833.2293531.5635.85
    -
    -

    🔧 ToolLearning

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FuncCall-Fillerdataset_namefccr1-fcffr1-fcfnr1-fcfpr1-fcfniraar
    Qwen-14b-chatluban6110097.6863.3210069.46
    Qwen-7b-chatluban50.5810098.0752.5110063.59
    Baichuan-7b-chatluban60.2310097.362.9399.6161.12
    Internlm-chat-7bluban47.8810096.1451.7499.6161.85
    Qwen-14b-chatfc_data98.3799.7399.8698.7810081.58
    Qwen-7b-chatfc_data99.4699.8610099.5910079.25
    Baichuan-7b-chatfc_data97.9699.3210098.6410089.53
    Internlm-chat-7bfc_data94.2995.7810098.510088.19
    CodeLLaMa-7bfc_data98.7899.7310099.0510094.7
    CodeLLaMa-7b-16fc_data98.199.8799.7398.510093.14
    CodeFuse-7b-4kfc_data98.9199.8799.8799.1810089.5
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/codefuse-devops-model-zh/index.html b/docs/zh/docs/overview/codefuse-devops-model-zh/index.html deleted file mode 100644 index 3dccec9..0000000 --- a/docs/zh/docs/overview/codefuse-devops-model-zh/index.html +++ /dev/null @@ -1,769 +0,0 @@ - - - - - - - - -CodeFuse-DevOps-Model · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-DevOps-Model

    -
    -
    - - -

    codeFuse-devops-model

    -

    DevOps-Model 是蚂蚁集团联合北京大学发布面向中文 DevOps 领域的大语言模型,通过收集 DevOps 领域相关的专业数据,再针对模型进行语言模型的加训和对齐训练,产出可以帮助工程师在整个开发运维生命周期提效的大模型。弥补当前大模型在 DevOps 领域的缺失,旨在做到有问题,问 DevOps-Model !

    -

    当前我们已经开源了 7B 和 14B 两种规格的经过加训得 Base 模型和经过对齐后的 Chat 模型,同时还开源了对应的训练代码,欢迎大家一起合作建设!

    -

    项目地址

    -

    Github 地址:https://github.com/codefuse-ai/CodeFuse-DevOps-Model/tree/main

    -

    ModelScope 地址:

    -
      -
    • DevOps-Model-7B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Base/summary
    • -
    • DevOps-Model-7B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Chat/summary
    • -
    • DevOps-Model-14B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Base/summary
    • -
    • DevOps-Model-14B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Chat/summary
    • -
    -

    评测考题

    -

    针对模型评测,最初并没有这样的一个 benchmark 用来 DevOps 领域进行测试,所以我们首先选用了一些通用开源测试中和 DevOps 领域相关的选择题进行测试,具体测试数据如下:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    数据集考试科目题目总数
    CMMLUComputer science 204
    Computersecurity171
    Machinelearning122
    CEvalcollege programming37
    CEvalcomputer_architecture21
    CEvalcomputer_network19
    总计总计题目数574
    -

    评测方式

    -

    由于都是单选题,我们采用的是选取模型产出的第一个 Token 中四个选项 Token 中得分最高的作为模型对于问题的回答。同时我们还测试了 Zero-shot 和 Five-shot 的结果。

    -

    评测结果

    -

    -

    具体的得分如下表所示:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    参数量级模型模型大小Zero-shot 得分Five-shot 得分
    10+ BDevOps-Model-14B-Base14B70.7373.00
    10+ BQwen-14B-Base14B69.1671.25
    10+ BBaichuan2-13B-Base13B55.7561.15
    10+ BDevOps-Model-14B-Chat14B74.0475.96
    10+ BQwen-14B-Chat14B69.1670.03
    10+ BBaichuan2-13B-Chat13B52.7955.23
    7BDevOps-Model-7B-Base7B62.7262.02
    7BQwen-7B-Base7B55.7556.0
    7BBaichuan2-7B-Base7B49.3055.4
    7BInternlm-7B-Base7B47.5652.6
    7BDevOps-Model-7B-Chat7B62.2064.11
    7BQwen-7B-Chat7B46.0052.44
    7BBaichuan2-7B-Chat7B52.2654.46
    7BInternlm-7B-Chat7B52.6155.75
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/codefuse-mft-vlm/index.html b/docs/zh/docs/overview/codefuse-mft-vlm/index.html deleted file mode 100644 index df1987c..0000000 --- a/docs/zh/docs/overview/codefuse-mft-vlm/index.html +++ /dev/null @@ -1,835 +0,0 @@ - - - - - - - - -CodeFuse-MFT-VLM · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-MFT-VLM

    -
    -
    - - -

    CodeFuse-VLM

    -

    CodeFuse-VLM 是一个多模态大语言模型框架,该框架为用户提供多种视觉编码器,模态对齐模块和大语言模型的选择,以适配用户对不同任务的需求。

    -

    随着huggingface开源社区的不断更新,会有更多的vision encoder 和 LLM 底座发布,这些vision encoder 和 LLM底座都有各自的强项,例如 code-llama 适合生成代码类任务,但是不适合生成中文类的任务;因此我们搭建了CodeFuse-VLM 框架,支持多种视觉模型和语言大模型,使得CodeFuse-VLM可以适应不同种类的任务。

    -

    img.jpg

    -

    我们在CodeFuse-VLM 框架下, 使用Qwen-VL的视觉编码器, cross attention模态对齐模块, 和 Qwen-14B 模型训练了 CodeFuse-VLM-14B

    -

    CodeFuse-VLM-14B 在多个benchmarks 上的性能超过了Qwen-VL和LLAVA-1.5 -img.jpg

    -

    各个模型得分如下表所示:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模型MMBenchMMBench-CNVqaV2GQATextVQAVizwiz
    LLAVA-1.567.763.680.063.361.353.6
    Qwen-VL60.656.778.257.563.838.9
    CodeFuse-VLM-14B75.769.879.359.463.945.3
    -

    我们的模型在MMBenchmark 多模态大模型榜单上取得了很高的排名: https://mmbench.opencompass.org.cn/leaderboard

    -

    这是我们模型的展示视频

    -

    https://private-user-images.githubusercontent.com/22836551/300386230-8e64f615-ac0e-447e-9695-c96b254d484f.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjExODksIm5iZiI6MTcwNjUyMDg4OSwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzODYyMzAtOGU2NGY2MTUtYWMwZS00NDdlLTk2OTUtYzk2YjI1NGQ0ODRmLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5MzQ0OVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQ5NzNjM2U1ZWU4NDU0Yzc5NmE4ZTM1NzY2ZjU4YjRjY2ZhNjMzODk0ZDgzMDg4N2FjYjZhYTllM2E3NTAyMWQmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.pr-ad7rKYBgk26DTItj2q2q9I5dRWnBNHbV9M7GSVCo

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/codefuse-modelcache-zh/index.html b/docs/zh/docs/overview/codefuse-modelcache-zh/index.html deleted file mode 100644 index 46ba7a6..0000000 --- a/docs/zh/docs/overview/codefuse-modelcache-zh/index.html +++ /dev/null @@ -1,601 +0,0 @@ - - - - - - - - -CodeFuse-ModelCache · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-ModelCache

    -
    -
    - - -

    -

    -

    -

    - 中文 | - English -

    -

    -
    -

    Contents

    - -

    新闻

    -
      -
    • 🔥🔥[2023.12.10] 增加llmEmb、onnx、paddlenlp、fasttext等LLM embedding框架,并增加timm 图片embedding框架,用于提供更丰富的embedding能力。
    • -
    • 🔥🔥[2023.11.20] codefuse-ModelCache增加本地存储能力, 适配了嵌入式数据库sqlite、faiss,方便用户快速启动测试。
    • -
    • [2023.10.31] codefuse-ModelCache…
    • -
    -

    项目简介

    -

    Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义Cache的相关技术。

    -

    架构大图

    -

    modelcache modules

    -

    致谢

    -

    本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。
    GPTCache

    -

    Contributing

    -

    ModelCache是一个非常有趣且有用的项目,我们相信这个项目有很大的潜力,无论你是经验丰富的开发者,还是刚刚入门的新手,都欢迎你为这个项目做出一些贡献,包括但不限于:提交问题和建议,参与代码编写,完善文档和示例。你的参与将会使这个项目变得更好,同时也会为开源社区做出贡献。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/codefuse-query-zh/index.html b/docs/zh/docs/overview/codefuse-query-zh/index.html deleted file mode 100644 index 3b50b92..0000000 --- a/docs/zh/docs/overview/codefuse-query-zh/index.html +++ /dev/null @@ -1,547 +0,0 @@ - - - - - - - - -CodeFuse-Query · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    CodeFuse-Query

    -
    -
    - - -

    CodeFuse-Query

    -

    随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。

    -

    这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 -在 CodeQuery 的实现中,我们把源代码和分析结果看作数据,把执行过程看作大数据处理,这与传统的以工具为中心的方法有着显著的不同。我们利用大型组织中的常见系统,如数据仓库、MaxCompute 和 Hive 等数据计算设施、OSS 对象存储和 Kubernetes 等灵活计算资源,让 CodeQuery 能够无缝地融入这些系统中。这种方法使 CodeQuery 高度可维护和可扩展,能够支持多元化的需求,并有效应对不断变化的需求。此外,CodeQuery 的开放架构鼓励各种内部系统之间的互操作性,实现了无缝的交互和数据交换。这种集成和交互能力不仅提高了组织内部的自动化程度,也提高了效率,降低了手动错误的可能性。通过打破信息孤岛,推动更互联、更自动化的环境,CodeQuery 显著提高了软件开发过程的整体生产力和效率。 -此外,CodeQuery 的以数据为中心的方法在处理静态源代码分析的领域特定挑战时具有独特的优势。例如,源代码通常是一个高度结构化和互联的数据集,与其他代码和配置文件有强烈的信息和连接。将代码视为数据,CodeQuery 可以巧妙地处理这些问题,这使得它特别适合在大型组织中使用,其中代码库持续但逐步地进行演变,大部分代码在每天进行微小的改动同时保持稳定。 CodeQuery 还支持如基于代码数据的商业智能 (BI) 这类用例,能生成报告和仪表板,协助监控和决策过程。此外,CodeQuery 在分析大型语言模型 (LLM) 的训练数据方面发挥了重要作用,提供了增强这些模型整体效果的深入见解。

    -

    在当前的静态分析领域,CodeQuery 带来了一种新的范式。它不仅满足了大规模、复杂的代码库分析需求,还能适应不断变化和多元化的静态分析场景。CodeQuery 的以数据为中心的方法,使得其在处理大数据环境中的代码分析问题时具有独特优势。CodeQuery 的设计,旨在解决大规模软件开发环境中的静态分析问题。它能够将源代码和分析结果视作数据,使得其可以灵活地融入大型组织的各种系统中。这种方法不仅可以有效地处理大规模的代码库,还可以应对各种复杂的分析需求,从而使得静态分析工作变得更加高效和准确。

    -

    CodeQuery 的特点和优势可以概括为以下几点:

    -
      -
    • 高度可扩展:CodeQuery 可以处理大规模的代码库,且能够适应不同的分析需求。这种高度的可扩展性使得 CodeQuery 可以在大型组织中发挥重要作用。
    • -
    • 以数据为中心:CodeQuery 将源代码和分析结果视作数据,这种以数据为中心的方法使其在处理大数据环境中的代码分析问题时具有独特优势。
    • -
    • 高度集成:CodeQuery 能够无缝地融入大型组织的各种系统中,包括数据仓库、数据计算设施、对象存储和灵活计算资源等。这种高度的集成性使得 CodeQuery 在大型组织中的使用变得更加方便和高效。
    • -
    • 支持多元化的需求:CodeQuery 不仅可以处理大规模的代码库,还可以应对各种复杂的分析需求,包括服务质量分析需求、跨编程语言分析需求、算法需求和性能需求等。
    • -
    -

    CodeQuery 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。未来,随着静态代码分析技术的不断发展,CodeQuery 有望在这个领域中扮演更加重要的角色。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/fastertransformer4codefuse-zh/index.html b/docs/zh/docs/overview/fastertransformer4codefuse-zh/index.html deleted file mode 100644 index d56342a..0000000 --- a/docs/zh/docs/overview/fastertransformer4codefuse-zh/index.html +++ /dev/null @@ -1,784 +0,0 @@ - - - - - - - - -FasterTransformer4CodeFuse · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    FasterTransformer4CodeFuse

    -
    -
    - - -

    FasterTransformer4CodeFuse

    -

    FasterTransformer4CodeFuse

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/mftcoder-zh/index.html b/docs/zh/docs/overview/mftcoder-zh/index.html deleted file mode 100644 index 290d4a2..0000000 --- a/docs/zh/docs/overview/mftcoder-zh/index.html +++ /dev/null @@ -1,701 +0,0 @@ - - - - - - - - -MFTCoder: 高效准确的多任务大模型微调框架 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MFTCoder: 高效准确的多任务大模型微调框架

    -
    -
    - - -
    -

    - 🤗 HuggingFace - • 🤖 魔搭 -

    -

    [中文] [English]

    -
    -

    目录

    - -

    新闻

    -

    🔥🔥🔥 [2024/01/17] MFTCoder-v0.3.0发布。新增对Mixtral(MoE), DeepSeek等模型的支持;新增支持FSDP(Fully Sharded Data Parallel);新增Self-paced Loss, 支持多任务收敛均衡。 感兴趣详见微信公众号CodeFuse的文章MFTCoder 重磅升级v0.3.0发布

    -

    🔥🔥🔥 [2024/01/17] 开源了CodeFuse-DeepSeek-33B模型,在HumanEval pass@1(greedy decoding)上可以达到78.7%。该模型在Big Code榜单的结果近期发布,请关注公众号获取最新信息。

    -

    🔥🔥🔥 [2024/01/17] 开源了CodeFuse-Mixtral-8x7B模型,在HumanEval pass@1(greedy decoding)上可以达到56.1%。感兴趣详见微信公众号CodeFuse的文章MFTCoder提升Mixtral-8x7B混合专家模型的代码能力实践

    -

    🔥🔥 [2023/11/07] MFTCoder论文在Arxiv公布,介绍了多任务微调的技术细节。

    -

    🔥🔥 [2023/10/20] 开源了CodeFuse-QWen-14B模型,在HumanEval pass@1(greedy decoding)上可以达到48.8%。相比较与基座模型Qwen-14b提升16%。感兴趣详见微信公众号CodeFuse文章

    -

    🔥🔥 [2023/09/27] 开源了CodeFuse-StarCoder-15B模型,在HumanEval pass@1(greedy decoding)上可以达到54.9%。

    -

    🔥🔥 [2023/09/26] CodeFuse-CodeLlama-34B-4bits量化版本发布,量化后模型在HumanEval pass@1指标为73.8% (贪婪解码)。

    -

    🔥🔥 [2023/09/07]MFTCoder微调的模型CodeFuse-CodeLlama-34BHumanEval Benchmarks的Python Pass@1 取得了74.4%(greedy decoding)的开源SOTA成绩。

    -

    🔥🔥 [2023/08/26]MFTCoder-v0.1.0 支持使用LoRA/QLoRA对Code Llama、Llama、Llama2、StarCoder、ChatGLM2、CodeGeeX2、Qwen和GPT-NeoX模型进行微调。

    -

    HumanEval表现

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    模型HumanEval(Pass@1)日期
    CodeFuse-DeepSeek-33B78.7%2024/01
    CodeFuse-CodeLlama-34B74.4%2023/09
    CodeFuse-CodeLlama-34B-4bits73.8%2023/09
    WizardCoder-Python-34B-V1.073.2%2023/08
    GPT-4(zero-shot)67.0%2023/03
    PanGu-Coder2 15B61.6%2023/08
    CodeFuse-Mixtral-8x7B56.1%2024/01
    CodeFuse-StarCoder-15B54.9%2023/08
    CodeLlama-34b-Python53.7%2023/08
    CodeFuse-QWen-14B48.8%2023/10
    CodeLlama-34b48.8%2023/08
    GPT-3.5(zero-shot)48.1%2022/11
    OctoCoder46.2%2023/08
    StarCoder-15B33.6%2023/05
    QWen-14B32.3%2023/10
    -

    文章

    -

    🔥 CodeFuse-MFTCoder提升CodeGeeX2-6B代码能力

    -

    🔥 CodeFuse-MFTCoder提升Qwen-14B代码能力

    -

    项目简介

    -

    国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架;

    -

    Codefuse-MFTCoder 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。

    -

    项目框架

    -

    img_1.jpg

    -

    项目优势

    -

    :white_check_mark: 多任务:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去;

    -

    :white_check_mark: 多模型:支持最新的多个开源模型,包括gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2等;

    -

    :white_check_mark: 多框架:既支持主流开源的Accelerate+DeepSpeed/FSDP,也支持新开源的ATorch 框架

    -

    :white_check_mark: 高效微调:支持LoRA和QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景;

    -

    本项目主要内容如下:

    -
      -
    • 同时支持单任务SFT(Supervised FineTuning)和MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等
    • -
    • 支持QLoRA低成本高效指令微调、LoRA高效指令微调、全量参数高精度微调。
    • -
    • 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如DeepSeek-coder, Mistral, Mistral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA等。
    • -
    • 支持lora与base model进行权重合并,推理更便捷。
    • -
    • 整理并开源2个指令微调数据集:Evol-instruction-66kCodeExercise-Python-27k
    • -
    • 开源多个[Codefuse系列指令微调模型权重],具体参见我们的huggingface组织和modelscope组织下的模型:codefuse-ai huggingface or codefuse-ai 魔搭。 -|
    • -
    -

    引用

    -

    如果你觉得我们的工作对你有帮助,请引用我们的论文

    -
    @article{mftcoder2023,
    -      title={MFTCoder: Boosting Code LLMs with Multitask Fine-Tuning}, 
    -      author={Bingchang Liu and Chaoyu Chen and Cong Liao and Zi Gong and Huan Wang and Zhichao Lei and Ming Liang and Dajun Chen and Min Shen and Hailian Zhou and Hang Yu and Jianguo Li},
    -      year={2023},
    -      journal={arXiv preprint arXiv},
    -      archivePrefix={arXiv},
    -      eprint={2311.02303}
    -}
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/overview/test-agent-zh/index.html b/docs/zh/docs/overview/test-agent-zh/index.html deleted file mode 100644 index 34b5978..0000000 --- a/docs/zh/docs/overview/test-agent-zh/index.html +++ /dev/null @@ -1,680 +0,0 @@ - - - - - - - - -Test-Agent: 您的智能测试助理 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Test-Agent: 您的智能测试助理

    -
    -
    - - -

    本地Mac M1体验效果

    -

    图片

    -

    魔搭体验效果

    -

    魔搭模型访问链接:ModelScope TestGPT-7B -MS

    -

    什么是Test Agent?(Introduction)

    -

    Test Agent 旨在构建测试领域的“智能体”,融合大模型和质量领域工程化技术,促进质量技术代系升级。我们期望和社区成员一起合作,打造创新的测试领域解决方案,构建24小时在线的测试助理服务,让测试如丝般顺滑。

    -

    本期特性(Features)

    -
      -
    • -

      模型 本期我们开源了测试领域模型TestGPT-7B。模型以CodeLlama-7B为基座,进行了相关下游任务的微调:

      -
        -
      • 多语言测试用例生成(Java/Python/Javascript) 一直以来都是学术界和工业界非常关注的领域,近年来不断有新产品或工具孵化出来,如EvoSuite、Randoop、SmartUnit等。然而传统的用例生成存在其难以解决的痛点问题,基于大模型的测试用例生成在测试用例可读性、测试场景完整度、多语言支持方面都优于传统用例生成工具。本次重点支持了多语言测试用例生成,在我们本次开源的版本中首先包含了Java、Python、Javascript的测试用例生成能力,下一版本中逐步开放Go、C++等语言。
      • -
      • 测试用例Assert补全 对当前测试用例现状的分析与探查时,我们发现代码仓库中存在一定比例的存量测试用例中未包含Assert。没有Assert的测试用例虽然能够在回归过程中执行通过,却无法发现问题。因此我们拓展了测试用例Assert自动补全这一场景。通过该模型能力,结合一定的工程化配套,可以实现对全库测试用例的批量自动补全,智能提升项目质量水位。
      • -
      -
    • -
    • -

      工程框架 本地模型快速发布和体验工程化框架

      -
        -
      • ChatBot页面
      • -
      • 模型快速启动
      • -
      • 私有化部署,本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险,100%安全
      • -
      -
    • -
    -

    后续我们会持续迭代模型和工程化能力:

    -
      -
    • 不断加入更多令人激动的测试域应用场景,如领域知识问答、测试场景分析等
    • -
    • 支撑面向测试场景的copilot 工程框架开放,如测试领域知识智能embedding、测试通用工具API体系、智能测试Agent等,敬请期待!
    • -
    • 以7B为基础,逐步扩展至13B、34B模型。欢迎关注!
    • -
    -

    性能最强的7B测试领域大模型(Model)

    -

    目前在TestAgent中,我们默认使用了TestGPT-7B模型。与当前已有开源模型相比,TestGPT-7B模型在用例执行通过率(pass@1)、用例场景覆盖(平均测试场景数)上都处于业界领先水平。 -TestGPT-7B模型核心能力的评测结果如下:

    -
      -
    • 多语言测试用例生成 -针对模型支持的三种语言:Java、Python、Javascript,Pass@1评测结果如下:
    • -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModelJava pass@1Java Average number of test scenariosPython pass@1Python Average number of test scenariosJavascript pass@1Javascript Average number of test scenarios
    TestGPT-7B48.6%4.3735.67%3.5636%2.76
    CodeLlama-13B-Instruct40.54%1.0830.57%1.6531.7%3.13
    Qwen-14B-Chat10.81%2.7815.9%1.329.15%4.22
    Baichuan2-13B-Chat13.5%2.2412.7%2.126.1%3.31
    -
      -
    • 测试用例Assert补全 -目前模型支持Java用例的Assert补全,Pass@1评测结果如下:
    • -
    - - - - - - - - - - - - - - - -
    Modelpass@1Percentage of strong validation
    Codefuse-TestGPT-7B71.1%100%
    -

    工程架构(Engineering Architecture)

    -

    JG

    -

    大模型的号角已经吹响,测试领域大模型也在不断进化中,通过预训练过程中积累的丰富世界知识,在复杂交互环境中展现出了非凡的推理与决策能力。

    -

    尽管在测试领域中基础模型取得了显著的成果,但仍然存在一些局限性,特定领域的测试任务通常需要专业化的工具或领域知识来解决。例如,基础模型可以通过预训练知识完成单次测试代码生成和测试文本生成等任务,但处理复杂的集成用例生成、特定领域用例生成和测试流程pipeline交互等问题时,需要更专业的工具和领域知识。因此将专用工具与基础模型整合在一起,可以充分发挥它们各自的优势。专用工具可以解决模型时效性不足、增强专业知识、提高可解释性和鲁棒性的问题。而基础模型则具备类人的推理规划能力,可以理解复杂的数据和场景,并与现实世界进行交互。

    -

    在本期开放模型工程化部署和ChatBot基础上,我们将继续在测试开源领域深耕投入。协同社区志趣相投开发者们,一起打造测试领域最领先的Tools工程体系、智能测试助理和测试开源工程!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/test-agent-zh/index.html b/docs/zh/docs/test-agent-zh/index.html deleted file mode 100644 index 511e472..0000000 --- a/docs/zh/docs/test-agent-zh/index.html +++ /dev/null @@ -1,559 +0,0 @@ - - - - - - - - -Test-Agent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Test-Agent

    -
    -
    - - -

    Test-Agent

    -

    Test-Agent

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/docs/zh_overview/index.html b/docs/zh/docs/zh_overview/index.html deleted file mode 100644 index 548e02b..0000000 --- a/docs/zh/docs/zh_overview/index.html +++ /dev/null @@ -1,790 +0,0 @@ - - - - - - - - -概览 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    概览

    -
    -
    - - -

    - -

    - -

    Hello World! This is CodeFuse!

    -

    CodeFuse的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs),涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。

    -

    - -

    -

    我们非常有激情去构建创新的解决方案来支持全生命周期AI驱动的软件开发,如上图所示。同时,我们也诚邀志同道合的工程师和研究人员加入这个社区,共同构建和增强CodeFuse。

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/\345\220\257\345\212\250\346\230\216\347\273\206/index.html" "b/docs/zh/docs/\345\220\257\345\212\250\346\230\216\347\273\206/index.html" deleted file mode 100644 index 00d03bc..0000000 --- "a/docs/zh/docs/\345\220\257\345\212\250\346\230\216\347\273\206/index.html" +++ /dev/null @@ -1,838 +0,0 @@ - - - - - - - - -启动明细 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    启动明细

    -
    -
    - - -

    - 中文  |  English  -

    -

    如需使用私有化模型部署,请自行安装 nvidia 驱动程序。。

    -

    python 环境准备

    -
      -
    • 推荐采用 conda 对 python 环境进行管理(可选)
    • -
    -
    # 准备 conda 环境
    -conda create --name devopsgpt python=3.9
    -conda activate devopsgpt
    -
      -
    • 安装相关依赖
    • -
    -
    cd codefuse-chatbot
    -# python=3.9,notebook用最新即可,python=3.8用notebook=6.5.6
    -pip install -r requirements.txt
    -

    沙盒环境准备

    - -
    # 构建沙盒环境的镜像,notebook版本问题见上述
    -bash docker_build.sh
    -

    模型下载(可选)

    -

    如需使用开源 LLM 与 Embedding 模型可以从 HuggingFace 下载。 -此处以 THUDM/chatglm2-6bm 和 text2vec-base-chinese 为例:

    -
    # install git-lfs
    -git lfs install
    -
    -# install LLM-model
    -git lfs clone https://huggingface.co/THUDM/chatglm2-6b
    -cp ~/THUDM/chatglm2-6b ~/codefuse-chatbot/llm_models/
    -
    -# install Embedding-model
    -git lfs clone https://huggingface.co/shibing624/text2vec-base-chinese
    -cp ~/shibing624/text2vec-base-chinese ~/codefuse-chatbot/embedding_models/
    -

    基础配置

    -
    # 修改服务启动的基础配置
    -cd configs
    -cp model_config.py.example model_config.py
    -cp server_config.py.example server_config.py
    -
    -# model_config#11~12 若需要使用openai接口,openai接口key
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -# 可自行替换自己需要的api_base_url
    -os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
    -
    -# vi model_config#LLM_MODEL 你需要选择的语言模型
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -
    -# vi model_config#EMBEDDING_MODEL 你需要选择的私有化向量模型
    -EMBEDDING_ENGINE = 'model'
    -EMBEDDING_MODEL = "text2vec-base"
    -
    -# 向量模型接入示例,修改 model_config#embedding_model_dict
    -# 若模型地址为:
    -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese
    -# 配置如下
    -"text2vec-base": "shibing624/text2vec-base-chinese"
    -
    -# vi server_config#8~14, 推荐采用容器启动服务
    -DOCKER_SERVICE = True
    -# 是否采用容器沙箱
    -SANDBOX_DO_REMOTE = True
    -

    启动服务

    -

    默认只启动webui相关服务,未启动fastchat(可选)。

    -
    # 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁
    -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# examples/llm_api.py#258 修改为 kwargs={"gptq_wbits": 4},
    -
    -# start llm-service(可选)
    -python examples/llm_api.py
    -

    更多LLM接入方法见详情… -

    -
    # 完成server_config.py配置后,可一键启动
    -cd examples
    -python start.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/\345\244\232\346\231\272\350\203\275\344\275\223/index.html" "b/docs/zh/docs/\345\244\232\346\231\272\350\203\275\344\275\223/index.html" deleted file mode 100644 index d24707f..0000000 --- "a/docs/zh/docs/\345\244\232\346\231\272\350\203\275\344\275\223/index.html" +++ /dev/null @@ -1,670 +0,0 @@ - - - - - - - - -多智能体 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    多智能体

    -
    -
    - - -

    📜 目录

    - -

    简介

    -

    为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。

    -

    但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。

    -

    经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。

    -

    因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。

    -

    本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。

    -
    - 图片 -
    -

    以下模块将从5个方面介绍Multi Agent框架所需要素:

    -
      -
    • Agent Communication在Multi Agent框架中,确保Agent可以有效地进行信息交流对于管理上下文以及提高问答效率至关重要。 -a. 遵循简洁直观易于理解的链式对话原则,将Agent以线性方式排列串连成一个执行链路。 -b. 借鉴metaGPT中的Message Pool框架,允许Agent对Message Pool进行推送和订阅,使链路更加灵活。有利于精细化Prompt工程的场景,但难以把握复杂链路的关系分析。
    • -
    • Standard Operation Process(SOP):对LLM的生成结果进行标准化解析和处理。 -a. 定义Agent的 Input 和 Output 范围,能够组装和解析相关Action和Status,保证框架运行的稳定性 -b. 封装多种基础Action执行模块,如Tool Using、Planning、Coding、Direct Answering、final answer等SOP标识,以满足Agent的基本工作需求。
    • -
    • Plan and Executor:增加LLM的Tool使用、Agent调度、代码的生成。设置了几种基本链路,例如: -a. 单轮问答,也可以扩展到CoT、ToT、GoT等形式。 -b. ReAct,基础的响应决策过程,模型设置SOP 状态以终止循环 -c. TaskPlaning - Executor,任务完成即可结束
    • -
    • Long-short term memory Management:Multi-Agent与单Agent的关键区别在于,Multi-Agent需要处理大量的交流信息,类似人类团队协作的过程。增加一个专门负责内容总结(类似于会议助理)的Agent,对长期记忆进行总结并提更有效信息传递给下一位Agent,而非传递所有内容给下一位Agent。
    • -
    • Human-agent interaction:面对复杂场景时,需要人类介入Agent交互过程并提供反馈。通过上述 Long-short term memory Management 和 Agent Communication 过程,使LLM能准确理解人类的意图,从而更有效地完成任务。
    • -
    -

    总的来说,这五个要素共同构建了一个Multi Agent框架,确保Agent之间的协作更加紧密和高效,同时也能够适应更复杂的任务需求和更多样的交互场景。通过组合多个Agent链路来实现一个完整且复杂的项目上线场景(Dev Phase),如Demand Chain(CEO)、Product Arguement Chain(CPO、CFO、CTO)、Engineer Group Chain(Selector、Developer1~N)、QA Engineer Chain(Developer、Tester)、Deploy Chain(Developer、Deploer)。

    -

    模块介绍

    -

    为了便于大家理解整个Multi-Agent的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建

    -
    - 图片 -
    -


    下面,我们先介绍相关的模块

    -

    Agent

    -

    在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用

    -
      -
    1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出
    2. -
    3. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务
    4. -
    5. ReactAgent:提供标准React的功能,根据问题实现当前任务
    6. -
    7. SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答.
    8. -
    -

    输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理

    -

    Chain

    -

    基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理

    -

    Phase

    -

    基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理

    -

    Prompt Manager

    -

    Mutli-Agent链路中每一个agent的prompt创建

    -
      -
    1. 通过对promtp_input_keys和promtp_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置
    2. -
    3. 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt -Memory Manager -主要用于 chat history 的管理,暂未完成 -● 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval -● 对 chat history 进行关键信息总结 summary context,作为 prompt context -● 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答
    4. -
    -

    Role Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    role_promptString角色描述
    role_typeStringEnum: assistant
    role_nameString角色名称,用于后续prompt context的组装和筛选
    agent_typeStringEnum:BaseAgent、SelectorAgent、ExecutorAgent、ReactAgent 也可以继承以上几种Agent然后去构造相关的Agent
    focus_agentsList[String]metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name
    focus_message_keysList[String]额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys
    promtp_input_keysList[String]Enum:
    promtp_output_keysList[String]Enum:
    chat_turnint只针对ReactAgent有效
    -

    Chain Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    chain_promptStringchain的描述
    chain_nameString角色名称,用于后续prompt context的组装和筛选
    chain_typeStringEnum:BaseChain 也可以继承以上Chain,构造相关的Chain
    agentsList[String]chain当中存在的agent以及agent的执行顺序
    chat_turnint agent之间的交互轮数
    -

    Phase Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    phase_nameString场景名称
    phase_typeStringEnum:BasePhase 也可以继承以上Phase,自定义构造相关的Phase
    chainsList[String]phase当中存在的chain以及chain的执行顺序
    do_doc_retrievalbool在场景执行开始判断是否需要补充额外信息
    do_code_retrievalbool在场景执行开始判断是否需要补充额外信息
    do_tool_retrievalbool在场景执行开始判断是否需要补充额外信息
    -

    快速使用

    -

    Comming soon

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" "b/docs/zh/docs/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" deleted file mode 100644 index 96bd0d9..0000000 --- "a/docs/zh/docs/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" +++ /dev/null @@ -1,477 +0,0 @@ - - - - - - - - -快速开始 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速开始

    -
    -
    - - -

    - 中文  |  English  -

    -

    🚀 快速使用

    -

    如需使用私有化模型部署,请自行安装 nvidia 驱动程序,本项目已在 Python 3.9.18,CUDA 11.7 环境下,Windows、X86 架构的 macOS 系统中完成测试。

    -

    Docker安装、私有化LLM接入及相关启动问题见:快速使用明细

    -

    python 环境准备

    -
      -
    • 推荐采用 conda 对 python 环境进行管理(可选)
    • -
    -
    # 准备 conda 环境
    -conda create --name devopsgpt python=3.9
    -conda activate devopsgpt
    -
      -
    • 安装相关依赖
    • -
    -
    cd codefuse-chatbot
    -pip install -r requirements.txt
    -

    基础配置

    -
    # 修改服务启动的基础配置
    -cd configs
    -cp model_config.py.example model_config.py
    -cp server_config.py.example server_config.py
    -
    -# model_config#11~12 若需要使用openai接口,openai接口key
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -# 可自行替换自己需要的api_base_url
    -os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
    -
    -# vi model_config#LLM_MODEL 你需要选择的语言模型
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -
    -# vi model_config#EMBEDDING_MODEL 你需要选择的私有化向量模型
    -EMBEDDING_ENGINE = 'model'
    -EMBEDDING_MODEL = "text2vec-base"
    -
    -# 向量模型接入示例,修改 model_config#embedding_model_dict
    -# 若模型地址为:
    -model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese
    -# 配置如下
    -"text2vec-base": "shibing624/text2vec-base-chinese"
    -
    -# vi server_config#8~14, 推荐采用容器启动服务,避免使用codeInterpreter功能时安装其它依赖导致环境冲突
    -DOCKER_SERVICE = True
    -# 是否采用容器沙箱
    -SANDBOX_DO_REMOTE = True
    -

    启动服务

    -

    默认只启动webui相关服务,未启动fastchat(可选)。

    -
    # 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁
    -# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# examples/llm_api.py#258 修改为 kwargs={"gptq_wbits": 4},
    -
    -# start llm-service(可选)
    -python examples/llm_api.py
    -

    更多LLM接入方法见详情… -

    -
    # 完成server_config.py配置后,可一键启动
    -cd examples
    -python start.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/\346\225\260\346\215\256\344\273\213\347\273\215/index.html" "b/docs/zh/docs/\346\225\260\346\215\256\344\273\213\347\273\215/index.html" deleted file mode 100644 index 65e0215..0000000 --- "a/docs/zh/docs/\346\225\260\346\215\256\344\273\213\347\273\215/index.html" +++ /dev/null @@ -1,871 +0,0 @@ - - - - - - - - -数据 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    数据

    -
    -
    - - -

    ⏬ 数据

    -

    下载

    -
      -
    • -

      方法一:下载zip压缩文件(你也可以直接用浏览器打开下面的链接):

      -
      wget https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip
      -

      然后可以使用 pandas加载数据:

      -
      import os
      -import pandas as pd
      -
      -File_Dir="devopseval-exam"
      -test_df=pd.read_csv(os.path.join(File_Dir,"test","UnitTesting.csv"))
      -
    • -
    • -

      方法二:使用Hugging Face datasets直接加载数据集。示例如下:

      -
      from datasets import load_dataset
      -dataset=load_dataset(r"DevOps-Eval/devopseval-exam",name="UnitTesting")
      -
      -print(dataset['val'][0])
      -# {"id": 1, "question": "单元测试应该覆盖以下哪些方面?", "A": "正常路径", "B": "异常路径", "C": "边界值条件","D": 所有以上,"answer": "D", "explanation": ""}  ```
      -
    • -
    • -

      方法三:使用modelscope下载相关所有数据。示例如下:

      -
      from modelscope.msdatasets import MsDataset
      -MsDataset.clone_meta(dataset_work_dir='./xxx', dataset_id='codefuse-ai/devopseval-exam')```
      -
    • -
    -

    👀 说明

    -

    为了方便使用,我们已经整理出了 55 个细分类别以及它们的中英文名称。具体细节请查看 category_mapping.json 。格式如下:

    -
    {
    -  "UnitTesting.csv": [
    -    "unit testing",
    -    "单元测试",
    -    {"dev": 5, "test": 32}
    -    "TEST"
    -  ],
    -  ...
    -  "file_name":[
    -  "英文名称",
    -  "中文名称",
    -  "样本数量",
    -  "类别(PLAN,CODE,BUILD,TEST,RELEASE,DEPOLY,OPERATE,MONITOR八选一)"
    -  ]
    -}
    -

    每个细分类别由两个部分组成:dev 和 test。每个细分类别的 dev 集包含五个示范实例以及为 few-shot 评估提供的解释。而 test 集则用于模型评估,并且test数据已包含准确标签。

    -

    下面是 dev 数据的示例,来自"版本控制"细分类别:

    -
    id: 4
    -question: 如何找到Git特定提交中已更改的文件列表?
    -A: 使用命令 `git diff --name-only SHA`
    -B: 使用命令 `git log --name-only SHA`
    -C: 使用命令 `git commit --name-only SHA`
    -D: 使用命令 `git clone --name-only SHA`
    -answer: A
    -explanation: 
    -分析原因:
    -git diff --name-only SHA命令会显示与SHA参数对应的提交中已修改的文件列表。参数--name-only让命令只输出文件名,而忽略其他信息。其它选项中的命令并不能实现此功能。
    -

    🔥 AIOps样本示例

    -

    👀 👀 此处以日志解析和时序异常检测为例,对AIOps样本做一些简要的展示:

    -

    日志解析

    -
    id: 0
    -question:
    -下面是一些运行日志
    - 0 04:21:15,429 WARN Cannot open channel to 2 at election address /10.10.34.12:3888
    - 1 19:18:56,377 WARN ******* GOODBYE /10.10.34.11:52703 ********
    - 2 19:13:46,128 WARN ******* GOODBYE /10.10.34.11:52308 ********
    - 3 19:16:26,268 WARN ******* GOODBYE /10.10.34.11:52502 ********
    - 4 09:11:16,012 WARN Cannot open channel to 3 at election address /10.10.34.13:3888
    - 5 16:37:13,837 WARN Cannot open channel to 2 at election address /10.10.34.12:3888
    - 6 09:09:16,008 WARN Cannot open channel to 3 at election address /10.10.34.13:3888
    - 7 15:27:03,681 WARN Cannot open channel to 3 at election address /10.10.34.13:3888
    -日志最前面三部分别为序号、时间戳和日志Level,在不考虑这三部分内容的情况下,此处我们设定日志的变量用'<*>'代替,token与token之间用空格分隔,那么请问上述日志的日志模版具体是什么?
    -A: Notification time out: <*> 和 Connection broken for id <*>, my id = <*>, error =
    -B: Send worker leaving thread 和 Connection broken for id <*>, my id = <*>, error =
    -C: Received connection request /<*>:<*> 和 Interrupting SendWorker
    -D: Cannot open channel to <*> at election address /<*>:<*> 和 ******* GOODBYE /<*>:<*> ********
    -answer: D
    -explanation: 根据日志中的内容,选项D是最符合日志模板的。日志中包含了"Cannot open channel to &lt;*&gt; at election address /&lt;*&gt;:&lt;*&gt;"和"******* GOODBYE /&lt;*&gt;:&lt;*&gt; ********"这两个固定的模板片段,它们都在选项D中出现了。同时,其他选项中的模板片段与日志中的内容不匹配。因此,选项D是最符合日志模板的。
    -

    时序异常检测

    -
    id: 0
    -question:
    -分析如下时间序列
    -[50,62,74,84,92,97,99,98,94,87,77,65,265,40,28,17,8,3,0,0,4,10,20,31,43,56,68,79,89,95,99,99,96,91,82,71,59,46,34,22,12,5,1,0,2,7,15,25,37,49]
    -请找出其中明显异常点的下标。所谓的异常点一般指的是明显与数据整体趋势不符的点。
    -A: 46
    -B: 0
    -C: 37
    -D: 12
    -answer: D
    -explanation: 根据分析,题目中的时间序列在12点出的值265要明显大于周围数据,存在着突增现象,因此选择D是正确的。
    -

    🔧 ToolLearning样本示例

    -

    工具学习样本的数据格式与OpenAI的函数调用格式兼容。 -详情请参阅tool_learning_info_zh.md。 -工具学习评测过程,详情请参阅见 tool_learning_evalution.md。 -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/\346\234\254\345\234\260\347\247\201\346\234\211\345\214\226\345\244\247\346\250\241\345\236\213\346\216\245\345\217\243\346\216\245\345\205\245/index.html" "b/docs/zh/docs/\346\234\254\345\234\260\347\247\201\346\234\211\345\214\226\345\244\247\346\250\241\345\236\213\346\216\245\345\217\243\346\216\245\345\205\245/index.html" deleted file mode 100644 index 2425ee7..0000000 --- "a/docs/zh/docs/\346\234\254\345\234\260\347\247\201\346\234\211\345\214\226\345\244\247\346\250\241\345\236\213\346\216\245\345\217\243\346\216\245\345\205\245/index.html" +++ /dev/null @@ -1,905 +0,0 @@ - - - - - - - - -本地私有化&大模型接口接入 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    本地私有化&大模型接口接入

    -
    -
    - - -

    - 中文  |  English  -

    -

    本地私有化/大模型接口接入

    -

    依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。

    -

    本地私有化模型接入

    -


    模型地址配置示例,model_config.py配置修改

    -
    # 建议:走huggingface接入,尽量使用chat模型,不要使用base,无法获取正确输出
    -# 注意:当llm_model_dict和VLLM_MODEL_DICT同时存在时,优先启动VLLM_MODEL_DICT中的模型配置
    -
    -# llm_model_dict 配置接入示例如下
    -
    -# 1、若把模型放到 ~/codefuse-chatbot/llm_models 路径下
    -# 若模型地址如下
    -model_dir: ~/codefuse-chatbot/llm_models/THUDM/chatglm-6b
    -
    -# 参考配置如下
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "THUDM/chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "THUDM/chatglm-6b",
    -}
    -
    -# or 若模型地址如下
    -model_dir: ~/codefuse-chatbot/llm_models/chatglm-6b
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "chatglm-6b",
    -}
    -
    -# 2、若不想移动相关模型到 ~/codefuse-chatbot/llm_models
    -# 同时删除 `模型路径重置` 以下的相关代码,具体见model_config.py
    -# 若模型地址如下
    -model_dir: ~/THUDM/chatglm-6b
    -# 参考配置如下
    -llm_model_dict = {
    -    "chatglm-6b": {
    -        "local_model_path": "your personl dir/THUDM/chatglm-6b",
    -        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
    -        "api_key": "EMPTY"
    -    }
    -}
    -
    -VLLM_MODEL_DICT = {
    - 'chatglm2-6b':  "your personl dir/THUDM/chatglm-6b",
    -}
    -
    # 3、指定启动的模型服务,两者保持一致
    -LLM_MODEL = "chatglm-6b"
    -LLM_MODELs = ["chatglm-6b"]
    -
    # server_config.py配置修改, 若LLM_MODELS无多个模型配置不需要额外进行设置
    -# 修改server_config.py#FSCHAT_MODEL_WORKERS的配置
    -"model_name": {'host': DEFAULT_BIND_HOST, 'port': 20057}
    -


    量化模型接入

    -
    # 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁
    -cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -
    -# 若需要支撑qwen-72b-int4模型,需要给fastchat打一个补丁
    -cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
    -# 量化需修改llm_api.py的配置
    -# examples/llm_api.py#559 取消注释 kwargs["gptq_wbits"] = 4
    -

    公开大模型接口接入

    -
    # model_config.py配置修改
    -# ONLINE_LLM_MODEL
    -# 其它接口开发来自于langchain-chatchat项目,缺少相关账号未经测试
    -
    -# 指定启动的模型服务,两者保持一致
    -LLM_MODEL = "gpt-3.5-turbo"
    -LLM_MODELs = ["gpt-3.5-turbo"]
    -

    外部大模型接口接入示例

    -
    # 1、实现新的模型接入类
    -# 参考  ~/examples/model_workers/openai.py#ExampleWorker
    -# 实现do_chat函数即可使用LLM的能力
    -
    -class XXWorker(ApiModelWorker):
    -    def __init__(
    -            self,
    -            *,
    -            controller_addr: str = None,
    -            worker_addr: str = None,
    -            model_names: List[str] = ["gpt-3.5-turbo"],
    -            version: str = "gpt-3.5",
    -            **kwargs,
    -    ):
    -        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
    -        kwargs.setdefault("context_len", 16384) #TODO 16K模型需要改成16384
    -        super().__init__(**kwargs)
    -        self.version = version
    -
    -    def do_chat(self, params: ApiChatParams) -> Dict:
    -        '''
    -        执行Chat的方法,默认使用模块里面的chat函数。
    -        :params.messages : [
    -            {"role": "user", "content": "hello"}, 
    -            {"role": "assistant", "content": "hello"}
    -            ]
    -        :params.xx: 详情见 ApiChatParams 
    -        要求返回形式:{"error_code": int, "text": str}
    -        '''
    -        return {"error_code": 500, "text": f"{self.model_names[0]}未实现chat功能"}
    -
    -
    -# 最后在 ~/examples/model_workers/__init__.py 中完成注册
    -# from .xx import XXWorker
    -
    -# 2、通过已有模型接入类完成接入
    -# 或者直接使用已有的相关大模型类进行使用(缺少相关账号测试,欢迎大家测试后提PR)
    -
    # model_config.py#ONLINE_LLM_MODEL 配置修改
    -# 填写专属模型的 version、api_base_url、api_key、provider(与上述类名一致)
    -ONLINE_LLM_MODEL = {
    -    # 线上模型。请在server_config中为每个在线API设置不同的端口
    -
    -    "openai-api": {
    -        "model_name": "gpt-3.5-turbo",
    -        "api_base_url": "https://api.openai.com/v1",
    -        "api_key": "",
    -        "openai_proxy": "",
    -    },
    -    "example": {
    -        "version": "gpt-3.5",  # 采用openai接口做示例
    -        "api_base_url": "https://api.openai.com/v1",
    -        "api_key": "",
    -        "provider": "ExampleWorker",
    -    },
    -}
    -

    启动大模型服务

    -
    # start llm-service(可选)  单独启动大模型服务
    -python examples/llm_api.py
    -
    # 启动测试
    -import openai
    -# openai.api_key = "EMPTY" # Not support yet
    -openai.api_base = "http://127.0.0.1:8888/v1"
    -
    -# 选择你启动的模型
    -model = "example"
    -
    -# create a chat completion
    -completion = openai.ChatCompletion.create(
    -    model=model,
    -    messages=[{"role": "user", "content": "Hello! What is your name? "}],
    -    max_tokens=100,
    -)
    -# print the completion
    -print(completion.choices[0].message.content)
    -
    -# 正确输出后则确认LLM可正常接入
    -

    or

    -
    # model_config.py#USE_FASTCHAT 判断是否进行fastchat接入本地模型
    -USE_FASTCHAT = "gpt" not in LLM_MODEL
    -python start.py #221 自动执行 python llm_api.py
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/docs/\350\207\264\350\260\242/index.html" "b/docs/zh/docs/\350\207\264\350\260\242/index.html" deleted file mode 100644 index 3506ef6..0000000 --- "a/docs/zh/docs/\350\207\264\350\260\242/index.html" +++ /dev/null @@ -1,445 +0,0 @@ - - - - - - - - -致谢 · CodeFuse - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    致谢

    -
    -
    - - -

    CodeFuse-ai 主页基于docura构建,在此深深感谢他们的开源贡献!

    -

    ChatBot 项目基于langchain-chatchatcodebox-api,在此深深感谢他们的开源贡献!

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/index.html b/docs/zh/index.html deleted file mode 100644 index aa333dd..0000000 --- a/docs/zh/index.html +++ /dev/null @@ -1,392 +0,0 @@ - - - - - - - - - -CodeFuse-AI · CodeFuse的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs), 涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    -
    -
    -

    CodeFuse-AI

    -

    CodeFuse的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs), 涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。

    - - - - - -
    - -
    -
    -

    CodeFuse-Query

    -

    Query-Based Code Analysis Engine

    - -
    - - - - -
    - - CodeFuse-Query - Star - Fork -
    -
    - -
    - -
    -
    -

    MFTCoder

    -

    High Accuracy and efficiency multi-task fine-tuning framework for Code LLMs

    - -
    - - - - -
    - - MFTCoder - Star - Fork -
    -
    - -
    - - - - -
    - - CodeFuse-MFT-VLM - Star - Fork -
    -
    - -
    - -
    -
    -

    Test-Agent

    -

    Agent that empowers software testing with LLMs; industrial-first in China

    - -
    - - - - -
    - - Test-Agent - Star - Fork -
    -
    - -
    - -
    -
    -

    CodeFuse-ModelCache

    -

    A LLM semantic caching system aiming to reducing response time via cached query-result pairs.

    - -
    - - - - -
    - - CodeFuse-ModelCache - Star - Fork -
    -
    - -
    - -
    -
    -

    DevOps-Series

    -

    An intelligent assistant serving the entire software development lifecycle.

    - -
    - - - - -
    - - codefuse-chatbot - Star - Fork -
    -
    - -
    - - - - -
    - - codefuse-devops-eval - Star - Fork -
    -
    - -
    - - - - -
    - - CodeFuse-DevOps-Model - Star - Fork -
    -
    - -
    - -
    -
    -

    Codefuse-evaluation

    -

    Industrial-level evaluation benchmarks for Coding LLMs in the full life-cycle of AI native software developing

    - -
    - - - - -
    - - codefuse-evaluation - Star - Fork -
    -
    - -
    - -
    - - - - - - - -
    -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/index.xml b/docs/zh/index.xml deleted file mode 100644 index 6a66611..0000000 --- a/docs/zh/index.xml +++ /dev/null @@ -1,536 +0,0 @@ - - - - CodeFuse-AI - /zh/ - Recent content on CodeFuse-AI - Hugo -- gohugo.io - en-CN - - - - /zh/docs/codefuse-query/1_abstract/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-query/1_abstract/ - 引言 随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。 这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 在 CodeQuery 的实现中,我们把源代码和分析结果看作数据,把执行过程看作大数据处理,这与传统的以工具为中心的方法有着显著的不同。我们利用大型组织中的常见系统,如数据仓库、MaxCompute 和 Hive 等数据计算设施、OSS 对象存储和 Kubernetes 等灵活计算资源,让 CodeQuery 能够无缝地融入这些系统中。这种方法使 CodeQuery 高度可维护和可扩展,能够支持多元化的需求,并有效应对不断变化的需求。此外,CodeQuery 的开放架构鼓励各种内部系统之间的互操作性,实现了无缝的交互和数据交换。这种集成和交互能力不仅提高了组织内部的自动化程度,也提高了效率,降低了手动错误的可能性。通过打破信息孤岛,推动更互联、更自动化的环境,CodeQuery 显著提高了软件开发过程的整体生产力和效率。 此外,CodeQuery 的以数据为中心的方法在处理静态源代码分析的领域特定挑战时具有独特的优势。例如,源代码通常是一个高度结构化和互联的数据集,与其他代码和配置文件有强烈的信息和连接。将代码视为数据,CodeQuery 可以巧妙地处理这些问题,这使得它特别适合在大型组织中使用,其中代码库持续但逐步地进行演变,大部分代码在每天进行微小的改动同时保持稳定。 CodeQuery 还支持如基于代码数据的商业智能 (BI) 这类用例,能生成报告和仪表板,协助监控和决策过程。此外,CodeQuery 在分析大型语言模型 (LLM) 的训练数据方面发挥了重要作用,提供了增强这些模型整体效果的深入见解。 在当前的静态分析领域,CodeQuery 带来了一种新的范式。它不仅满足了大规模、复杂的代码库分析需求,还能适应不断变化和多元化的静态分析场景。CodeQuery 的以数据为中心的方法,使得其在处理大数据环境中的代码分析问题时具有独特优势。CodeQuery 的设计,旨在解决大规模软件开发环境中的静态分析问题。它能够将源代码和分析结果视作数据,使得其可以灵活地融入大型组织的各种系统中。这种方法不仅可以有效地处理大规模的代码库,还可以应对各种复杂的分析需求,从而使得静态分析工作变得更加高效和准确。 CodeQuery 的特点和优势可以概括为以下几点: 高度可扩展:CodeQuery 可以处理大规模的代码库,且能够适应不同的分析需求。这种高度的可扩展性使得 CodeQuery 可以在大型组织中发挥重要作用。 以数据为中心:CodeQuery 将源代码和分析结果视作数据,这种以数据为中心的方法使其在处理大数据环境中的代码分析问题时具有独特优势。 高度集成:CodeQuery 
能够无缝地融入大型组织的各种系统中,包括数据仓库、数据计算设施、对象存储和灵活计算资源等。这种高度的集成性使得 CodeQuery 在大型组织中的使用变得更加方便和高效。 支持多元化的需求:CodeQuery 不仅可以处理大规模的代码库,还可以应对各种复杂的分析需求,包括服务质量分析需求、跨编程语言分析需求、算法需求和性能需求等。 CodeQuery 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。未来,随着静态代码分析技术的不断发展,CodeQuery 有望在这个领域中扮演更加重要的角色。 - - - - /zh/docs/devops_eval/tool_learning_evalution/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/devops_eval/tool_learning_evalution/ - tool learning 数据集评测教程 chatml接入方式 如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步: 编写 ~/evals/FuncCallEvalution 的 create_prompts 函数 编写 ~/models/base_model 的 相关函数 注册模型和评估函数 执行测试脚本 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 1. 编写 loader 函数 如果模型在加载进来还需要做一些额外的处理(e.g. tokenizer 调整),需要去 src.context_builder.context_builder_family.py 中继承 ModelAndTokenizerLoader 类来覆写对应的 load_model 和 load_tokenizer 函数,具体可以参照以下示例: class FuncCallEvalution(ToolEvalution): def create_prompts(self, func_call_datas): &#39;&#39;&#39; datas: [ { &#34;instruction&#34;: history[his_idx], &#34;input&#34;: &#34;&#34;, &#34;output&#34;: output, &#34;history&#34;: [(human_content, ai_content), (), ()], &#34;functions&#34;: tools } ] &#39;&#39;&#39; system_content = &#39;&#39;&#39;CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。 你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。&#39;&#39;&#39; function_format = &#39;&#39;&#39;You are ToolGPT, you have access to the following APIs:\n{tools}&#39;&#39;&#39; func_call_train_datas = [] history_error_cnt = 0 funccall_error_cnt = 0 for data in func_call_datas: tools = data[&#34;functions&#34;] chatrounds = data[&#34;chatrounds&#34;] function_content = &#34;&#34; if len(tools) &gt; 0: function_content = function_format. 
- - - - /zh/docs/devops_eval/tool_learning_info_zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/devops_eval/tool_learning_info_zh/ - 数据样例 在数据上我们完全兼容了 OpenAI Function Calling,具体格式如下: Function Call的数据格式 Input Key Input Type Input Description functions List[Swagger] 工具集合 chatrounds List[chatround] 多轮对话数据 chatrounds的数据格式 Input Key Input Type Input Description role string 角色名称,包含三种类别,user、assistant、function name string 若role为function,则存在name字段,为function的名称 content string role的返回内容 function_call dict 工具调用 { &#34;functions&#34;: [ { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;description&#34;: &#34;查询复旦大学往年分数线,例如:查询2020年复旦大学的分数线&#34;, &#34;parameters&#34;: { &#34;type&#34;: &#34;object&#34;, &#34;properties&#34;: { &#34;year&#34;: { &#34;type&#34;: &#34;string&#34;, &#34;description&#34;: &#34;年份,例如:2020,2019,2018&#34; } }, &#34;required&#34;: [ &#34;year&#34; ] } } ], &#34;chatrounds&#34;: [ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;CodeFuse是一个面向研发领域的智能助手,旨在中立的、无害的帮助用户解决开发相关的问题,所有的回答均使用Markdown格式返回。\n你能利用许多工具和功能来完成给定的任务,在每一步中,你需要分析当前状态,并通过执行函数调用来确定下一步的行动方向。你可以进行多次尝试。如果你计划连续尝试不同的条件,请每次尝试一种条件。若给定了Finish函数,则以Finish调用结束,若没提供Finish函数,则以不带function_call的对话结束。&#34; }, { &#34;role&#34;: &#34;user&#34;, &#34;content&#34;: &#34;查询2020年复旦大学的分数线&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: null, &#34;function_call&#34;: { &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;arguments&#34;: &#34;{\n \&#34;year\&#34;: \&#34;2020\&#34;\n}&#34; } }, { &#34;role&#34;: &#34;function&#34;, &#34;name&#34;: &#34;get_fudan_university_scoreline&#34;, &#34;content&#34;: &#34;{\n \&#34;scoreline\&#34;:{\n \&#34;文科一批\&#34;: 630, \n \&#34;文科二批\&#34;: 610, \n \&#34;理科一批\&#34;: 650, \n \&#34;理科二批\&#34;: 630 \n }\n}&#34; }, { &#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: &#34;2020年复旦大学的分数线如下:\n\n- 文科一批:630分\n- 文科二批:610分\n- 理科一批:650分\n- 理科二批:630分&#34; } ] } 上述Function Call的数据样例为给定特定工具集后,用于回答用户查询某高校录取分数线的问题。 - - - - 
/zh/docs/devops_eval/tutorial_zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/devops_eval/tutorial_zh/ - 数据集评测教程 🚀 如何进行测试 如果需要在自己的 huggingface 格式的模型上进行测试的话,总的步骤分为如下几步: 编写 Model 的 loader 函数 编写 Model 的 context_builder 函数 注册模型到配置文件中 执行测试脚本 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 1. 编写 loader 函数 如果模型在加载进来还需要做一些额外的处理(e.g. tokenizer 调整),需要去 src.context_builder.context_builder_family.py 中继承 ModelAndTokenizerLoader 类来覆写对应的 load_model 和 load_tokenizer 函数,具体可以参照以下示例: class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass def load_model(self, model_path: str): model = super().load_model(model_path) model.generation_config = GenerationConfig.from_pretrained(model_path) return model def load_tokenizer(self, model_path: str): tokenizer = super().load_tokenizer(model_path) # read generation config with open(model_path + &#39;/generation_config. - - - Agent 编排 - /zh/coagent/agent-%E7%BC%96%E6%8E%92/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/agent-%E7%BC%96%E6%8E%92/ - 核心Connector介绍 为了便于大家理解整个 CoAgent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建 下面,我们先介绍相关的核心组件 Agent 在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用 BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 =&gt; 输出 ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 ReactAgent:提供标准React的功能,根据问题实现当前任务 SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. 
输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理 Chain 基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理 Phase 基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理 Prompt Manager Multi-Agent链路中每一个agent的prompt创建 通过对prompt_input_keys和prompt_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt Memory Manager 主要用于 chat history 的管理,暂未完成 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 - - - Agent 编排 - /zh/muagent/agent-%E7%BC%96%E6%8E%92/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/agent-%E7%BC%96%E6%8E%92/ - 核心Connector介绍 为了便于大家理解整个 muagent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建 下面,我们先介绍相关的核心组件 Agent 在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用 BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 =&gt; 输出 ReactAgent:提供标准React的功能,根据问题实现当前任务 ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. 
输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理 Chain 基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理 Phase 基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理 Prompt Manager Multi-Agent链路中每一个agent的prompt创建 通过对prompt_input_keys和prompt_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt Memory Manager 主要用于 chat history 的管理 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 - - - ChatBot 技术路线 - /zh/docs/chatbot-%E6%8A%80%E6%9C%AF%E8%B7%AF%E7%BA%BF/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/chatbot-%E6%8A%80%E6%9C%AF%E8%B7%AF%E7%BA%BF/ - 中文&nbsp | &nbspEnglish&nbsp RoadMap 完整路线 Sandbox 环境 ✅ 环境隔离的sandbox环境与代码执行 ✅ 上传、下载文件 ✅ 支持java执行环境 Vector Database &amp; Retrieval task retrieval ✅ tool retrieval ✅ Prompt Management ✅ memory Management ✅ Multi Agent ✅ PRD需求文档、系分、接口设计 ⬜ 根据需求文档、系分、接口设计生产代码 ⬜ 自动测试、自动debugger ⬜ 运维流程接入(ToolLearning)⬜ 全流程自动 ⬜ 基于fastchat接入LLM ✅ 基于sentencebert接入Text Embedding ✅ 向量加载速度提升 ✅ Connector ✅ 基于langchain的react模式 ✅ 基于langchain完成tool检索 ✅ Web Crawl 通用能力 ✅ 技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅ issue document ⬜ SDK Library Document ⬜ v0.0 Sandbox 环境 ✅ 环境隔离的sandbox环境与代码执行 ✅ 基于fastchat接入LLM ✅ 基于sentencebert接入Text Embedding ✅ Web Crawl 通用能力:技术文档: 知乎、csdn、阿里云开发者论坛、腾讯云开发者论坛等 ✅ v0. 
- - - CoAgent 概览 - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - 简介 为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。 但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。 经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。 因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。 本项目的Multi-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。 以下模块将从5个方面介绍Multi Agent框架所需要素: Agent Communication在Multi Agent框架中,确保Agent可以有效地进行信息交流对于管理上下文以及提高问答效率至关重要。 a. 遵循简洁直观易于理解的链式对话原则,将Agent以线性方式排列串连成一个执行链路。 b. 借鉴metaGPT中的Message Pool框架,允许Agent对Message Pool进行推送和订阅,使链路更加灵活。有利于精细化Prompt工程的场景,但难以把握复杂链路的关系分析。 Standard Operation Process(SOP):对LLM的生成结果进行标准化解析和处理。 a. 定义Agent的 Input 和 Output 范围,能够组装和解析相关Action和Status,保证框架运行的稳定性 b. 封装多种基础Action执行模块,如Tool Using、Planning、Coding、Direct Answering、final answer等SOP标识,以满足Agent的基本工作需求。 Plan and Executor:增加LLM的Tool使用、Agent调度、代码的生成。设置了几种基本链路,例如: a. 单轮问答,也可以扩展到CoT、ToT、GoT等形式。 b. ReAct,基础的响应决策过程,模型设置SOP 状态以终止循环 c. 
TaskPlaning - Executor,任务完成即可结束 Long-short term memory Management:Multi-Agent与单Agent的关键区别在于,Multi-Agent需要处理大量的交流信息,类似人类团队协作的过程。增加一个专门负责内容总结(类似于会议助理)的Agent,对长期记忆进行总结并提炼更有效信息传递给下一位Agent,而非传递所有内容给下一位Agent。 Human-agent interaction:面对复杂场景时,需要人类介入Agent交互过程并提供反馈。通过上述 Long-short term memory Management 和 Agent Communication 过程,使LLM能准确理解人类的意图,从而更有效地完成任务。 总的来说,这五个要素共同构建了一个Multi Agent框架,确保Agent之间的协作更加紧密和高效,同时也能够适应更复杂的任务需求和更多样的交互场景。通过组合多个Agent链路来实现一个完整且复杂的项目上线场景(Dev Phase),如Demand Chain(CEO)、Product Argument Chain(CPO、CFO、CTO)、Engineer Group Chain(Selector、Developer1~N)、QA Engineer Chain(Developer、Tester)、Deploy Chain(Developer、Deployer)。 - - - CodeFuse-ChatBot Development by Private Knowledge Augmentation - /zh/docs/codefuse-chatbot-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-chatbot-zh/ - 中文&nbsp | &nbspEnglish&nbsp DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。 📜 目录 🤝 介绍 🎥 演示视频 🧭 技术路线 🤝 介绍 💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。 本项目核心差异技术、功能点: 🧠 智能调度核心: 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 使用说明 💻 代码整库分析: 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。 📄 文档分析增强: 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。 🔧 垂类专属知识: 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。 🤖 垂类模型兼容: 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。 🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。接入Demo 👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。 🎥 演示视频 为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。 知识库导入和问答:演示视频 本地代码库导入和问答:演示视频 🧭 技术路线 🧠 Multi-Agent Schedule Core: 多智能体调度核心,简易配置即可打造交互式智能体。 🕷️ Multi Source Web Crawl: 多源网络爬虫,提供对指定 URL 的爬取功能,以搜集所需信息。 🗂️ Data Processor: 数据处理器,轻松完成文档载入、数据清洗,及文本切分,整合不同来源的数据。 🔤 Text Embedding &amp; Index::文本嵌入索引,用户可以轻松上传文件进行文档检索,优化文档分析过程。 🗄️ 
Vector Database &amp; Graph Database: 向量与图数据库,提供灵活强大的数据管理解决方案。 📝 Prompt Control &amp; Management::Prompt 控制与管理,精确定义智能体的上下文环境。 🚧 SandBox::沙盒环境,安全地执行代码编译和动作。 💬 LLM::智能体大脑,支持多种开源模型和 LLM 接口。 🛠️ API Management:: API 管理工具,实现对开源组件和运维平台的快速集成。 具体实现明细见:技术路线明细 - - - CodeFuse-ChatBot Development by Private Knowledge Augmentation - /zh/docs/overview/codefuse-chatbot-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-chatbot-zh/ - 中文&nbsp | &nbspEnglish&nbsp DevOps-ChatBot是由蚂蚁CodeFuse团队开发的开源AI智能助手,致力于简化和优化软件开发生命周期中的各个环节。该项目结合了Multi-Agent的协同调度机制,并集成了丰富的工具库、代码库、知识库和沙盒环境,使得LLM模型能够在DevOps领域内有效执行和处理复杂任务。 📜 目录 🤝 介绍 🎥 演示视频 🧭 技术路线 🤝 介绍 💡 本项目旨在通过检索增强生成(Retrieval Augmented Generation,RAG)、工具学习(Tool Learning)和沙盒环境来构建软件开发全生命周期的AI智能助手,涵盖设计、编码、测试、部署和运维等阶段。 逐渐从各处资料查询、独立分散平台操作的传统开发运维模式转变到大模型问答的智能化开发运维模式,改变人们的开发运维习惯。 本项目核心差异技术、功能点: 🧠 智能调度核心: 构建了体系链路完善的调度核心,支持多模式一键配置,简化操作流程。 使用说明 💻 代码整库分析: 实现了仓库级的代码深入理解,以及项目文件级的代码编写与生成,提升了开发效率。 📄 文档分析增强: 融合了文档知识库与知识图谱,通过检索和推理增强,为文档分析提供了更深层次的支持。 🔧 垂类专属知识: 为DevOps领域定制的专属知识库,支持垂类知识库的自助一键构建,便捷实用。 🤖 垂类模型兼容: 针对DevOps领域的小型模型,保证了与DevOps相关平台的兼容性,促进了技术生态的整合。 🌍 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。接入Demo 👥 核心研发团队长期专注于 AIOps + NLP 领域的研究。我们发起了 Codefuse-ai 项目,希望大家广泛贡献高质量的开发和运维文档,共同完善这套解决方案,以实现“让天下没有难做的开发”的目标。 🎥 演示视频 为了帮助您更直观地了解 Codefuse-ChatBot 的功能和使用方法,我们录制了一系列演示视频。您可以通过观看这些视频,快速了解本项目的主要特性和操作流程。 知识库导入和问答:演示视频 本地代码库导入和问答:演示视频 🧭 技术路线 🧠 Multi-Agent Schedule Core: 多智能体调度核心,简易配置即可打造交互式智能体。 🕷️ Multi Source Web Crawl: 多源网络爬虫,提供对指定 URL 的爬取功能,以搜集所需信息。 🗂️ Data Processor: 数据处理器,轻松完成文档载入、数据清洗,及文本切分,整合不同来源的数据。 🔤 Text Embedding &amp; Index::文本嵌入索引,用户可以轻松上传文件进行文档检索,优化文档分析过程。 🗄️ Vector Database &amp; Graph Database: 向量与图数据库,提供灵活强大的数据管理解决方案。 📝 Prompt Control &amp; Management::Prompt 控制与管理,精确定义智能体的上下文环境。 🚧 SandBox::沙盒环境,安全地执行代码编译和动作。 💬 LLM::智能体大脑,支持多种开源模型和 LLM 接口。 🛠️ API Management:: API 管理工具,实现对开源组件和运维平台的快速集成。 具体实现明细见:技术路线明细 - - - CodeFuse-DevOps - /zh/docs/codefuse-devops/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops/ - 
CodeFuse-DevOps CodeFuse-DevOps - - - CodeFuse-DevOps-Eval - /zh/docs/codefuse-devops-eval-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops-eval-zh/ - codefuse-devops-eval codefuse-devops-eval - - - CodeFuse-DevOps-Eval - /zh/docs/overview/codefuse-devops-eval-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-devops-eval-zh/ - DevOps-Eval是一个专门为DevOps领域大模型设计的综合评估数据集。我们希望DevOps-Eval能够帮助开发者,尤其是DevOps领域的开发者,追踪进展并分析他们拥有的DevOps大模型的优势和不足之处。 📚 该仓库包含与DevOps和AIOps相关的问题和练习, 还添加了关于ToolLearning相关的样本。 💥 目前有 7486 个多项选择题,根据DevOps的通用流程将其归纳未8个模块,如下图所示。 🔥 AIOps样本总计 2840 个,覆盖的场景包括日志解析、时序异常检测、时序分类、时序预测和根因分析。 🔧 ToolLearning样本 1509 个,涵盖59个领域,总计 239 种工具类别。 🏆 排行榜 以下是我们获得的初版评测结果,包括多个开源模型的zero-shot和five-shot准确率。我们注意到,对于大多数指令模型来说,five-shot的准确率要优于zero-shot。 👀 DevOps Zero Shot 模型 plan code build test release deploy operate monitor 平均分 DevOpsPal-14B-Chat 60.61 78.35 84.86 84.65 87.26 82.75 69.89 79.17 78.23 DevOpsPal-14B-Base 54.55 77.82 83.49 85.96 86.32 81.96 71.18 82.41 78.23 Qwen-14B-Chat 60.61 75.4 85.32 84.21 89.62 82.75 69.57 80.56 77.18 Qwen-14B-Base 57.58 73.81 84.4 85. - - - CodeFuse-DevOps-Model - /zh/docs/codefuse-devops-model-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops-model-zh/ - codeFuse-devops-model codeFuse-devops-model - - - CodeFuse-DevOps-Model - /zh/docs/overview/codefuse-devops-model-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-devops-model-zh/ - codeFuse-devops-model DevOps-Model 是蚂蚁集团联合北京大学发布面向中文 DevOps 领域的大语言模型,通过收集 DevOps 领域相关的专业数据,再针对模型进行语言模型的加训和对齐训练,产出可以帮助工程师在整个开发运维生命周期提效的大模型。弥补当前大模型在 DevOps 领域的缺失,旨在做到有问题,问 DevOps-Model ! 当前我们已经开源了 7B 和 14B 两种规格的经过加训得 Base 模型和经过对齐后的 Chat 模型,同时还开源了对应的训练代码,欢迎大家一起合作建设! 
项目地址 Github 地址:https://github.com/codefuse-ai/CodeFuse-DevOps-Model/tree/main ModelScope 地址: DevOps-Model-7B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Base/summary DevOps-Model-7B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-7B-Chat/summary DevOps-Model-14B-Base:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Base/summary DevOps-Model-14B-Chat:https://modelscope.cn/models/codefuse-ai/CodeFuse-DevOps-Model-14B-Chat/summary 评测考题 针对模型评测,最初并没有这样的一个 benchmark 用来 DevOps 领域进行测试,所以我们首先选用了一些通用开源测试中和 DevOps 领域相关的选择题进行测试,具体测试数据如下: 数据集 考试科目 题目总数 CMMLU Computer science 204 Computer security 171 Machine learning 122 CEval college programming 37 CEval computer_architecture 21 CEval computer_network 19 总计 总计题目数 574 评测方式 由于都是单选题,我们采用的是选取模型产出的第一个 Token 中四个选项 Token 中得分最高的作为模型对于问题的回答。同时我们还测试了 Zero-shot 和 Five-shot 的结果。 - - - CodeFuse-MFT-VLM - /zh/docs/overview/codefuse-mft-vlm/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-mft-vlm/ - CodeFuse-VLM CodeFuse-VLM 是一个多模态大语言模型框架,该框架为用户提供多种视觉编码器,模态对齐模块和大语言模型的选择,以适配用户对不同任务的需求。 随着huggingface开源社区的不断更新,会有更多的vision encoder 和 LLM 底座发布,这些vision encoder 和 LLM底座都有各自的强项,例如 code-llama 适合生成代码类任务,但是不适合生成中文类的任务;因此我们搭建了CodeFuse-VLM 框架,支持多种视觉模型和语言大模型,使得CodeFuse-VLM可以适应不同种类的任务。 我们在CodeFuse-VLM 框架下, 使用Qwen-VL的视觉编码器, cross attention模态对齐模块, 和 Qwen-14B 模型训练了 CodeFuse-VLM-14B CodeFuse-VLM-14B 在多个benchmarks 上的性能超过了Qwen-VL和LLAVA-1.5 各个模型得分如下表所示: 模型 MMBench MMBench-CN VqaV2 GQA TextVQA Vizwiz LLAVA-1.5 67.7 63.6 80.0 63.3 61.3 53.6 Qwen-VL 60.6 56.7 78.2 57.5 63.8 38.9 CodeFuse-VLM-14B 75.7 69.8 79.3 59.4 63.9 45.3 我们的模型在MMBenchmark 多模态大模型榜单上取得了很高的排名: https://mmbench.opencompass.org.cn/leaderboard 这是我们模型的展示视频 
https://private-user-images.githubusercontent.com/22836551/300386230-8e64f615-ac0e-447e-9695-c96b254d484f.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MDY1MjExODksIm5iZiI6MTcwNjUyMDg4OSwicGF0aCI6Ii8yMjgzNjU1MS8zMDAzODYyMzAtOGU2NGY2MTUtYWMwZS00NDdlLTk2OTUtYzk2YjI1NGQ0ODRmLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAxMjklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwMTI5VDA5MzQ0OVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQ5NzNjM2U1ZWU4NDU0Yzc5NmE4ZTM1NzY2ZjU4YjRjY2ZhNjMzODk0ZDgzMDg4N2FjYjZhYTllM2E3NTAyMWQmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.pr-ad7rKYBgk26DTItj2q2q9I5dRWnBNHbV9M7GSVCo - - - CodeFuse-ModelCache - /zh/docs/codefuse-modelcache-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-modelcache-zh/ - CodeFuse-ModelCache CodeFuse-ModelCache - - - CodeFuse-ModelCache - /zh/docs/overview/codefuse-modelcache-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-modelcache-zh/ - 中文 | English Contents 新闻 项目简介 架构大图 致谢 Contributing 新闻 🔥🔥[2023.12.10] 增加llmEmb、onnx、paddlenlp、fasttext等LLM embedding框架,并增加timm 图片embedding框架,用于提供更丰富的embedding能力。 🔥🔥[2023.11.20] codefuse-ModelCache增加本地存储能力, 适配了嵌入式数据库sqlite、faiss,方便用户快速启动测试。 [2023.10.31] codefuse-ModelCache&hellip; 项目简介 Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义Cache的相关技术。 架构大图 致谢 本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。 GPTCache Contributing ModelCache是一个非常有趣且有用的项目,我们相信这个项目有很大的潜力,无论你是经验丰富的开发者,还是刚刚入门的新手,都欢迎你为这个项目做出一些贡献,包括但不限于:提交问题和建议,参与代码编写,完善文档和示例。你的参与将会使这个项目变得更好,同时也会为开源社区做出贡献。 - - - CodeFuse-Query - /zh/docs/codefuse-query-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-query-zh/ - CodeFuse-Query CodeFuse-Query - - - CodeFuse-Query - 
/zh/docs/overview/codefuse-query-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/codefuse-query-zh/ - CodeFuse-Query 随着大规模软件开发的普及,对可扩展且易于适应的静态代码分析技术的需求正在加大。传统的静态分析工具,如 Clang Static Analyzer (CSA) 或 PMD,在检查编程规则或样式问题方面已经展现出了良好的效果。然而,这些工具通常是为了满足特定的目标而设计的,往往无法满足现代软件开发环境中多变和多元化的需求。这些需求可以涉及服务质量 (QoS)、各种编程语言、不同的算法需求,以及各种性能需求。例如,安全团队可能需要复杂的算法,如上下文敏感的污点分析,来审查较小的代码库,而项目经理可能需要一种相对较轻的算法,例如计算圈复杂度的算法,以在较大的代码库上测量开发人员的生产力。 这些多元化的需求,加上大型组织中常见的计算资源限制,构成了一项重大的挑战。由于传统工具采用的是问题特定的计算方式,往往无法在这种环境中实现扩展。因此,我们推出了 CodeQuery,这是一个专为大规模静态分析设计的集中式数据平台。 在 CodeQuery 的实现中,我们把源代码和分析结果看作数据,把执行过程看作大数据处理,这与传统的以工具为中心的方法有着显著的不同。我们利用大型组织中的常见系统,如数据仓库、MaxCompute 和 Hive 等数据计算设施、OSS 对象存储和 Kubernetes 等灵活计算资源,让 CodeQuery 能够无缝地融入这些系统中。这种方法使 CodeQuery 高度可维护和可扩展,能够支持多元化的需求,并有效应对不断变化的需求。此外,CodeQuery 的开放架构鼓励各种内部系统之间的互操作性,实现了无缝的交互和数据交换。这种集成和交互能力不仅提高了组织内部的自动化程度,也提高了效率,降低了手动错误的可能性。通过打破信息孤岛,推动更互联、更自动化的环境,CodeQuery 显著提高了软件开发过程的整体生产力和效率。 此外,CodeQuery 的以数据为中心的方法在处理静态源代码分析的领域特定挑战时具有独特的优势。例如,源代码通常是一个高度结构化和互联的数据集,与其他代码和配置文件有强烈的信息和连接。将代码视为数据,CodeQuery 可以巧妙地处理这些问题,这使得它特别适合在大型组织中使用,其中代码库持续但逐步地进行演变,大部分代码在每天进行微小的改动同时保持稳定。 CodeQuery 还支持如基于代码数据的商业智能 (BI) 这类用例,能生成报告和仪表板,协助监控和决策过程。此外,CodeQuery 在分析大型语言模型 (LLM) 的训练数据方面发挥了重要作用,提供了增强这些模型整体效果的深入见解。 在当前的静态分析领域,CodeQuery 带来了一种新的范式。它不仅满足了大规模、复杂的代码库分析需求,还能适应不断变化和多元化的静态分析场景。CodeQuery 的以数据为中心的方法,使得其在处理大数据环境中的代码分析问题时具有独特优势。CodeQuery 的设计,旨在解决大规模软件开发环境中的静态分析问题。它能够将源代码和分析结果视作数据,使得其可以灵活地融入大型组织的各种系统中。这种方法不仅可以有效地处理大规模的代码库,还可以应对各种复杂的分析需求,从而使得静态分析工作变得更加高效和准确。 CodeQuery 的特点和优势可以概括为以下几点: 高度可扩展:CodeQuery 可以处理大规模的代码库,且能够适应不同的分析需求。这种高度的可扩展性使得 CodeQuery 可以在大型组织中发挥重要作用。 以数据为中心:CodeQuery 将源代码和分析结果视作数据,这种以数据为中心的方法使其在处理大数据环境中的代码分析问题时具有独特优势。 高度集成:CodeQuery 能够无缝地融入大型组织的各种系统中,包括数据仓库、数据计算设施、对象存储和灵活计算资源等。这种高度的集成性使得 CodeQuery 在大型组织中的使用变得更加方便和高效。 支持多元化的需求:CodeQuery 不仅可以处理大规模的代码库,还可以应对各种复杂的分析需求,包括服务质量分析需求、跨编程语言分析需求、算法需求和性能需求等。 CodeQuery 是一种强大的静态代码分析平台,适合大规模、复杂的代码库分析场景。它的以数据为中心的方法和高度的可扩展性使得它在现代软件开发环境中具有独特的优势。未来,随着静态代码分析技术的不断发展,CodeQuery 有望在这个领域中扮演更加重要的角色。 - - - CodeFuse-Query 介绍 - 
/zh/docs/codefuse-query-introduction-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-query-introduction-zh/ - 概述 CodeFuse-Query 是一个支持对 各种编程语言 进行 结构化分析 的 代码数据平台。核心思想是利用各种语言解析器将所有代码转化为数据,并将其结构化存储到代码数据库中。通过使用自定义查询语言,按照业务需求进行数据分析。如下图所示: 2.1 CodeFuse-Query的架构 从整体上来说,CodeFuse-Query代码数据平台分为三大部分:代码数据模型、代码查询DSL、平台产品化服务。主要工作流程如下图所示: 代码数据化和标准化:COREF 我们定义了一种代码数据化和标准化的模型:COREF,要求所有代码都要能通过各种语言抽取器转化到该模型。 COREF主要包含以下几种信息: COREF = AST (抽象语法树) + ASG(抽象语义图) + CFG(控制流图) + PDG(程序依赖图)+ Call Graph(函数调用图) + Class Hierarchy (类继承关系)+ Documentation(文档/注释信息) 注:由于每种信息的计算难度不一,所以并不是所有语言的COREF信息均包含以上全部信息,基础信息主要有AST、ASG、Call Graph、Class Hierarchy和Documentation,其他信息( CFG 和 PDG )仍在建设中,后续会逐步支持。 代码查询DSL 基于生成的COREF代码数据,CodeFuse-Query 使用一种自定义的DSL语言 Gödel 来进行查询,从而完成代码分析需求。 Gödel是一种逻辑推理语言,它的底层实现是基于逻辑推理语言Datalog,通过描述“事实”和“规则”, 程序可以不断地推导出新的事实。Gödel也是一个声明式语言,相较于命令式编程,声明式编程更加着重描述“要什么”,而把如何实现交给计算引擎。 既然代码已经转化为关系型数据(COREF数据以关系型数据表的形式存储),相信大家会有疑问,为什么不直接用SQL,或者是直接使用SDK,而是又要专门去学习一个新的DSL语言呢?因为Datalog的计算具备单调性和终止性,简单理解就是,Datalog是在牺牲了表达能力的前提下获得了更高的性能,而Gödel继承了这个特点。 相比较SDK,Gödel的主要优点是易学易用,声明式的描述,用户不需要关注中间的运算过程,只需要像SQL一样简单描述清楚需求即可。 相比较SQL,Gödel的优点主要是描述能力更强、计算速度更快,例如描述递归算法和多表联合查询,而这些对于SQL来说都是比较困难的。 平台化、产品化 CodeFuse-Query 包括Sparrow CLI 和CodeFuse-Query在线服务Query中心。Sparrow CLI包含了所有组件和依赖,例如抽取器,数据模型,编译器等,用户完全可以通过使用Sparrow CLI在本地进行代码数据生成和查询(Sparrow CLI的使用方式请见 第3节 安装、配置、运行)。如果用户有在线查询的需求,可以使用Query中心进行实验。 2.2 CodeFuse-Query支持的分析语言 截至2023-10-31为止,CodeFuse-Query支持对11种编程语言进行数据分析。其中对5种编程语言( Java、JavaScript、TypeScript、XML、Go )的支持度非常成熟,对剩余6种编程语言(Object-C、C++、Python3、Swift、SQL、Properties )的支持度处于beta阶段,还有进一步提升和完善的空间,具体的支持情况见下表: 语言 状态 COREF模型节点数 Java 成熟 162 XML 成熟 12 TS/JS 成熟 392 Go 成熟 40 OC/C++ beta 53/397 Python3 beta 93 Swift beta 248 SQL beta 750 Properties beta 9 注:以上语言状态的成熟程度判断标准是根据COREF包含的信息种类和实际落地情况来进行判定,除了OC/C++外,所有语言均支持了完整的AST信息和Documentation信息,以Java为例,COREF for Java还支持了ASG、Call Graph、Class Hierarchy、以及部分CFG信息。 - - - CodeFuseEval: 代码大语言模型的多任务评估基准 - /zh/docs/overview/b10.codefuse-evalution/ - Mon, 01 Jan 0001 00:00:00 
+0000 - /zh/docs/overview/b10.codefuse-evalution/ - English| CodeFuseEval on ModelScope| CodeFuseEval on Hugging Face CodeFuseEval在HumanEval-x、MBPP的基准上,结合CodeFuse大模型多任务场景,开发的编程领域多任务的评测基准, 可用于评估模型在代码补全,自然语言生成代码,测试用例生成、跨语言代码翻译,中文指令生成代码等多类任务的性能。持续开放中,敬请期待! - - - Connector Agent - /zh/coagent/connector-agent-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-agent-zh/ - 快速构建一个Agent 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.agents import BaseAgent from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选一个role来做示例 # 从已有的配置中选择一个config,具体参数细节见下面 role_configs = load_role_configs(AGETN_CONFIGS) agent_config = role_configs[&#34;general_planner&#34;] # 生成agent实例 base_agent = BaseAgent( role=agent_config. 
- - - Connector Agent - /zh/muagent/connector-agent-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-agent-zh/ - 快速构建一个Agent 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 配置相关 LLM 和 Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent.connector.chains import BaseChain from muagent.connector.schema import Role, Message, ChainConfig from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, stop=&#34;**Observation:**&#34; ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, embed_model_path=embed_model_path ) Agent 配置 定义两个react agent,进行实际任务执行 # 这里采用了预定义的prompt,也可以参考上述prompt完成编写 from muagent. 
- - - Connector Chain - /zh/coagent/connector-chain-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-chain-zh/ - 快速构建一个 agent chain 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) # 设置openai的api-key import os, sys import openai import importlib os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxxx&#34; openai.api_key = &#34;sk-xxxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的agent配置选多个role组合成 agent chain from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent. 
- - - Connector Chain - /zh/muagent/connector-chain-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-chain-zh/ - 快速构建一个Agent Chain 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 配置相关 LLM 和 Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent.connector.chains import BaseChain from muagent.connector.schema import Role, Message, ChainConfig from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0. 
- - - Connector Memory - /zh/coagent/connector-memory-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-memory-zh/ - Memory Manager 主要用于 chat history 的管理,暂未完成 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 使用示例 创建 memory manager 实例 import os import openai from coagent.base_configs.env_config import KB_ROOT_PATH from coagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.schema import Message os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os. - - - Connector Memory - /zh/muagent/connector-memory-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-memory-zh/ - Memory Manager 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 使用示例 完整示例见 ~/tests/connector/memory_manager_test.py 创建 memory manager 实例 import os import openai from muagent.base_configs.env_config import KB_ROOT_PATH from muagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.connector.schema import Message # OPENAI_API_BASE = &#34;https://api.openai.com/v1&#34; os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; openai.api_key = &#34;sk-xxx&#34; os.environ[&#34;model_name&#34;] = &#34;gpt-3.5-turbo&#34; # os. 
- - - Connector Phase - /zh/coagent/connector-phase-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-phase-zh/ - 快速构建一个 agent phase 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) from coagent.base_configs.env_config import JUPYTER_WORK_PATH, KB_ROOT_PATH from coagent.llm_models.llm_config import EmbedConfig, LLMConfig from coagent.connector.configs import AGETN_CONFIGS from coagent.connector.phase import BasePhase from coagent.connector.schema import Message, load_role_configs os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 配置相关 LLM 和 Embedding Model # LLM 和 Embedding Model 配置 llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 这里从已有的 phase 配置中选一个 phase 来做示例 # log-level,print prompt和llm predict os. 
- - - Connector Phase - /zh/muagent/connector-phase-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-phase-zh/ - 快速构建一个Agent Phase 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 配置相关 LLM 和 Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent.connector.chains import BaseChain from muagent.connector.schema import Role, Message, ChainConfig from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0. - - - Connector Prompt - /zh/coagent/connector-prompt-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/connector-prompt-zh/ - Prompt 的标准结构 在整个Prompt的整个结构中,我们需要去定义三个部分 Agent Profil Input Format Response Output Format #### Agent Profile Agent Description ... #### Input Format **Origin Query:** the initial question or objective that the user wanted to achieve **Context:** the current status and history of the tasks to determine if Origin Query has been achieved. #### Response Output Format **Action Status:** finished or continued If it&#39;s &#39;finished&#39;, the context can answer the origin query. If it&#39;s &#39;continued&#39;, the context cant answer the origin query. 
- - - Connector Prompt - /zh/muagent/connector-prompt-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-prompt-zh/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Prompt 的标准结构 在整个Prompt的整个结构中,我们需要去定义三个部分 Agent Profil Input Format Response Output Format #### Agent Profile Agent Description ... #### Input Format **Origin Query:** the initial question or objective that the user wanted to achieve **Context:** the current status and history of the tasks to determine if Origin Query has been achieved. #### Response Output Format **Action Status:** finished or continued If it&#39;s &#39;finished&#39;, the context can answer the origin query. - - - Customed Examples - /zh/coagent/customed-examples-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/customed-examples-zh/ - 如何创建你个性化的 agent phase 场景 下面通过 autogen 的 auto_feedback_from_code_execution 构建过来,来详细演示如何自定义一个 agent phase 的构建 设计你的prompt结构 import os, sys, requests # from configs.model_config import * from coagent.connector.phase import BasePhase from coagent.connector.chains import BaseChain from coagent.connector.schema import Message from coagent.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS import importlib # update new agent configs auto_feedback_from_code_execution_PROMPT = &#34;&#34;&#34;#### Agent Profile You are a helpful AI assistant. Solve tasks using your coding and language skills. 
In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. - - - Customed Examples - /zh/muagent/custom-examples-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/custom-examples-zh/ - 如何创建你个性化的 agent phase 场景 下面通过 代码库来实现代码转API文档的自动生成, 来详细演示如何自定义一个 agent phase 的构建 设计你的prompt结构 codeGenDocGroup_PROMPT, 构建 group Agent Prompt # update new agent configs codeGenDocGroup_PROMPT = &#34;&#34;&#34;#### Agent Profile Your goal is to response according the Context Data&#39;s information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. When you need to select the appropriate role for handling a user&#39;s query, carefully read the provided role names, role descriptions and tool list. - - - Embedding 配置 - /zh/muagent/embedding-model-config-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/embedding-model-config-zh/ - 准备相关参数 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; 构建LLM Config 通过本地模型文件构建 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, embed_model_path=embed_model_path ) 通过openai构建 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;openai&#34;, api_key=api_key, api_base_url=api_base_url, ) 自定义langchain embeddings传入 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig class CustomizedEmbeddings(Embeddings): def embed_documents(self, texts: List[str]) -&gt; List[List[float]]: embeddings = [] # add your embedding code return embeddings def embed_query(self, text: str) -&gt; List[float]: &#34;&#34;&#34;Compute query embeddings using a HuggingFace transformer model. 
- - - FasterTransformer4CodeFuse - /zh/docs/fastertransformer4codefuse-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/fastertransformer4codefuse-zh/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - FasterTransformer4CodeFuse - /zh/docs/overview/fastertransformer4codefuse-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/fastertransformer4codefuse-zh/ - FasterTransformer4CodeFuse FasterTransformer4CodeFuse - - - LLM 配置 - /zh/muagent/llm-model-config-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/llm-model-config-zh/ - 准备相关参数 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; 构建LLM Config 通过调用 类openai 传入 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, stop=&#34;**Observation:**&#34; ) 自定义 langchain LLM 传入 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from langchain.llms.base import BaseLLM, LLM class CustomizedModel(LLM): repetition_penalty = 1.1 temperature = 0.2 top_k = 40 top_p = 0.9 def predict(self, prompt: str, stop: Optional[List[str]] = None) -&gt; str: return self. 
- - - MFTCoder - /zh/docs/mftcoder-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/mftcoder-zh/ - MFTCoder MFTCoder - - - MFTCoder 介绍 - /docs/mftcoder-introduction-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-introduction-zh/ - 项目简介 国际首个高精度、高效率、多任务、多模型支持、多训练算法,大模型代码能力微调框架; Codefuse-MFTCoder 是一个开源的多任务代码大语言模型项目,包含代码大模型的模型、数据、训练等。我们希望通过开源,分享交流大语言模型在代码领域的进步。 项目框架 项目优势 :white_check_mark: 多任务:一个模型同时支持多个任务,会保证多个任务之间的平衡,甚至可以泛化到新的没有见过的任务上去; :white_check_mark: 多模型:支持最新的多个开源模型,包括gpt-neox,llama,llama-2,baichuan,Qwen,chatglm2等; :white_check_mark: 多框架:既支持主流开源的Accelerate+DeepSpeed/FSDP,也支持新开源的ATorch 框架; :white_check_mark: 高效微调:支持LoRA和QLoRA,可以用很少的资源去微调很大的模型,且训练速度能满足几乎所有微调场景; 本项目主要内容如下: 同时支持单任务SFT(Supervised FineTuning)和MFT(Multi-task FineTuning), 当前开源支持数据均衡,未来将持续开源难易均衡, 收敛均衡等 支持QLoRA低成本高效指令微调、LoRA高效指令微调、全量参数高精度微调。 支持绝大部分主流的开源大模型,重点关注代码能力优秀的开源大模型,如DeepSeek-coder, Mistral, Mistral(MoE), Chatglm3, Qwen, GPT-Neox, Starcoder, Codegeex2, Code-LLaMA等。 支持lora与base model进行权重合并,推理更便捷。 整理并开源2个指令微调数据集:Evol-instruction-66k和CodeExercise-Python-27k。 开源多个[Codefuse系列指令微调模型权重],具体参见我们的huggingface组织和modelscope组织下的模型:codefuse-ai huggingface or codefuse-ai 魔搭。 - - - MFTCoder: Accelerate + DeepSpeed/FSDP 框架篇 - /docs/mftcoder-accelerate-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-accelerate-zh/ - [中文] [English] 1. 更新 🔥 MFTCoder-accelerate 新增支持accelerate + FSDP框架, 支持全量微调和LoRA; 🔥 MFTCoder-accelerate 支持最新更多主流开源模型: mistral, mixtral-8x7b(Mixture of Experts), deepseek, chatglm3; 🔥 MFTCoder-accelerate 新增self-paced Loss, 用于收敛均衡; 🔥 MFTCoder-accelerate 支持使用accelerate + DeepSpeed框架下支持 全量参数/QLoRA/LoRA微调; 🔥 MFTCoder-accelerate 在训练中支持了多任务微调MFT, 可以同时平衡多个任务的训练,训练的模型支持多任务推理; 🔥 MFTCoder-accelerate 在训练中支持多种模型基座: codellama, llama2, llama, starcoder, codegeex2, chatglm2, qwen等 2. 
数据格式 2.1 训练数据格式 训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 可以参考项目中的xxx.jsonl文件。 { &#34;id&#34;:0, &#34;data_name&#34;:&#34;code-helper&#34;, &#34;chat_rounds&#34;:[ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;你是一个智能代码助手,可以回复用户与代码相关的问题&#34; }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;写一个快速排序&#34; }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;以下是一个快速排序算法xxxxxx&#34; }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;解释一下这段代码&#34; }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;好的,这段代码xxx&#34; } ] } 2. - - - MFTCoder: 高效准确的多任务大模型微调框架 - /zh/docs/overview/mftcoder-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/overview/mftcoder-zh/ - 🤗 HuggingFace • 🤖 魔搭 [中文] [English] 目录 新闻 文章 项目简介 环境 训练 模型 数据集 新闻 🔥🔥🔥 [2024/01/17] MFTCoder-v0.3.0发布。新增对Mixtral(MoE), DeepSeek等模型的支持;新增支持FSDP(Fully Sharded Data Parallel);新增Self-paced Loss, 支持多任务收敛均衡。 感兴趣详见微信公众号CodeFuse的文章MFTCoder 重磅升级v0.3.0发布 🔥🔥🔥 [2024/01/17] 开源了CodeFuse-DeepSeek-33B模型,在HumanEval pass@1(greedy decoding)上可以达到78.7%。该模型在Big Code榜单的结果近期发布,请关注公众号获取最新信息。 🔥🔥🔥 [2024/01/17] 开源了CodeFuse-Mixtral-8x7B模型,在HumanEval pass@1(greedy decoding)上可以达到56.1%。感兴趣详见微信公众号CodeFuse的文章MFTCoder提升Mixtral-8x7B混合专家模型的代码能力实践 🔥🔥 [2023/11/07] MFTCoder论文在Arxiv公布,介绍了多任务微调的技术细节。 🔥🔥 [2023/10/20] 开源了CodeFuse-QWen-14B模型,在HumanEval pass@1(greedy decoding)上可以达到48.8%。相比较与基座模型Qwen-14b提升16%。感兴趣详见微信公众号CodeFuse文章 🔥🔥 [2023/09/27] 开源了CodeFuse-StarCoder-15B模型,在HumanEval pass@1(greedy decoding)上可以达到54.9%。 🔥🔥 [2023/09/26] CodeFuse-CodeLlama-34B-4bits量化版本发布,量化后模型在HumanEval pass@1指标为73.8% (贪婪解码)。 🔥🔥 [2023/09/07]MFTCoder微调的模型CodeFuse-CodeLlama-34B在HumanEval Benchmarks的Python Pass@1 取得了74.4%(greedy decoding)的开源SOTA成绩。 🔥🔥 [2023/08/26]MFTCoder-v0.1.0 支持使用LoRA/QLoRA对Code Llama、Llama、Llama2、StarCoder、ChatGLM2、CodeGeeX2、Qwen和GPT-NeoX模型进行微调。 HumanEval表现 模型 HumanEval(Pass@1) 日期 CodeFuse-DeepSeek-33B 78. 
- - - MFTCoder训练: Atorch框架篇 - /docs/mftcoder-atorch-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-atorch-zh/ - [中文] [English] 1. 更新 🔥 MFTCoder在Atorch框架下支持GPTNeoX模型的微调; 🔥 MFTCoder支持全量的有监督微调; 🔥 MFTCoder支持LoRA微调; 2. 数据格式 2.1 训练数据格式 训练数据为jsonl格式,每一行的数据格式如下,其中chat_rounds字段是必需的,可以根据实际需求添加或删除其他字段。 可以参考项目中的xxx.jsonl文件。 { &#34;id&#34;:0, &#34;data_name&#34;:&#34;code-helper&#34;, &#34;chat_rounds&#34;:[ { &#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;你是一个智能代码助手,可以回复用户与代码相关的问题&#34;, &#34;chat_round_id&#34;: 0 }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;写一个快速排序&#34;, &#34;chat_round_id&#34;: 1 }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;以下是一个快速排序算法xxxxxx&#34;, &#34;chat_round_id&#34;: 1 }, { &#34;role&#34;: &#34;human&#34;, &#34;content&#34;: &#34;解释一下这段代码&#34;, &#34;chat_round_id&#34;: 2 }, { &#34;role&#34;: &#34;bot&#34;, &#34;content&#34;: &#34;好的,这段代码xxx&#34;, &#34;chat_round_id&#34;: 2 } ] } 2.2 推理数据格式 推理数据格式为模型在训练数据格式下拼接的字符串形式,它也是推理时输入prompt拼接的方式: &#34;&#34;&#34; &lt;|role_start|&gt;system&lt;|role_end|&gt;这是System指令 &lt;|role_start|&gt;human&lt;|role_end|&gt;这是第1轮用户输入的问题 &lt;|role_start|&gt;bot&lt;|role_end|&gt;这是第1轮模型生成的内容&lt;/s&gt; &lt;|role_start|&gt;human&lt;|role_end|&gt;这是第2轮用户输入的问题 &lt;|role_start|&gt;bot&lt;|role_end|&gt;这是第2轮模型生成的内容&lt;/s&gt; . 
- - - MuAgent 概览 - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - 简介 为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。 但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。 经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。 因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。 本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。 MuAgent框架 在MuAgent中,我们除了定义Agent交互链路和AgentBase基础执行流以外,还额外设计了 Prompt Manager 和 Memory Manager 两个基础组件,分别用于自动化构建Prompt和chat history管理。最终构建出一个可扩展、易于使用的Multi-Agent框架,包括以下内容 Agent Base:构建了四种基本的Agent类型BaseAgent、ReactAgent、ExecutorAgent、SelectorAgent,支撑各种场景的基础活动 Communication:通过Message和Parse Message 实体完成Agent间的信息传递,并与Memory Manager交互再Memory Pool完成记忆管理 Prompt Manager:通过Role Handler、Doc/Tool Handler、Session Handler、Customized Handler,来自动化组装Customized 的Agent Prompt Memory Manager: 用于支撑 chat history 的存储管理、信息压缩、记忆检索等管理,最后通过Memory Pool在数据库、本地、向量数据库中完成存储 Component:用于构建Agent的辅助生态组件,包括Retrieval、Tool、Action、Sandbox等 Customized Model:支持私有化的LLM和Embedding的接入 Agent Base 在Agent层面,提供四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用。所有的Action都由Agent执行。 BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 =&gt; 输出 ReactAgent:提供标准React的功能,根据问题实现当前任务 ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 Agent接受到任务清单([List[task]),对这个任务清单Task进行循环执行(中间也可添加 Feedback Agent来进行任务重新优化),直到任务完成 SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. 
Communication 为了让Agent之间进行更好的交互,以及能够让每一个Agent接受到足够的信息完成它们特定任务,我们将Message信息体分成了多个部分,System Content、Info Content、LLM Content和LLM Parsed Content等 System Content:用于存储管理当前LLM输出的时间,Role信息等 Info Content:LLM辅助信息,比如像知识库查询信息、代码库检索信息、工具信息、Agent信息等 LLM Content:直接存储和传递LLM 产生的信息 LLM Parsed Content:对LLM进行解析转成更易操作的key-value数据结构,方便对LLM内容进行过滤 Customized Content:用于管理自定义action产生的key-value数据内容,用于后续自定义Prompt模板的组装构建 通过对以上消息格式的定义,我们便可以完成通用消息的传递和管理。具体组装见Prompt Manager模块 - - - Prompt 管理器 - /zh/coagent/prompt-%E7%AE%A1%E7%90%86%E5%99%A8/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/prompt-%E7%AE%A1%E7%90%86%E5%99%A8/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Response:在与智能体的对话中,如果用户希望智能体继续某个话题或内容,可以在此模块中输入续写的上文。例如,在运用REACT模式时,可以在此区域内详细阐述智能体先前的行为和观察结果,以便于智能体构建连贯的后续响应。 Prompt自定义配置 Prompt模块参数 field_name:唯一的字段名称标识,必须提供。 function:指定如何处理输入数据的函数,必须提供。 title:定义模块的标题。若未提供,将自动生成一个标题,该标题通过把字段名称中的下划线替换为空格并将每个单词的首字母大写来构建。 description:提供模块的简要描述,位于模块最上方(标题下方)。默认为空,可选填。 is_context:标识该字段是否属于上下文模块的一部分。默认为True,意味着除非显式指定为False,否则都被视为上下文的一部分。 omit_if_empty:设定当模块内容为空时,是否在prompt中省略该模块,即不显示相应的模板标题和内容。默认为False,意味着即使内容为空也会显示标题。如果希望内容为空时省略模块,需显式设置为True。 Prompt配置示例 Prompt配置由一系列定义prompt模块的字典组成,这些模块将根据指定的参数和功能来处理输入数据并组织成一个完整的prompt。 在配置中,每个字典代表一个模块,其中包含相关的参数如 field_name, function_name, is_context, title, description, 和 omit_if_empty,用以控制模块的行为和呈现方式。 context_placeholder 字段用于标识上下文模板的位置,允许在prompt中插入动态内容。 [ {&#34;field_name&#34;: &#39;agent_profile&#39;, &#34;function_name&#34;: &#39;handle_agent_profile&#39;, &#34;is_context&#34;: 
False}, {&#34;field_name&#34;: &#39;context_placeholder&#39;, &#34;function_name&#34;: &#39;&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;tool_information&#39;,&#34;function_name&#34;: &#39;handle_tool_data&#39;, &#34;is_context&#34;: True}, {&#34;field_name&#34;: &#39;reference_documents&#39;, &#34;function_name&#34;: &#39;handle_doc_info&#39;}, {&#34;field_name&#34;: &#39;session_records&#39;, &#34;function_name&#34;: &#39;handle_session_records&#39;}, {&#34;field_name&#34;: &#39;task_records&#39;, &#34;function_name&#34;: &#39;handle_task_records&#39;}, {&#34;field_name&#34;: &#39;output_format&#39;, &#34;function_name&#34;: &#39;handle_output_format&#39;, &#39;title&#39;: &#39;Response Output Format&#39;, &#34;is_context&#34;: False}, {&#34;field_name&#34;: &#39;response&#39;, &#34;function_name&#34;: &#39;handle_response&#39;, &#34;title&#34;=&#34;begin! - - - QuickStart - /docs/codefuse-modelcache-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-quickstart-zh/ - ModelCache易于使用,只需1步骤即可构建缓存测试Demo 快速开始 构建Cache Cache的默认接口如下所示: class Cache: # it should be called when start the cache system def __init__(self): self.has_init = False self.cache_enable_func = None self.embedding_func = None self.post_process_messages_func = None self.config = Config() 在创建ModelCache之前,请考虑以下问题: 你将如何为查询生成嵌入向量?(embedding_func) 该函数将文本嵌入到一个用于上下文相似性搜索的密集向量中。ModelCache可以支持多种嵌入上下文的方法:Huggingface、ONNX和SentenceTransformers。默认逻辑中,使用了在中文领域表现更好的huggingface中的text2vec模型。只需将你的嵌入函数初始化为:text2vec.to_embeddings data_manager = get_data_manager(CacheBase(&#34;mysql&#34;, config=mysql_config), VectorBase(&#34;milvus&#34;, dimension=data2vec.dimension, milvus_config=milvus_config)) cache.init( embedding_func=data2vec.to_embeddings, data_manager=data_manager, similarity_evaluation=SearchDistanceEvaluation(), query_pre_embedding_func=query_multi_splicing, insert_pre_embedding_func=insert_multi_splicing, ) 你将在哪里缓存数据?(data_manager缓存存储) 
缓存存储用于存储所有标量数据,例如原始问题、提示、答案和访问时间。ModelCache支持多种缓存存储选项,如SQLite、MySQL和OceanBase。未来还将添加更多的NoSQL数据库选项。 你将在哪里存储和搜索向量嵌入?(data_manager向量存储) 向量存储组件用于存储和搜索所有嵌入向量,以便在语义上找到最相似的结果。ModelCache支持使用FAISS等向量搜索库或Milvus等向量数据库。未来还将添加更多的向量数据库和云服务选项。 以下是一些示例: data_manager = get_data_manager(CacheBase(&#34;sqlite&#34;), VectorBase(&#34;faiss&#34;, dimension=data2vec.dimension)) data_manager = get_data_manager(CacheBase(&#34;oceanbase&#34;), VectorBase(&#34;milvus&#34;, dimension=data2vec.dimension)) - - - QuickStart - /docs/mftcoder-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/mftcoder-quickstart-zh/ - 环境 首先, 你需要将CUDA(&gt;=11.4, 推荐11.7)及其相关驱动安装成功,并确保其工作正常, 并且安装基本的torch(&gt;=2.0.0) 在requirements.txt下固定了几个主要的python包的版本,执行如下脚本即可: sh init_env.sh 我们强烈建议您安装flash attention(&gt;=2.1.0, 推荐2.3.6), 安装请参考 https://github.com/Dao-AILab/flash-attention 训练 如果你熟悉大模型训练的各种主流开源资源,例如 transformers, DeepSpeed, FSDP等, 为了用开源项目快速上手高性能微调,我们建议您尝试: 🚀🚀 MFTCoder-accelerate: Accelerate + DeepSpeed/FSDP Codebase for MFT(Multi-task Finetuning) 如果你想探索一些新兴的训练框架,可以尝试: 🚀 MFTCoder-atorch: Atorch Codebase for MFT(Multi-task Finetuning) 模型 使用本项目的训练代码,以及上述训练数据,我们训练并在huggingface, modelscope开源了以下模型。 模型 HuggingFace链接 魔搭 链接 基座模型 训练数据 Batch Size Seq Length 🔥🔥🔥 CodeFuse-DeepSeek-33B h-link m-link DeepSeek-coder-33B 60万 80 4096 🔥🔥🔥 CodeFuse-Mixtral-8x7B h-link m-link Mixtral-8x7B 60万 80 4096 🔥🔥🔥 CodeFuse-CodeLlama-34B h-link m-link CodeLlama-34b-Python 60万 80 4096 🔥🔥🔥 CodeFuse-CodeLlama-34B-4bits h-link m-link CodeLlama-34b-Python 4096 🔥🔥🔥 CodeFuse-StarCoder-15B h-link m-link StarCoder-15B 60万 80 4096 🔥🔥🔥 CodeFuse-QWen-14B h-link m-link Qwen-14b 110万 256 4096 🔥🔥🔥 CodeFuse-CodeGeex2-6B h-link m-link CodeGeex2-6B 110万 256 4096 数据集 目前本项目主要整理了如下指令数据集,并将其整理成统一的数据格式,这两个指令微调数据集是我们多任务训练中数十个任务中的2个,未来我们会陆续开源更多的代码任务指令微调数据集: - - - Test-Agent - /zh/docs/test-agent-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/test-agent-zh/ - Test-Agent Test-Agent - - - Test-Agent: 您的智能测试助理 - /zh/docs/overview/test-agent-zh/ - Mon, 01 Jan 0001 
00:00:00 +0000 - /zh/docs/overview/test-agent-zh/ - 本地Mac M1体验效果 魔搭体验效果 魔搭模型访问链接:ModelScope TestGPT-7B 什么是Test Agent?(Introduction) Test Agent 旨在构建测试领域的“智能体”,融合大模型和质量领域工程化技术,促进质量技术代系升级。我们期望和社区成员一起合作,打造创新的测试领域解决方案,构建24小时在线的测试助理服务,让测试如丝般顺滑。 本期特性(Features) 模型 本期我们开源了测试领域模型TestGPT-7B。模型以CodeLlama-7B为基座,进行了相关下游任务的微调: 多语言测试用例生成(Java/Python/Javascript) 一直以来都是学术界和工业界非常关注的领域,近年来不断有新产品或工具孵化出来,如EvoSuite、Randoop、SmartUnit等。然而传统的用例生成存在其难以解决的痛点问题,基于大模型的测试用例生成在测试用例可读性、测试场景完整度、多语言支持方面都优于传统用例生成工具。本次重点支持了多语言测试用例生成,在我们本次开源的版本中首先包含了Java、Python、Javascript的测试用例生成能力,下一版本中逐步开放Go、C++等语言。 测试用例Assert补全 对当前测试用例现状的分析与探查时,我们发现代码仓库中存在一定比例的存量测试用例中未包含Assert。没有Assert的测试用例虽然能够在回归过程中执行通过,却无法发现问题。因此我们拓展了测试用例Assert自动补全这一场景。通过该模型能力,结合一定的工程化配套,可以实现对全库测试用例的批量自动补全,智能提升项目质量水位。 工程框架 本地模型快速发布和体验工程化框架 ChatBot页面 模型快速启动 私有化部署,本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险,100%安全 后续我们会持续迭代模型和工程化能力: 不断加入更多令人激动的测试域应用场景,如领域知识问答、测试场景分析等 支撑面向测试场景的copilot 工程框架开放,如测试领域知识智能embedding、测试通用工具API体系、智能测试Agent等,敬请期待! 以7B为基础,逐步扩展至13B、34B模型。欢迎关注! 性能最强的7B测试领域大模型(Model) 目前在TestAgent中,我们默认使用了TestGPT-7B模型。与当前已有开源模型相比,TestGPT-7B模型在用例执行通过率(pass@1)、用例场景覆盖(平均测试场景数)上都处于业界领先水平。 TestGPT-7B模型核心能力的评测结果如下: 多语言测试用例生成 针对模型支持的三种语言:Java、Python、Javascript,Pass@1评测结果如下: Model Java pass@1 Java Average number of test scenarios Python pass@1 Python Average number of test scenarios Javascript pass@1 Javascript Average number of test scenarios TestGPT-7B 48.6% 4.37 35.67% 3.56 36% 2.76 CodeLlama-13B-Instruct 40.54% 1.08 30.57% 1.65 31.7% 3. 
- - - VSCode插件 - /docs/codefuse-query-toolchain-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-toolchain-zh/ - 开发插件(VSCode) 安装 从VSCode官方插件市场安装(推荐) 插件地址 使用VSIX安装包安装 下载插件 手动从 vsix 安装: 或者使用指令直接从终端安装: code --install-extension [扩展vsix文件路径] 环境准备 Sparrow CLI ,参照 3 安装、配置、运行 扩展特性 本扩展提供了以下功能模块: COREF AST Viewer Gödel Language Server Gödel Language Runner COREF AST Viewer 以下功能需要在扩展设置中设置相关项后启用。目前仅支持于Java语言 Java 文件转成树状的 COREF Node Node 与代码位置的相互定位 在Lib API Viewer 查看 Node 的API,Node 复制 Lib API Viewer:查询与复制使用 Gödel Language Server Features 以下功能均需要在设置扩展后启用。不设置相关项的情况下,语法高亮仍然可用。 错误信息提示 错误信息会随着代码的更新而自动更新。 符号信息提示和补全 包含local变量和全局符号信息的补全提示,关键字等信息会提供对应的使用样例,全局符号信息会提供更详细的内部信息,如包含的成员变量、成员方法、静态方法。 关键字补全和使用样例提示 local 变量类型信息和符号补全 . 跟随的符号信息和补全 :: 跟随的符号信息和补全 注解使用样例提示 全局符号类型信息 (内部结构,成员方法,静态方法) 跳转到定义 可以通过右键跳转定义或者ctrl/command+left click直接跳转到准确的符号定义位置。 - - - 本地私有化&大模型接口接入 - /zh/docs/%E6%9C%AC%E5%9C%B0%E7%A7%81%E6%9C%89%E5%8C%96%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%8F%A3%E6%8E%A5%E5%85%A5/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/%E6%9C%AC%E5%9C%B0%E7%A7%81%E6%9C%89%E5%8C%96%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%8F%A3%E6%8E%A5%E5%85%A5/ - 中文&nbsp | &nbspEnglish&nbsp 本地私有化/大模型接口接入 依托于开源的 LLM 与 Embedding 模型,本项目可实现基于开源模型的离线私有部署。此外,本项目也支持 OpenAI API 的调用。 本地私有化模型接入 模型地址配置示例,model_config.py配置修改 # 建议:走huggingface接入,尽量使用chat模型,不要使用base,无法获取正确输出 # 注意:当llm_model_dict和VLLM_MODEL_DICT同时存在时,优先启动VLLM_MODEL_DICT中的模型配置 # llm_model_dict 配置接入示例如下 # 1、若把模型放到 ~/codefuse-chatbot/llm_models 路径下 # 若模型地址如下 model_dir: ~/codefuse-chatbot/llm_models/THUDM/chatglm-6b # 参考配置如下 llm_model_dict = { &#34;chatglm-6b&#34;: { &#34;local_model_path&#34;: &#34;THUDM/chatglm-6b&#34;, &#34;api_base_url&#34;: &#34;http://localhost:8888/v1&#34;, # &#34;name&#34;修改为fastchat服务中的&#34;api_base_url&#34; &#34;api_key&#34;: &#34;EMPTY&#34; } } VLLM_MODEL_DICT = { &#39;chatglm2-6b&#39;: &#34;THUDM/chatglm-6b&#34;, } # or 若模型地址如下 model_dir: ~/codefuse-chatbot/llm_models/chatglm-6b llm_model_dict = { &#34;chatglm-6b&#34;: { 
&#34;local_model_path&#34;: &#34;chatglm-6b&#34;, &#34;api_base_url&#34;: &#34;http://localhost:8888/v1&#34;, # &#34;name&#34;修改为fastchat服务中的&#34;api_base_url&#34; &#34;api_key&#34;: &#34;EMPTY&#34; } } VLLM_MODEL_DICT = { &#39;chatglm2-6b&#39;: &#34;chatglm-6b&#34;, } # 2、若不想移动相关模型到 ~/codefuse-chatbot/llm_models # 同时删除 `模型路径重置` 以下的相关代码,具体见model_config. - - - 查询语言介绍 - /docs/codefuse-query-godellanguage-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-godellanguage-zh/ - GödelScript 查询语言 目录 GödelScript 基本概念和语法 简介 基本程序构成 基础类型和编译器内建函数 函数 语句 Schema 数据库 Trait Import Query Ungrounded Error: 未赋值/未绑定错误 查询示例 Java Python JavaScript XML Go 查询调试和优化技巧 Schema 传参导致笛卡尔积过大 多层 for 导致笛卡尔积过大 不要滥用@inline 在本机使用查询脚本流程 GödelScript 基本概念和语法 简介 // script fn hello(greeting: string) -&gt; bool { return greeting = &#34;hello world!&#34; } fn main() { output(hello()) } GödelScript 即 Gödel 查询语言。GödelScript 是 CodeQuery 用于查询和数据处理的领域专用语言 (DSL)。GödelScript 使用了类 Rust 的语法,提供了严格的类型检查、方便快捷的类型推导、智能友好的错误提示信息,使用户能够快速上手。 GödelScript 编译器主要应用场景为: 面向用户编写简单或复杂查询,提供更便捷的写法,提高编写查询的效率; 提供严格类型检查与类型推导,给予更智能的代码修改提示; 提供严格的 ungrounded(未赋值/未绑定) 检测,避免触发 Soufflé Ungrounded Error; Language Server 以及 IDE Extension 支持。 基本程序构成 程序结构 GödelScript 程序可能包含: - - - 概览 - /zh/docs/zh_overview/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/zh_overview/ - HuggingFace | 魔搭社区 | 产品主页 Hello World! This is CodeFuse! 
CodeFuse的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs),涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。 我们非常有激情去构建创新的解决方案来支持全生命周期AI驱动的软件开发,如上图所示。同时,我们也诚邀志同道合的工程师和研究人员加入这个社区,共同构建和增强CodeFuse。 - - - 功能特性 - /docs/codefuse-modelcache-feature-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-feature-zh/ - 功能方面,为了解决huggingface网络问题并提升推理速度,增加了embedding本地推理能力。鉴于SqlAlchemy框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在ModelCache中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 模块 功能 ModelCache GPTCache 基础接口 数据查询接口 &#9745; &#9745; 数据写入接口 &#9745; &#9745; Embedding embedding模型配置 &#9745; &#9745; 大模型embedding层 &#9745; bert模型长文本处理 &#9745; Large model invocation 是否与大模型解耦 &#9745; embeddingg模型本地加载 &#9745; 数据隔离 模型数据隔离 &#9745; &#9745; 超参数隔离 数据库 MySQL &#9745; &#9745; Milvus &#9745; &#9745; OceanBase &#9745; 会话管理 单轮回话 &#9745; &#9745; system指令 &#9745; 多轮回话 &#9745; 数据管理 数据持久化 &#9745; &#9745; 一键清空缓存 &#9745; 租户管理 支持多租户(多模型) &#9745; milvus多表能力 &#9745; 其他 长短对话区分能力 &#9745; 核心功能 在ModelCache中,沿用了GPTCache的主要思想,包含了一系列核心模块:adapter、embedding、similarity和data_manager。adapter模块主要功能是处理各种任务的业务逻辑,并且能够将embedding、similarity、data_manager等模块串联起来;embedding模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank模块用于对召回的向量进行相似度排序和评估;data_manager模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下: 架构调整(轻量化集成):以类redis的缓存模式嵌入到大模型产品中,提供语义缓存能力,不会干扰LLM调用和安全审核等功能,适配所有大模型服务。 多种模型加载方案: 支持加载本地embedding模型,解决huggingface网络连通问题 支持加载多种预训练模型embeding层 数据隔离能力 环境隔离:可依据环境,拉取不同的数据库配置,实现环境隔离(开发、预发、生产) 多租户数据隔离:根据模型动态创建collection,进行数据隔离,用于大模型产品中多个模型/服务数据隔离问题 支持系统指令:采用拼接的方式,解决propmt范式中sys指令问题。 长短文本区分:长文本会给相似评估带来更多挑战,增加了长短文本的区分,可单独配置判断阈值。 milvus性能优化:milvus consistency_level调整为&quot;Session&quot;级别,可以得到更好的性能。 数据管理能力: 一键清空缓存的能力,用于模型升级后的数据管理。 召回hitquery,用于后续的数据分析和模型迭代参考。 异步日志回写能力,用于数据分析和统计 增加model字段和数据统计字段,用于功能拓展。 未来会持续建设的功能: - - - 贡献指南 - /zh/contribution/%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97/ - 中文&nbsp | &nbspEnglish&nbsp 非常感谢您对 
Codefuse 项目感兴趣,我们非常欢迎您对 Codefuse 项目的各种建议、意见(包括批评)、评论和贡献。 您对 Codefuse 的各种建议、意见、评论可以直接通过 GitHub 的 Issues 提出。 参与 Codefuse 项目并为其作出贡献的方法有很多:代码实现、测试编写、流程工具改进、文档完善等等。任何贡献我们都会非常欢迎,并将您加入贡献者列表. 进一步,有了足够的贡献后,您还可以有机会成为 Codefuse 的 Committer。 任何问题,您都可以联系我们得到及时解答,联系方式包括微信、Gitter(GitHub提供的即时聊天工具)、邮件等等。 初次接触 初次来到 Codefuse 社区,您可以: 关注 Codefuse Github 代码库 加入 Codefuse 相关的微信群 随时提问; 通过以上方式及时了解 Codefuse 项目的开发动态并为您关注的话题发表意见。 贡献方式 这份贡献指南并不仅仅关于编写代码。我们重视并感激在各个领域的帮助。以下是一些您可以贡献的方式 文档 Issue PR 改进文档 文档是您了解 Codefuse 的最主要的方式,也是我们最需要帮助的地方! 浏览文档,可以加深您对 Codefuse 的了解,也可以帮助您理解 Codefuse 的功能和技术细节,如果您发现文档有问题,请及时联系我们; 如果您对改进文档的质量感兴趣,不论是修订一个页面的地址、更正一个链接、以及写一篇更优秀的入门文档,我们都非常欢迎! 我们的文档大多数是使用 markdown 格式编写的,您可以直接通过在 GitHub 中的 docs/ 中修改并提交文档变更。如果提交代码变更,可以参阅 Pull Request。 如果发现了一个 Bug 或问题 如果发现了一个 Bug 或问题,您可以直接通过 GitHub 的 Issues 提一个新的 Issue,我们会有人定期处理。详情见Issue模板 - - - 快速开始 - /zh/coagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/coagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - 快速使用 首先,填写LLM配置 import os, sys import openai # llm config os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; openai.api_key = &#34;sk-xxx&#34; # os.environ[&#34;OPENAI_PROXY&#34;] = &#34;socks5h://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 from coagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=&#34;gpt-3.5-turbo&#34;, model_device=&#34;cpu&#34;,api_key=os.environ[&#34;OPENAI_API_KEY&#34;], api_base_url=os.environ[&#34;API_BASE_URL&#34;], temperature=0.3 ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=&#34;text2vec-base-chinese&#34;, embed_model_path=&#34;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/embedding_models/text2vec-base-chinese&#34; ) 最后选择一个已有场景进行执行 from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from coagent.connector.phase import BasePhase from coagent.connector.schema import Message # 选择一个已实现得场景进行执行 # 如果需要做一个数据分析,需要将数据放到某个工作目录,同时指定工作目录(也可使用默认目录) 
import shutil source_file = &#39;D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv&#39; shutil.copy(source_file, JUPYTER_WORK_PATH) # 选择一个场景 phase_name = &#34;baseGroupPhase&#34; phase = BasePhase( phase_name, embed_config=embed_config, llm_config=llm_config, ) # round-1 需要通过代码解释器来完成 query_content = &#34;确认本地是否存在employee_data. - - - 快速开始 - /zh/docs/codefuse-chatbot-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-chatbot-quickstart-zh/ - 中文&nbsp | &nbspEnglish&nbsp 🚀 快速使用 如需使用私有化模型部署,请自行安装 nvidia 驱动程序,本项目已在 Python 3.9.18,CUDA 11.7 环境下,Windows、X86 架构的 macOS 系统中完成测试。 Docker安装、私有化LLM接入及相关启动问题见:快速使用明细 python 环境准备 推荐采用 conda 对 python 环境进行管理(可选) # 准备 conda 环境 conda create --name devopsgpt python=3.9 conda activate devopsgpt 安装相关依赖 cd codefuse-chatbot pip install -r requirements.txt 基础配置 # 修改服务启动的基础配置 cd configs cp model_config.py.example model_config.py cp server_config.py.example server_config.py # model_config#11~12 若需要使用openai接口,openai接口key os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; # 可自行替换自己需要的api_base_url os.environ[&#34;API_BASE_URL&#34;] = &#34;https://api.openai.com/v1&#34; # vi model_config#LLM_MODEL 你需要选择的语言模型 LLM_MODEL = &#34;gpt-3. 
- - - 快速开始 - /docs/codefuse-query-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-quickstart-zh/ - 安装、配置、运行 硬件和软件要求 硬件:4C8G 环境要求:java 1.8 和 python3.8 以上执行环境, 请保证 java python 可执行环境 Sparrow 安装步骤和指导 CodeFuse-Query 下载包是一个 zip 存档,其中包含工具、脚本和各种特定于 CodeFuse-Query 的文件。如果您没有 CodeFuse-Query 许可证,那么下载此存档即表示您同意 CodeFuse-Query 条款和条件。 目前仅支持 mac,linux 系统下使用 CodeFuse-Query,下载地址为:(目前仅给出示例,开源后给出正式下载地址) mac: CodeFuse-Query 2.0.0 linux: CodeFuse-Query 2.0.0 您应该始终使用 CodeFuse-Query 捆绑包,确保版本兼容性 Tips: mac系统下直接下载软件包会提示需要验证开发者 可在安全性设置中进行修改验证 点击仍然允许 详细步骤可参照:Mac 官方文档: 如何在 Mac 上安全地打开 App 或使用xattr -d com.apple.quarantine命令,删除 CodeFuse-Query 被 macOS 赋予的外部属性 xattr -d com.apple.quarantine是一个命令行指令,用于删除文件的 com.apple.quarantine 扩展属性。该扩展属性是 macOS 系统用来标记从外部来源下载的文件或应用程序的属性,以确保安全性。 xattr -d com.apple.quarantine path/to/file 配置和初始化 CodeFuse-Query 开发环境 解压缩:命令行解压或者直接点一下解压缩即可 需要具备 java8 和 python3. - - - 快速开始 - /zh/muagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - Quick Start 完整示例见,examples/muagent_examples 首先,准备相关配置信息 import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 然后,设置LLM配置和Embedding模型配置 from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.connector.phase import BasePhase from muagent.connector.schema import Message llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, stop=&#34;**Observation:**&#34; ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, 
embed_model_path=embed_model_path ) 最后选择一个已有场景进行执行 # if you want to analyze a data. - - - 快速使用 - /zh/docs/codefuse-evalution-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-evalution-quickstart-zh/ - 推理环境: CodeFuse-13B: python 3.8及以上版本,pytorch 2.0及以上版本,transformers 4.24.0及以上版本,CUDA 11.4及以上; CodeFuse-CodeLlama-34B: python 3.8及以上版本,pytorch2.0及以上版本,transformers==4.32.0 ,Sentencepiece,CUDA 11.4及以上。 评测执行环境 评测生成的代码需要使用多种语言编译、运行。我们使用的各编程语言依赖及所用包的版本如下: 依赖 版本 Python 3.10.9 JDK 18.0.2.1 Node.js 16.14.0 js-md5 0.7.3 C++ 11 g++ 7.5.0 Boost 1.75.0 OpenSSL 3.0.0 go 1.18.4 cargo 1.71.1 为了省去使用者配置这些语言环境的麻烦,我们构建了一个Docker镜像,并在其中配置了所需要的环境,你可以按照下面的指令拉取使用 docker pull registry.cn-hangzhou.aliyuncs.com/codefuse/codefuseeval:latest 如果您熟悉Dockerfile,也可以从codefuseEval/docker/Dockerfile构建镜像,或者修改之以定制自己的配置: cd codefuseEval/docker docker build [OPTIONS] . 获取镜像后,使用如下命令创建容器: docker run -it --gpus all --mount type=bind,source=&lt;LOCAL PATH&gt;,target=&lt;PATH IN CONTAINER&gt; [OPTIONS] &lt;IMAGE NAME:TAG&gt; 检查推理结果指令 我们提供脚本来检查所提供代码 LLM 的结果。请使用以下脚本检查相应的推理结果。 bash codefuseEval/script/check_reference.sh codefuseEval/result/CodeFuse-CodeLlama-34B/humaneval_result_python.jsonl humaneval_python bash codefuseEval/script/check_reference. 
- - - 快速使用 - /zh/docs/codefuse-mft-vlm/%E5%BF%AB%E9%80%9F%E4%BD%BF%E7%94%A8/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-mft-vlm/%E5%BF%AB%E9%80%9F%E4%BD%BF%E7%94%A8/ - Contents Install Datasets Multimodal Alignment Visual Instruction Tuning Evaluation Install 请执行 sh init_env.sh Datasets 使用了以下数据集训练模型: 数据集 任务种类 样本量 synthdog-en OCR 800,000 synthdog-zh OCR 800,000 cc3m(downsampled) Image Caption 600,000 cc3m(downsampled) Image Caption 600,000 SBU Image Caption 850,000 Visual Genome VQA (Downsampled) Visual Question Answer(VQA) 500,000 Visual Genome Region descriptions (Downsampled) Reference Grouding 500,000 Visual Genome objects (Downsampled) Grounded Caption 500,000 OCR VQA (Downsampled) OCR and VQA 500,000 请到各个数据集的官网上下载这些数据。 Multimodal Alignment 请执行 sh scripts/pretrain. - - - 快速使用 - /docs/codefuse-devops-model-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-quickstart-zh/ - 依赖安装 需要先 PIP 安装一下 Github 地址下的 requirement.txt 中的包,可以参考一下代码 pip install -r requirements.txt 模型下载 模型下载相关信息如下: 🤗 Huggingface 地址 - 基座模型 对齐模型 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat 🤖 ModelScope 地址 - 基座模型 对齐模型 7B DevOps-Model-7B-Base DevOps-Model-7B-Chat 14B DevOps-Model-14B-Base DevOps-Model-14B-Chat 找到自己想要下载的 Chat 模型版本,当前提供了 7B 和 14B 的模型 模型使用 根据以下代码来和 Chat 模型进行交互 from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig tokenizer = AutoTokenizer.from_pretrained(&#34;path_to_DevOps-Model-Chat&#34;, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(&#34;path_to_DevOps-Model-Chat&#34;, device_map=&#34;auto&#34;, trust_remote_code=True, bf16=True).eval() # 指定 generation_config model. 
- - - 快速使用 - /docs/test-agent-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/test-agent-quickstart-zh/ - 快速使用(QuickStart) 前置准备 模型下载 您可在modelscope或huggingface上获取到模型的详细信息并下载模型文件。 需要注意的是: 1)如果您通过modelscope下载模型,下载方式可参考:下载说明; 2)如果您通过huggingface下载模型,请确保您可以正常访问huggingface。 环境安装 python&gt;=3.8 transformers==4.33.2 git clone https://github.com/codefuse-ai/Test-Agent cd Test-Agent pip install -r requirements.txt 在开始运行TestGPT-7B模型之前,请确保你的执行环境拥有大约14GB的显存。 启动服务 项目提供了网页端快速搭建UI的能力能够更直观的展示模型交互和效果,我们可以使用简单的几个命令把前端页面唤醒并实时调用模型能力。在项目目录下,依次启动以下服务: 1.启动controller python3 -m chat.server.controller 2.启动模型worker python3 -m chat.server.model_worker &ndash;model-path models/TestGPT-7B &ndash;device mps (models/TestGPT-7B 为实际模型文件路径) 对于启动方式,可以按需选择以下几种配置选项: &ndash;device mps 用于在Mac电脑上开启GPU加速的选项(Apple Silicon或AMD GPUs); &ndash;device xpu 用于在Intel XPU上开启加速的选项(Intel Data Center and Arc A-Series GPUs); 需安装Intel Extension for PyTorch 设置OneAPI环境变量:source /opt/intel/oneapi/setvars.sh &ndash;device npu 用于在华为AI处理器上开启加速的选项; 需安装Ascend PyTorch Adapter 设置CANN环境变量:source /usr/local/Ascend/ascend-toolkit/set_env.sh &ndash;device cpu 单独使用CPU运行的选项,不需要GPU; &ndash;num-gpus 2 指定并发gpu运行的选项。 启动web服务 python3 -m chat. - - - 评测 - /zh/docs/codefuse-devops-eval-quickstart-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/codefuse-devops-eval-quickstart-zh/ - 🚀 如何进行测试 如果需要在自己的 HuggingFace 格式的模型上进行测试的话,总的步骤分为如下几步: 编写 Model 的 loader 函数 编写 Model 的 context_builder 函数 注册模型到配置文件中 执行测试脚本 如果模型在加载进来后不需要特殊的处理,而且输入也不需要转换为特定的格式(e.g. chatml 格式或者其他的 human-bot 格式),请直接跳转到第四步直接发起测试。 1. 编写 loader 函数 模型加载时还需要做一些额外的处理(e.g. tokenizer 调整),需要继承 ModelAndTokenizerLoader 类来覆写对应的 load_model 和 load_tokenizer 函数, 如下所示: class QwenModelAndTokenizerLoader(ModelAndTokenizerLoader): def __init__(self): super().__init__() pass @override def load_model(self, model_path: str): # Implementation of the method pass @override def load_tokenizer(self, model_path: str): # Implementation of the method pass 2. 编写 Model 的 context_builder 函数 如果输入需要转换为特定的格式(e. 
- - - 启动明细 - /zh/docs/%E5%90%AF%E5%8A%A8%E6%98%8E%E7%BB%86/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/%E5%90%AF%E5%8A%A8%E6%98%8E%E7%BB%86/ - 中文&nbsp | &nbspEnglish&nbsp 如需使用私有化模型部署,请自行安装 nvidia 驱动程序。。 python 环境准备 推荐采用 conda 对 python 环境进行管理(可选) # 准备 conda 环境 conda create --name devopsgpt python=3.9 conda activate devopsgpt 安装相关依赖 cd codefuse-chatbot # python=3.9,notebook用最新即可,python=3.8用notebook=6.5.6 pip install -r requirements.txt 沙盒环境准备 windows Docker 安装: Docker Desktop for Windows 支持 64 位版本的 Windows 10 Pro,且必须开启 Hyper-V(若版本为 v1903 及以上则无需开启 Hyper-V),或者 64 位版本的 Windows 10 Home v1903 及以上版本。 【全面详细】Windows10 Docker安装详细教程 Docker 从入门到实践 Docker Desktop requires the Server service to be enabled 处理 安装wsl或者等报错提示 Linux Docker 安装: Linux 安装相对比较简单,请自行 baidu/google 相关安装 - - - 如何提交Issue - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4issue/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4issue/ - 中文&nbsp | &nbspEnglish&nbsp Issue Type Issue分为三种类型 Bug: 代码或者执行示例存在bug或缺少依赖导致无法正确执行 Documentation:文档表述存在争议、文档内容与代码不一致等 Feature:在当前代码基础继续演进的新功能 Issue Template Issue: Bug Template 提交Issue前的确认清单 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 我搜索了Codefuse相关的所有文档。 我使用GitHub搜索寻找了一个类似的问题,但没有找到。 我为这个问题添加了一个非常描述性的标题。 系统信息 确认系统,如 mac -xx 、windwos-xx、linux-xx 代码版本 确认代码版本或者分支,master、release等 问题描述 描述您碰到的问题,想要实现的事情、或代码执行Bug 代码示例 附上你的执行代码和相关配置,以便能够快速介入进行复现 报错信息、日志 执行上述代码示例后的报错日志和相关信息 相关依赖的模块 以chatbot项目为例 connector codechat sandbox &hellip; Issue: Documentation Template Issue with current documentation: 请帮忙指出当前文档中的问题、错别字或者令人困惑的地方 Idea or request for content 您觉得合理的文档表述方式应该是什么样的 Issue: Feature Template 提交Issue前的确认清单 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 我搜索了Codefuse相关的所有文档。 我使用GitHub Issue搜索寻找了一个类似的问题,但没有找到。 我为这个问题添加了一个非常描述性的标题。 功能描述 描述这个功能作何用途 - - - 如何提交PR - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4pr/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4pr/ - 中文&nbsp | 
&nbspEnglish&nbsp Contribution Pre-Checklist 要先确认是否查看 document、issue、discussion(github 功能) 等公开的文档信息 找到你想处理的GitHub问题。如果不存在,创建一个问题或草案PR,并请求维护者进行检查。 检查相关的、相似的或重复的拉取请求。 创建一个草案拉取请求。 完成PR模板中的描述。 链接任何被你的PR解决的GitHub问题。 Description PR的描述信息,用简洁的语言表达PR完成的事情,具体规范见Commit 格式规范 Related Issue #xx if has Test Code with Result 请提供相关的测试代码如果有必要的话 Commit 格式规范 Commit 分为“标题”和“内容”。原则上标题全部小写。内容首字母大写。 标题 commit message的标题:[&lt;type&gt;](&lt;scope&gt;) &lt;subject&gt; (#pr) type 可选值 本次提交的类型,限定在以下类型(全小写) fix:bug修复 feature:新增功能 feature-wip:开发中的功能,比如某功能的部分代码。 improvement:原有功能的优化和改进 style:代码风格调整 typo:代码或文档勘误 refactor:代码重构(不涉及功能变动) performance/optimize:性能优化 test:单元测试的添加或修复 deps:第三方依赖库的修改 community:社区相关的修改,如修改 Github Issue 模板等。 几点说明: 如在一次提交中出现多种类型,需增加多个类型。 如代码重构带来了性能提升,可以同时添加 [refactor][optimize] 不得出现如上所列类型之外的其他类型。如有必要,需要将新增类型添加到这个文档中。 scope 可选值 本次提交涉及的模块范围。因为功能模块繁多,在此仅罗列部分,后续根据需求不断完善。 以 chatbot的框架为例 connector codechat sandbox &hellip; 几点说明: 尽量使用列表中已存在的选项。如需添加,请及时更新本文档。 - - - 数据 - /zh/docs/%E6%95%B0%E6%8D%AE%E4%BB%8B%E7%BB%8D/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/docs/%E6%95%B0%E6%8D%AE%E4%BB%8B%E7%BB%8D/ - ⏬ 数据 下载 方法一:下载zip压缩文件(你也可以直接用浏览器打开下面的链接): wget https://huggingface.co/datasets/codefuse-admin/devopseval-exam/resolve/main/devopseval-exam.zip 然后可以使用 pandas加载数据: import os import pandas as pd File_Dir=&#34;devopseval-exam&#34; test_df=pd.read_csv(os.path.join(File_Dir,&#34;test&#34;,&#34;UnitTesting.csv&#34;)) 方法二:使用Hugging Face datasets直接加载数据集。示例如下: from datasets import load_dataset dataset=load_dataset(r&#34;DevOps-Eval/devopseval-exam&#34;,name=&#34;UnitTesting&#34;) print(dataset[&#39;val&#39;][0]) # {&#34;id&#34;: 1, &#34;question&#34;: &#34;单元测试应该覆盖以下哪些方面?&#34;, &#34;A&#34;: &#34;正常路径&#34;, &#34;B&#34;: &#34;异常路径&#34;, &#34;C&#34;: &#34;边界值条件&#34;,&#34;D&#34;: 所有以上,&#34;answer&#34;: &#34;D&#34;, &#34;explanation&#34;: &#34;&#34;} ``` 方法三:使用modelscope下载相关所有数据。示例如下: from modelscope.msdatasets import MsDataset MsDataset.clone_meta(dataset_work_dir=&#39;./xxx&#39;, 
dataset_id=&#39;codefuse-ai/devopseval-exam&#39;)``` 👀 说明 为了方便使用,我们已经整理出了 55 个细分类别以及它们的中英文名称。具体细节请查看 category_mapping.json 。格式如下: { &#34;UnitTesting.csv&#34;: [ &#34;unit testing&#34;, &#34;单元测试&#34;, {&#34;dev&#34;: 5, &#34;test&#34;: 32} &#34;TEST&#34; ], ... &#34;file_name&#34;:[ &#34;英文名称&#34;, &#34;中文名称&#34;, &#34;样本数量&#34;, &#34;类别(PLAN,CODE,BUILD,TEST,RELEASE,DEPOLY,OPERATE,MONITOR八选一)&#34; ] } 每个细分类别由两个部分组成:dev 和 test。每个细分类别的 dev 集包含五个示范实例以及为 few-shot 评估提供的解释。而 test 集则用于模型评估,并且test数据已包含准确标签。 - - - 训练解析 - /docs/codefuse-devops-model-train-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-devops-model-train-zh/ - 训练流程 根据查阅文献可知,大部分领域模型都是在对话模型的基础上,通过SFT微调来进行知识注入。而SFT微调所需要QA预料基本都来自于ChatGPT生成。然而,该方案可能存在QA语料无法完全覆盖领域知识的情况。 因此,DevOps-Model采用的是预训练加训 + SFT微调的方案,如图2.1所示。我们认为针对领域大模型,预训练的加训是必要的,因为其可以将领域内的一些知识在预训练阶段注入到大模型,如果这些知识在通用大模型预训练时没有出现过,那会让大模型学习到新的知识;如果出现过,就可以让大模型进一步加深印象。第二步则是大模型对齐,目的是让大模型可以根据问题来回答最合适的内容。 训练数据 数据收集 模型的定位是中文 DevOps 领域大模型,因此收集与中文DevOps相关的预训练数据和QA数据。 预训练数据主要来自互联网技术博客、技术文档、技术书籍等,最终收集到了 50G+ 的预训练语料数据; 针对 QA 数据,我们的目的是想让模型不但对齐到通用的问答能力,而且针对 DevOps 领域也可以学会如何更好的回答问题,因此不但收集了通用领域的单轮和多轮对话数据,还针对 DevOps 领域,通过爬取和 ChatGPT 生成的方式产出了属于 DevOps 领域的问答数据。最终我们精心筛选了约 200K 的 QA 数据进行 SFT微调训练,具体数据量如下表所示。 数据类型 数据量级 通用单轮 QA 50K 通用多轮 QA 20K DevOps 领域 QA 130K 数据筛选 由于预训练数据大部分是从互联网上收集的数据,质量会参差不齐,而大模型训练中数据是最重要的一环,我们建立了如上图所示的清洗 Pipeline,来针对收集到的数据进行质量的全面过滤。 首先,由专家经验和人工筛选,总结出来了一批文档级别的 Heuristic 过滤规则,这一步主要用来过滤掉那些质量非常差的文档; 然后,即便是一篇质量稍差的文章中,也有可能还是含有一些有价值的领域知识,我们也需要尽可能的进行收集。此处,我们对文章进行段落拆分,将文章拆分成一个个段落; 然后,我们将拆分后的段落会再次通过步骤1进行过滤,便得到了一批经过规则过滤后的段落; 然后,我们摘取了其中 1000 个段落,由经验丰富的专业开发人员来进行打标,获得高质量的打标数据; 最后,我们根据打标后的结果来训练了一个打分模型来针对段落进行质量的打分,段落的向量模型选用了预训练好的中文版本的 Sentence-Bert,打分算法选用了逻辑回归,为了避免打分模型的误差,会再通过帕累托分布来根据段落的质量打分进行采样来决定要不要过滤这个段落。 经过这个 Pipeline 后,我们最终沉淀下 15G 左右的数据来进行大模型的预训练加训。 - - - 用户案例 - /docs/codefuse-query-usercase-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-query-usercase-zh/ - 使用场景 查询代码特征 小开发同学想知道 Repo A 里面使用了哪些 String 型的变量,所以他写了一个 Gödel 如下,交给 CodeFuse-Query 系统给他返回了结果。 // 
script use coref::java::* fn out(var: string) -&gt; bool { for(v in Variable(JavaDB::load(&#34;coref_java_src.db&#34;))) { if (v.getType().getName() = &#34;String&#34; &amp;&amp; var = v.getName()) { return true } } } fn main() { output(out()) } 类似需求:查询:类,函数,变量,返回值,调用图,类继承等等。 代码规则检查器 小 TL 同学发现团队总是写出很多类似的 Bug A,他想针对 Bug A 制定一个代码规则和其检查器,并在 CodeReview 阶段做个卡点。小 TL 通过在 CodeFuse-Query 平台上面编写了一段分析 Query,在平台上面测试符合要求,把这段分析 Query 固化下来作为一个代码规则,并上线到了 CodeReview/CI 阶段。从此这个 Bug 再也没发生过了。 类似需求:编写静态缺陷扫描规则进行代码风险拦截。 获取统计数据 小研究发现传统的代码复杂度指标很难准确地衡量代码的复杂情况,通过学习国际先进经验加上自我灵光一闪,设计了一套复杂度指标和算法。通过 Gödel 实现出来以后,发现不怎么优化就已经性能非常高了,很快就应用到了 10 几种语言,11+万个仓库当中去了。马上就对代码仓库整体的复杂度有了深入的了解。相比较以前需要自己解析代码,分析语法树,对接系统,不知道方便了多少。 类似需求:代码统计,代码度量,算法设计,学术研究。 - - - 致谢 - /zh/contribution/%E8%87%B4%E8%B0%A2/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/contribution/%E8%87%B4%E8%B0%A2/ - CodeFuse-ai 文档主页基于docura构建! ChatBot 项目基于langchain-chatchat和codebox-api! &hellip;&hellip; 在此深深感谢他们的开源贡献! - - - 自定义 Retrieval 接入 - /zh/muagent/custom-retrieval-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/custom-retrieval-zh/ - 基本介绍 Doc Retrieval 文档向量数据库是当前最主流的知识库构建方法,使用Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式,来挖掘数据中多种复杂关系。 Code Retrieval LLM在代码生成、修复以及组件理解的任务上,会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中,对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来,以及为LLM提供知识体系外的代码。 Search Retrieval 除了现成的文档和代码知识库以及之外,在日常中实践中会去浏览大量网页内容获取更多的知识,帮助我们理解新兴的场景、业务、技术等,于是我们接入了duckduckgosearch这款开源的搜索工具,能够为LLM提供知识储备以外的内容。 Rertrieval 结构 class IMRertrieval: def __init__(self,): &#39;&#39;&#39; init your personal attributes &#39;&#39;&#39; pass def run(self, ): &#39;&#39;&#39; execute interface, and can use init&#39; attributes &#39;&#39;&#39; pass class BaseDocRetrieval(IMRertrieval): def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): self.knowledge_base_name = knowledge_base_name self.search_top = search_top 
self.score_threshold = score_threshold self.embed_config = embed_config self.kb_root_path = kb_root_path def run(self, query: str, search_top=None, score_threshold=None, ): docs = DocRetrieval. - - - 自定义 Tool 接入 - /zh/muagent/custom-tool-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/custom-tool-zh/ - 基本介绍 在MuAgent中也支持Agent完成Tool的注册,通过Python注册模板BaseToolModel类,编写 Tool_nam Tool_descriptio ToolInputArgs ToolOutputArgs run 等相关属性和方法即可实现工具的快速接入,同时支持langchain Tool接口的直接使用。 例如像上述 XXRetrieval 的功能也可以注册为Tool,最终由LLM执行调用。 BaseTool 结构 from langchain.agents import Tool from pydantic import BaseModel, Field from typing import List, Dict import json class BaseToolModel: name = &#34;BaseToolModel&#34; description = &#34;Tool Description&#34; class ToolInputArgs(BaseModel): &#34;&#34;&#34; Input for MoveFileTool. Tips: default control Required, e.g. key1 is not Required/key2 is Required &#34;&#34;&#34; key1: str = Field(default=None, description=&#34;hello world!&#34;) key2: str = Field(..., description=&#34;hello world!!&#34;) class ToolOutputArgs(BaseModel): &#34;&#34;&#34; Input for MoveFileTool. 
- - - 最佳配置 - /docs/codefuse-modelcache-config-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-config-zh/ - 环境依赖 python版本: 3.8及以上 依赖包安装: pip install requirements.txt 服务启动 在启动服务前,应该进行如下环境配置: 安装关系数据库 mysql, 导入sql创建数据表,sql文件: reference_doc/create_table.sql 安装向量数据库milvus 在配置文件中添加数据库访问信息,配置文件为: modelcache/config/milvus_config.ini modelcache/config/mysql_config.ini 离线模型bin文件下载, 参考地址:https://huggingface.co/shibing624/text2vec-base-chinese/tree/main,并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中 通过flask4modelcache.py脚本启动后端服务。 - - - 最佳配置 - /docs/codefuse-modelcache-release-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /docs/codefuse-modelcache-release-zh/ - 时间 功能 版本号 20230430 完成GPTCache调研,开源流程在OpenAI接口上跑通,单节点形式 无 20230509 1、完成技术选型及上下游交互方案 2、重新开发数据库模块,替换SQLalchemy框架 3、重构llm_handler模块,兼容codegpt,适配codegpt模型参数 V0.1.0 20230519 1、根据环境动态选择codegpt服务模式 2、模型本地加载能力,以及预加载能力 3、增加本地路径依据环境动态加载能力 V0.1.1 20230522 1、架构优化,调整为类redis结构,解藕大模型调用 2、关系数据库由sqlite切换至OceanBase 3、向量数据库由faiss切换至milvus 4、模型数据隔离能力 5、增加核心模块adapter_query、adapter_insert V0.2.0 20230531 1、线上环境上线,动态感知能力 2、embedding模型评测及选型 3、增加预发环境及数据隔离能力 4、增加原始query字段透出能力 V0.2.1 20230607 1、优化关系数据库访问性能 2、优化环境和模型隔离能力 V0.2.2 20230630 1、在modelCache中增加大模型embedding层适配模块 2、增加采纳率统计能力 V0.2.3 20230730 1、增加缓存统计功能 2、增加数据删除功能接口 3、缓存一键清空能力上线 4、多轮会话能力研发,支持system指令和多轮对话 v0.3.0 20230830 1、增加异步处理能力,性能提升超20% 2、架构变更,解藕embedding推理和业务处理逻辑 3、黑名单过滤功能 V0.3.1 - - - diff --git "a/docs/zh/muagent/agent-\347\274\226\346\216\222/index.html" "b/docs/zh/muagent/agent-\347\274\226\346\216\222/index.html" deleted file mode 100644 index be46b02..0000000 --- "a/docs/zh/muagent/agent-\347\274\226\346\216\222/index.html" +++ /dev/null @@ -1,413 +0,0 @@ - - - - - - - - -Agent 编排 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Agent 编排

    -
    -
    - - -

    核心Connector介绍

    -

    为了便于大家理解整个 muagent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建

    -
    - 图片 -
    -


    下面,我们先介绍相关的核心组件

    -

    Agent

    -

    在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用

    -
      -
    1. -

      BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出

      -
    2. -
    3. -

      ReactAgent:提供标准React的功能,根据问题实现当前任务

      -
    4. -
    5. -

      ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务

      -
    6. -
    7. -

      SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答.

      -
    8. -
    -

    输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理

    -

    Chain

    -

    基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理

    -

    Phase

    -

    基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理

    -

    Prompt Manager

    -

    Multi-Agent链路中每一个agent的prompt创建

    -
      -
    • 通过对prompt_input_keys和prompt_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置
    • -
    • 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt
    • -
    -

    Memory Manager

    -

    主要用于 chat history 的管理

    -
      -
    • 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval
    • -
    • 对 chat history 进行关键信息总结 summary context,作为 prompt context
    • -
    • 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/connector-agent-zh/index.html b/docs/zh/muagent/connector-agent-zh/index.html deleted file mode 100644 index 199f3cd..0000000 --- a/docs/zh/muagent/connector-agent-zh/index.html +++ /dev/null @@ -1,603 +0,0 @@ - - - - - - - - -Connector Agent · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Agent

    -
    -
    - - -

    快速构建一个Agent

    -

    首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    然后设置LLM配置和向量模型配置

    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Agent 配置

    -
      -
    • 定义两个react agent,进行实际任务执行
    • -
    -
    # 这里采用了预定义的prompt,也可以参考上述prompt完成编写
    -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT
    -# 定义了基于react的tool agent
    -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT)
    -tool_react_agent = ReactAgent(
    -    role=tool_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -# 定义了基于react的code agent
    -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT)
    -code_react_agent = ReactAgent(
    -    role=code_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
      -
    • 定义groupAgent,用于agent选择
    • -
    -
    prompt = """#### Agent Profile
    -
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -
    -ATTENTION: response carefully referenced "Response Output Format" in format.
    -
    -#### Response Output Format
    -
    -**Thoughts:** think the reason step by step about why you selecte one role
    -
    -**Role:** Select the role from agent names.
    -"""
    -
    -# 定义了一个groupAgent
    -role = Role(role_type="assistant", role_name="qaer", prompt=prompt)
    -base_agent = SelectorAgent(
    -    role=role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[tool_react_agent, code_react_agent]
    -)
    -

    开始实际问答

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    user_name="test", role_type="user", role_name="user", input_query=question,
    -    tools=tools,
    -)
    -# base_agent.pre_print(query)
    -output_message = base_agent.step(query)
    -print(output_message.input_query)
    -print(output_message.role_content)
    -

    Agent 参数配置

    -
    # 配置结构在这个目录
    -from muagent.connector.schema import Role
    -

    Agent Config

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    roleRole角色描述
    focus_agentsList[String]metagpt的逻辑,关注哪些agent生成的message,可选值范围为:role_name
    focus_message_keysList[String]额外增加的逻辑,关注message里面具体的 key 信息可选值范围为:agent 的 output_keys
    chat_turnint只针对ReactAgent有效
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    -

    Role

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    role_typestr角色类型, Enum: system、user、assistant、function、observation、summary
    role_namestr角色名称
    role_descstr角色描述
    agent_typestr代理类型
    role_promptstr角色instruction
    promptstr完整prompt结构
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/connector-chain-zh/index.html b/docs/zh/muagent/connector-chain-zh/index.html deleted file mode 100644 index 61b8ba7..0000000 --- a/docs/zh/muagent/connector-chain-zh/index.html +++ /dev/null @@ -1,559 +0,0 @@ - - - - - - - - -Connector Chain · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Chain

    -
    -
    - - -

    快速构建一个Agent Chain

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    然后设置LLM配置和向量模型配置

    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Agent 配置

    -
      -
    • 定义两个react agent,进行实际任务执行
    • -
    -
    # 这里采用了预定义的prompt,也可以参考上述prompt完成编写
    -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT
    -# 定义了基于react的tool agent
    -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT)
    -tool_react_agent = ReactAgent(
    -    role=tool_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -# 定义了基于react的code agent
    -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT)
    -code_react_agent = ReactAgent(
    -    role=code_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
      -
    • 定义groupAgent,用于agent选择
    • -
    -
    prompt = """#### Agent Profile
    -
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -
    -ATTENTION: response carefully referenced "Response Output Format" in format.
    -
    -#### Response Output Format
    -
    -**Thoughts:** think the reason step by step about why you selecte one role
    -
    -**Role:** Select the role from agent names.
    -"""
    -
    -# 定义了一个groupAgent
    -role = Role(role_type="assistant", role_name="qaer", prompt=prompt)
    -base_agent = SelectorAgent(
    -    role=role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[tool_react_agent, code_react_agent]
    -)
    -

    Chain 配置

    -
    chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1)
    -base_chain = BaseChain(
    -    chainConfig=chain_config, agents=[base_agent], 
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -

    开始实际问答

    -
      -
    • 开始执行
    • -
    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    user_name="test", role_type="user", role_name="user", input_query=question,
    -    tools=tools,
    -)
    -
    -# base_chain.pre_print(query)
    -output_message, output_memory = base_chain.step(query)
    -print(output_message.input_query)
    -print(output_message.role_content)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Chain 参数配置

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key NameTypeDescription
    agentsList[BaseAgent]
    llm_configLLMConfig大语言模型配置
    embed_configEmbedConfig向量模型配置
    sandbox_serverDict沙盒环境即notebook启动配置
    jupyter_work_pathstr沙盒环境的工作目录
    kb_root_pathstrmemory的存储路径
    log_verbosestragent prompt&predict的日志打印级别
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/connector-memory-zh/index.html b/docs/zh/muagent/connector-memory-zh/index.html deleted file mode 100644 index 5fb2cba..0000000 --- a/docs/zh/muagent/connector-memory-zh/index.html +++ /dev/null @@ -1,513 +0,0 @@ - - - - - - - - -Connector Memory · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Memory

    -
    -
    - - -

    Memory Manager

    -
      -
    • 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval
    • -
    • 对 chat history 进行关键信息总结 summary context,作为 prompt context
    • -
    • 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答
    • -
    -

    使用示例

    -

    完整示例见 ~/tests/connector/memory_manager_test.py

    -

    创建 memory manager 实例

    -
    import os
    -import openai
    -
    -from muagent.base_configs.env_config import KB_ROOT_PATH
    -from muagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.connector.schema import Message
    -
    -#
    -OPENAI_API_BASE = "https://api.openai.com/v1"
    -os.environ["API_BASE_URL"] = OPENAI_API_BASE
    -os.environ["OPENAI_API_KEY"] = "sk-xxx"
    -openai.api_key = "sk-xxx"
    -os.environ["model_name"] = "gpt-3.5-turbo"
    -
    -# 
    -os.environ["embed_model"] = "{{embed_model_name}}"
    -os.environ["embed_model_path"] = "{{embed_model_path}}"
    -
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
    -
    -# LLM 和 Embedding Model 配置
    -llm_config = LLMConfig(
    -    model_name=os.environ["model_name"], api_key=os.environ["OPENAI_API_KEY"], 
    -    api_base_url=os.environ["API_BASE_URL"], temperature=0.3
    -)
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=os.environ["embed_model"], 
    -    embed_model_path=os.environ["embed_model_path"]
    -)
    -# 
    -phase_name = "test"
    -memory_manager = LocalMemoryManager(
    -            unique_name=phase_name, 
    -            do_init=True, 
    -            kb_root_path = KB_ROOT_PATH, 
    -            embed_config=embed_config, 
    -            llm_config=llm_config
    -        )
    -

    支持Message管理

    -
    message1 = Message(
    -    role_name="test1", role_type="user", role_content="hello",
    -    parsed_output_list=[{"input": "hello"}], user_name="default"
    -)
    -
    -text = "hi! how can I help you?"
    -message2 = Message(
    -    role_name="test2", role_type="assistant", role_content=text, parsed_output_list=[{"answer": text}],
    -    user_name="shuimo"
    -)
    -
    -text = "they say hello and hi to each other"
    -message3 = Message(
    -    role_name="test3", role_type="summary", role_content=text, 
    -    parsed_output_list=[{"summary": text}],
    -    user_name="shanshi"
    -    )
    -
    -local_memory_manager.append(message=message1)
    -local_memory_manager.append(message=message2)
    -local_memory_manager.append(message=message3)
    -

    重新加载

    -
    local_memory_manager = LocalMemoryManager(user_name="shanshi", embed_config=embed_config, llm_config=llm_config, do_init=False)
    -local_memory_manager.load()
    -print(local_memory_manager.get_memory_pool("default").messages)
    -print(local_memory_manager.get_memory_pool("shanshi").messages)
    -print(local_memory_manager.get_memory_pool("shuimo").messages)
    -

    支持 memory 检索

    -
    # embedding retrieval test
    -text = "say hi to each other, i want some help"
    -# retrieval_type=datetime => retrieval from datetime and jieba
    -print(local_memory_manager.router_retrieval(user_name="shanshi", text=text, datetime="2024-03-12 17:48:00", n=4, top_k=5, retrieval_type= "datetime"))
    -# retrieval_type=embedding => retrieval from embedding
    -print(local_memory_manager.router_retrieval(user_name="shanshi", text=text, top_k=5, retrieval_type= "embedding"))
    -# retrieval_type=text => retrieval from jieba
    -print(local_memory_manager.router_retrieval(user_name="shanshi", text=text, top_k=5, retrieval_type= "text"))
    -

    支持 memory 总结

    -
    # recursive_summary test
    -print(local_memory_manager.recursive_summary(local_memory_manager.get_memory_pool("shanshi").messages, split_n=1))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/connector-phase-zh/index.html b/docs/zh/muagent/connector-phase-zh/index.html deleted file mode 100644 index 8b17d57..0000000 --- a/docs/zh/muagent/connector-phase-zh/index.html +++ /dev/null @@ -1,578 +0,0 @@ - - - - - - - - -Connector Phase · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Phase

    -
    -
    - - -

    快速构建一个Agent Phase

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    然后设置LLM配置和向量模型配置

    -
      -
    • 配置相关 LLM 和 Embedding Model
    • -
    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    Agent 配置

    -
      -
    • 定义两个react agent,进行实际任务执行
    • -
    -
    # 这里采用了预定义的prompt,也可以参考上述prompt完成编写
    -from muagent.connector.configs.prompts import REACT_CODE_PROMPT, REACT_TOOL_PROMPT
    -# 定义了基于react的tool agent
    -tool_role = Role(role_type="assistant", role_name="tool_reacter", prompt=REACT_TOOL_PROMPT)
    -tool_react_agent = ReactAgent(
    -    role=tool_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -# 定义了基于react的code agent
    -code_role = Role(role_type="assistant", role_name="code_reacter", prompt=REACT_CODE_PROMPT)
    -code_react_agent = ReactAgent(
    -    role=code_role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
      -
    • 定义groupAgent,用于agent选择
    • -
    -
    prompt = """#### Agent Profile
    -
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -
    -ATTENTION: response carefully referenced "Response Output Format" in format.
    -
    -#### Response Output Format
    -
    -**Thoughts:** think the reason step by step about why you selecte one role
    -
    -**Role:** Select the role from agent names.
    -"""
    -
    -# 定义了一个groupAgent
    -role = Role(role_type="assistant", role_name="qaer", prompt=prompt)
    -base_agent = SelectorAgent(
    -    role=role,
    -    task="",
    -    chat_turn=3,
    -    focus_agents=[],
    -    focus_message_keys=[],
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[tool_react_agent, code_react_agent]
    -)
    -

    Chain 配置

    -
    chain_config = ChainConfig(chain_name="group_chain", agents=[base_agent.role.role_name], chat_turn=1)
    -base_chain = BaseChain(
    -    chainConfig=chain_config, agents=[base_agent], 
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -

    Phase 配置

    -
    base_phase = BasePhase(
    -    phase_name="group_phase", chains=[base_chain],
    -    embed_config=embed_config, llm_config=llm_config
    -)
    -

    开始实际问答

    -
      -
    • 开始执行
    • -
    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -question = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    user_name="test", role_type="user", role_name="user", input_query=question,
    -    tools=tools,
    -)
    -
    -# base_phase.pre_print(query)
    -output_message, output_memory = base_phase.step(query)
    -print(output_message.input_query)
    -print(output_message.role_content)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    Phase 参数配置

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Config Key Name | Type | Description
    phase_name | String | 场景名称
    chains | List[Chain] | chain列表,按顺序执行
    llm_config | LLMConfig | 大语言模型配置
    embed_config | EmbedConfig | 向量模型配置
    sandbox_server | Dict | 沙盒环境即notebook启动配置
    jupyter_work_path | str | 沙盒环境的工作目录
    kb_root_path | str | memory的存储路径
    log_verbose | str | agent prompt&predict的日志打印级别
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/connector-prompt-zh/index.html b/docs/zh/muagent/connector-prompt-zh/index.html deleted file mode 100644 index 21d33fc..0000000 --- a/docs/zh/muagent/connector-prompt-zh/index.html +++ /dev/null @@ -1,625 +0,0 @@ - - - - - - - - -Connector Prompt · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Connector Prompt

    -
    -
    - - -

    提示管理器(Prompt Manager)

    -

    管理多智能体链路中的prompt创建

    -
      -
    • 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。
    • -
    • 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。
    • -
    -

    Prompt预设模板结构

    -
      -
    • Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。
    • -
    • Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 -
        -
      • Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。
      • -
      • Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。
      • -
      • Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。
      • -
      -
    • -
    • Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。
    • -
    -

    Prompt 的标准结构

    -

    在整个Prompt的整个结构中,我们需要去定义三个部分

    -
      -
    • Agent Profile
    • -
    • Input Format
    • -
    • Response Output Format
    • -
    -
    #### Agent Profile
    -
    -Agent Description ...
    -
    -#### Input Format
    -
    -**Origin Query:** the initial question or objective that the user wanted to achieve
    -
    -**Context:** the current status and history of the tasks to determine if Origin Query has been achieved.
    -
    -#### Response Output Format
    -**Action Status:** finished or continued
    -If it's 'finished', the context can answer the origin query.
    -If it's 'continued', the context cant answer the origin query.
    -
    -**REASON:** Justify the decision of choosing 'finished' and 'continued' by evaluating the progress step by step.
    -Consider all relevant information. If the tasks were aimed at an ongoing process, assess whether it has reached a satisfactory conclusion.
    -

    其中,我们整合了部分 Input Format 的通用操作,内置了一部分字段和操作流程,形成通用的配置化操作。

    -

    未来我们也会进一步将 Agent Profile和Response Output Format的部分,实现可配置化操作,降低Prompt编写难度

    -

    自定义 Agent

    -
      -
    • 有自定义字段需求,根据实际需求完成构造
    • -
    -
    class CodeGenDocer(BaseAgent):
    -
    -    def start_action_step(self, message: Message) -> Message:
    -        '''do action before agent predict '''
    -        # 根据问题获取代码片段和节点信息
    -        action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, 
    -                                              embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag")
    -        current_vertex = action_json['vertex']
    -        message.customed_kargs["Code Snippet"] = action_json["code"]
    -        message.customed_kargs['Current_Vertex'] = current_vertex
    -        return message
    -    
    -

    pre_print 功能

    -

    在我们构建phase、chain或者agent之后,可以通过函数的预打印功能,实现agents链路确认,避免在执行后才发现问题,可提前进行debug

    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Role, Message, ChainConfig
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -
    -
    -import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -
    -llm_config = LLMConfig(
    -    model_name="gpt-4", api_key=api_key,  api_base_url=api_base_url, temperature=0.3
    -)
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -
    -phase.pre_print(query)
    -

    这里采用预定义好的链路,自定义case可见customed_example -

    -
    >>> 完整信息确认 muagent.connector.configs中进行确认
    -
    -##########################
    -<<<<baseGroup's prompt>>>>
    -##########################
    -
    -### Agent Profile
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -ATTENTION: response carefully referenced "Response Output Format" in format.
    -
    -### Tool Information
    -
    -### Agent Infomation
    -        Please ensure your selection is one of the listed roles. Available roles for selection:
    -        "role name: tool_react
    -role description:  Agent Profile,When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.,Please note that all the tools you can use are listed below. You can only choose from these tools for use. ,If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.,ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.,"
    -"role name: code_react
    -role description:  Agent Profile,When users need help with coding, your role is to provide precise and effective guidance.,Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.,"
    -        Please ensure select the Role from agent names, such as tool_react, code_react
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Current Plan
    -
    -### Response Output Format
    -**Thoughts:** think the reason step by step about why you selecte one role
    -**Role:** Select the role from agent names.
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Role:**
    -
    -
    -###########################
    -<<<<tool_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When interacting with users, your role is to respond in a helpful and accurate manner using the tools available. Follow the steps below to ensure efficient and effective use of the tools.
    -Please note that all the tools you can use are listed below. You can only choose from these tools for use.
    -If there are no suitable tools, please do not invent any tools. Just let the user know that you do not have suitable tools to use.
    -ATTENTION: The Action Status field ensures that the tools or code mentioned in the Action can be parsed smoothly. Please make sure not to omit the Action Status field when replying.
    -
    -### Tool Information
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -#### Task Records
    -
    -### Response Output Format
    -**Thoughts:** According the previous observations, plan the approach for using the tool effectively.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -###########################
    -<<<<code_react's prompt>>>>
    -###########################
    -### Agent Profile
    -When users need help with coding, your role is to provide precise and effective guidance.
    -Write the code step by step, showing only the part necessary to solve the current problem. Each reply should contain only the code required for the current step.
    -
    -### Context Data
    -
    -#### Reference Documents
    -
    -#### Session Records
    -
    -### Response Output Format
    -
    -**Thoughts:** According the previous context, solve the problem step by step, only displaying the thought process necessary for the current step of solving the problem,
    -outline the plan for executing this step.
    -
    -**Action Status:** Set to 'stopped' or 'code_executing'.
    -If it's 'stopped', the action is to provide the final answer to the session records and executed steps.
    -If it's 'code_executing', the action is to write the code.
    -...
    -
    -### Begin!!!
    -
    -###################
    -<<<<LLM PREDICT>>>>
    -###################
    -
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -**Observation:**
    -**Thoughts:**
    -**Action Status:**
    -**Action:**
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/custom-examples-zh/index.html b/docs/zh/muagent/custom-examples-zh/index.html deleted file mode 100644 index 899f1da..0000000 --- a/docs/zh/muagent/custom-examples-zh/index.html +++ /dev/null @@ -1,698 +0,0 @@ - - - - - - - - -Customed Examples · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Customed Examples

    -
    -
    - - -

    如何创建你个性化的 agent phase 场景

    -

    下面通过 代码库来实现代码转API文档的自动生成, 来详细演示如何自定义一个 agent phase 的构建

    -

    设计你的prompt结构

    -
      -
    • codeGenDocGroup_PROMPT, 构建 group Agent Prompt
    • -
    -
    # update new agent configs
    -codeGenDocGroup_PROMPT = """#### Agent Profile
    -
    -Your goal is to response according the Context Data's information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided.
    -
    -When you need to select the appropriate role for handling a user's query, carefully read the provided role names, role descriptions and tool list.
    -
    -#### Input Format
    -
    -#### Response Output Format
    -
    -**Code Path:** Extract the paths for the class/method/function that need to be addressed from the context
    -
    -**Role:** Select the role from agent names
    -"""
    -
      -
    • classGenDoc_PROMPT, 构建 class code to api doc Prompt
    • -
    -
    classGenDoc_PROMPT = """#### Agent Profile
    -As an advanced code documentation generator, you are proficient in translating class definitions into comprehensive documentation with a focus on instantiation parameters. 
    -Your specific task is to parse the given code snippet of a class, extract information regarding its instantiation parameters.
    -
    -#### Input Format
    -
    -**Current_Vertex:** Provide the code vertex of the function or method.
    -
    -**Code Snippet:** Provide the full class definition, including the constructor and any parameters it may require for instantiation.
    -
    -#### Response Output Format
    -**Class Base:** Specify the base class or interface from which the current class extends, if any.
    -
    -**Class Description:** Offer a brief description of the class's purpose and functionality.
    -
    -**Init Parameters:** List each parameter from construct. For each parameter, provide:
    -    - `param`: The parameter name
    -    - `param_description`: A concise explanation of the parameter's purpose.
    -    - `param_type`: The data type of the parameter, if explicitly defined.
    -
    -    ```json
    -    [
    -        {
    -            "param": "parameter_name",
    -            "param_description": "A brief description of what this parameter is used for.",
    -            "param_type": "The data type of the parameter"
    -        },
    -        ...
    -    ]
    -    ```
    -
    -        
    -    If no parameter for construct, return 
    -    ```json
    -    []
    -    ```
    -"""
    -
      -
    • funcGenDoc_PROMPT,构建 function code to api doc Prompt
    • -
    -
    funcGenDoc_PROMPT = """#### Agent Profile
    -You are a high-level code documentation assistant, skilled at extracting information from function/method code into detailed and well-structured documentation.
    -
    -
    -#### Input Format
    -**Code Path:** Provide the code path of the function or method you wish to document. 
    -This name will be used to identify and extract the relevant details from the code snippet provided.
    -
    -**Current_Vertex:** Provide the code vertex of the function or method.
    -
    -**Code Snippet:** A segment of code that contains the function or method to be documented.
    -
    -#### Response Output Format
    -
    -**Class Description:** Offer a brief description of the method(function)'s purpose and functionality.
    -
    -**Parameters:** Extract parameter for the specific function/method Code from Code Snippet. For parameter, provide:
    -    - `param`: The parameter name
    -    - `param_description`: A concise explanation of the parameter's purpose.
    -    - `param_type`: The data type of the parameter, if explicitly defined.
    -    ```json
    -    [
    -        {
    -            "param": "parameter_name",
    -            "param_description": "A brief description of what this parameter is used for.",
    -            "param_type": "The data type of the parameter"
    -        },
    -        ...
    -    ]
    -    ```
    -
    -    If no parameter for function/method, return 
    -    ```json
    -    []
    -    ```
    -
    -**Return Value Description:** Describe what the function/method returns upon completion.
    -
    -**Return Type:** Indicate the type of data the function/method returns (e.g., string, integer, object, void).
    -"""
    -

    导包以及基础参数配置

    -
      -
    • 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)
    • -
    -
    import os, sys
    -from muagent.base_configs.env_config import CB_ROOT_PATH
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.connector.phase import BasePhase
    -from muagent.connector.agents import BaseAgent, SelectorAgent
    -from muagent.connector.chains import BaseChain
    -from muagent.connector.schema import Message, Role, ChainConfig
    -from muagent.codechat.codebase_handler.codebase_handler import CodeBaseHandler
    -
    -from loguru import logger
    -from muagent.tools import CodeRetrievalSingle
    -
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    定义新的agent类

    -

    用于自定义key-value信息

    -
    class CodeGenDocer(BaseAgent):
    -
    -    def start_action_step(self, message: Message) -> Message:
    -        '''do action before agent predict '''
    -        # 根据问题获取代码片段和节点信息
    -        action_json = CodeRetrievalSingle.run(message.code_engine_name, message.input_query, llm_config=self.llm_config, 
    -                                              embed_config=self.embed_config, local_graph_path=message.local_graph_path, use_nh=message.use_nh,search_type="tag")
    -        current_vertex = action_json['vertex']
    -        message.customed_kargs["Code Snippet"] = action_json["code"]
    -        message.customed_kargs['Current_Vertex'] = current_vertex
    -        return message
    -    
    -

    准备LLM & Embedding

    -
    llm_config = LLMConfig(
    -    model_name="gpt-4", api_key=api_key,  api_base_url=api_base_url, temperature=0.3
    -)
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    代码库加载

    -
    
    -# initialize codebase
    -# delete codebase
    -codebase_name = 'client_nebula'
    -code_path = "D://chromeDownloads/devopschat-bot/client_v2/client"
    -use_nh = True
    -do_interpret = False
    -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
    -                      llm_config=llm_config, embed_config=embed_config)
    -cbh.delete_codebase(codebase_name=codebase_name)
    -
    -# load codebase
    -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
    -                      llm_config=llm_config, embed_config=embed_config)
    -cbh.import_code(do_interpret=do_interpret)
    -

    接下来就构建 phase 实例,开始执行

    -
    
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "1"
    -
    -funcGenDoc_role = Role(role_type="assistant", role_name="funcGenDoc_role", prompt=funcGenDoc_PROMPT)
    -funcGenDoc = CodeGenDocer(
    -    role=funcGenDoc_role,
    -    chat_turn=1,
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -
    -classGenDoc_role = Role(role_type="assistant", role_name="classGenDoc_role", prompt=classGenDoc_PROMPT)
    -classGenDoc = CodeGenDocer(
    -    role=classGenDoc_role,
    -    chat_turn=1,
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -codeGenDocGroup_role = Role(role_type="assistant", role_name="codeGenDocGroup_role", prompt=codeGenDocGroup_PROMPT)
    -codeGenDocGroup = SelectorAgent(
    -    role=codeGenDocGroup_role,
    -    chat_turn=1,
    -    llm_config=llm_config, embed_config=embed_config,
    -    group_agents=[funcGenDoc, classGenDoc]
    -)
    -
    -chain_config = ChainConfig(
    -    chain_name="codeGenDocGroup_chain", agents=[codeGenDocGroup.role.role_name,], 
    -    chat_turn=1)
    -
    -chain = BaseChain(
    -    chainConfig=chain_config, agents=[codeGenDocGroup], 
    -    llm_config=llm_config, embed_config=embed_config,
    -)
    -
    -phase = BasePhase(
    -    phase_name="codeGenDocGroup_phase", chains=[chain],
    -    embed_config=embed_config, llm_config=llm_config
    -)
    -

    开始代码转api文档

    -
    # 根据前面的load过程进行初始化
    -cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
    -                      llm_config=llm_config, embed_config=embed_config)
    -
    -cbh.search_vertices(vertex_type="method")
    -
    -# 开始代码转换API文档结构
    -for vertex_type in ["class", "method"]:
    -    vertexes = cbh.search_vertices(vertex_type=vertex_type)
    -    logger.info(f"vertexes={vertexes}")
    -
    -    # round-1
    -    docs = []
    -    for vertex in vertexes:
    -        vertex = vertex.split("-")[0] # -为method的参数
    -        query_content = f"为{vertex_type}节点 {vertex}生成文档"
    -        query = Message(
    -            role_name="human", role_type="user", input_query=query_content,
    -            code_engine_name=codebase_name, score_threshold=1.0, top_k=3, cb_search_type="tag", use_nh=use_nh,
    -            local_graph_path=CB_ROOT_PATH,
    -            )
    -        output_message, output_memory = phase.step(query, reinit_memory=True)
    -        # print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -        docs.append(output_memory.get_spec_parserd_output())
    -
    -        os.makedirs(f"{CB_ROOT_PATH}/docs", exist_ok=True)
    -        with open(f"{CB_ROOT_PATH}/docs/raw_{vertex_type}.json", "w") as f:
    -            json.dump(docs, f)
    -
    -
    -# 下面把生成的文档信息转换成markdown文本
    -from muagent.utils.code2doc_util import *
    -
    -import json
    -with open(f"/home/user/code_base/docs/raw_method.json", "r") as f:
    -    method_raw_data = json.load(f)
    -
    -with open(f"/home/user/code_base/docs/raw_class.json", "r") as f:
    -    class_raw_data = json.load(f)
    -    
    -
    -method_data = method_info_decode(method_raw_data)
    -class_data = class_info_decode(class_raw_data)
    -method_mds = encode2md(method_data, method_text_md)
    -class_mds = encode2md(class_data, class_text_md)
    -
    -docs_dict = {}
    -for k,v in class_mds.items():
    -    method_textmds = method_mds.get(k, [])
    -    for vv in v:
    -        # 理论上只有一个
    -        text_md = vv
    -
    -    for method_textmd in method_textmds:
    -        text_md += "\n<br>" + method_textmd
    -
    -    docs_dict.setdefault(k, []).append(text_md)
    -    
    -    with open(f"/home/user/code_base/docs/{k}.md", "w") as f:
    -        f.write(text_md)
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/custom-retrieval-zh/index.html b/docs/zh/muagent/custom-retrieval-zh/index.html deleted file mode 100644 index eec2c04..0000000 --- a/docs/zh/muagent/custom-retrieval-zh/index.html +++ /dev/null @@ -1,528 +0,0 @@ - - - - - - - - -自定义 Retrieval 接入 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    自定义 Retrieval 接入

    -
    -
    - - -

    基本介绍

    -

    Doc Retrieval 文档向量数据库是当前最主流的知识库构建方法,使用Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式,来挖掘数据中多种复杂关系。

    -

    Code Retrieval LLM在代码生成、修复以及组件理解的任务上,会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中,对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来,以及为LLM提供知识体系外的代码。

    -

    Search Retrieval 除了现成的文档和代码知识库以及之外,在日常中实践中会去浏览大量网页内容获取更多的知识,帮助我们理解新兴的场景、业务、技术等,于是我们接入了duckduckgosearch这款开源的搜索工具,能够为LLM提供知识储备以外的内容。

    -

    Rertrieval 结构

    -
    class IMRertrieval:
    -
    -    def __init__(self,):
    -        '''
    -        init your personal attributes
    -        '''
    -        pass
    -
    -    def run(self, ):
    -        '''
    -        execute interface, and can use init' attributes
    -        '''
    -        pass
    -
    -class BaseDocRetrieval(IMRertrieval):
    -
    -    def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH):
    -        self.knowledge_base_name = knowledge_base_name
    -        self.search_top = search_top
    -        self.score_threshold = score_threshold
    -        self.embed_config = embed_config
    -        self.kb_root_path = kb_root_path
    -
    -    def run(self, query: str, search_top=None, score_threshold=None, ):
    -        docs = DocRetrieval.run(
    -            query=query, knowledge_base_name=self.knowledge_base_name,
    -            search_top=search_top or self.search_top,
    -            score_threshold=score_threshold or self.score_threshold,
    -            embed_config=self.embed_config,
    -            kb_root_path=self.kb_root_path
    -        )
    -        return docs
    -

    使用示例

    -
    # retrieval your customized register demo
    -from muagent.tools import DocRetrieval
    -class BaseDocRetrieval(IMRertrieval):
    -
    -    def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH):
    -        self.knowledge_base_name = knowledge_base_name
    -        self.search_top = search_top
    -        self.score_threshold = score_threshold
    -        self.embed_config = embed_config
    -        self.kb_root_path = kb_root_path
    -
    -    def run(self, query: str, search_top=None, score_threshold=None, ):
    -        docs = DocRetrieval.run(
    -            query=query, knowledge_base_name=self.knowledge_base_name,
    -            search_top=search_top or self.search_top,
    -            score_threshold=score_threshold or self.score_threshold,
    -            embed_config=self.embed_config,
    -            kb_root_path=self.kb_root_path
    -        )
    -        return docs
    -
    -
    -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config)
    -
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH,
    -    doc_retrieval=doc_retrieval
    -)
    -
    -# round-1
    -query_content = "langchain有哪些模块"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content = "提示(prompts)有什么用?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/custom-tool-zh/index.html b/docs/zh/muagent/custom-tool-zh/index.html deleted file mode 100644 index d6b7492..0000000 --- a/docs/zh/muagent/custom-tool-zh/index.html +++ /dev/null @@ -1,553 +0,0 @@ - - - - - - - - -自定义 Tool 接入 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    自定义 Tool 接入

    -
    -
    - - -

    基本介绍

    -

    在MuAgent中也支持Agent完成Tool的注册,通过Python注册模板BaseToolModel类,编写

    -
      -
    • Tool_nam
    • -
    • Tool_descriptio
    • -
    • ToolInputArgs
    • -
    • ToolOutputArgs
    • -
    • run
    • -
    -

    等相关属性和方法即可实现工具的快速接入,同时支持langchain Tool接口的直接使用。 例如像上述 XXRetrieval 的功能也可以注册为Tool,最终由LLM执行调用。

    -

    BaseTool 结构

    -
    from langchain.agents import Tool
    -from pydantic import BaseModel, Field
    -from typing import List, Dict
    -import json
    -
    -
    -class BaseToolModel:
    -    name = "BaseToolModel"
    -    description = "Tool Description"
    -
    -    class ToolInputArgs(BaseModel):
    -        """
    -        Input for MoveFileTool.
    -        Tips:
    -            default control Required, e.g.  key1 is not Required/key2 is Required
    -        """
    -
    -        key1: str = Field(default=None, description="hello world!")
    -        key2: str = Field(..., description="hello world!!")
    -
    -    class ToolOutputArgs(BaseModel):
    -        """
    -        Input for MoveFileTool.
    -        Tips:
    -            default control Required, e.g.  key1 is not Required/key2 is Required
    -        """
    -
    -        key1: str = Field(default=None, description="hello world!")
    -        key2: str = Field(..., description="hello world!!")
    -
    -    @classmethod
    -    def run(cls, tool_input_args: ToolInputArgs) -> ToolOutputArgs:
    -        """excute your tool!"""
    -        pass
    -

    注册示例

    -
    from pydantic import BaseModel, Field
    -from typing import List, Dict
    -import requests
    -from loguru import logger
    -
    -from .base_tool import BaseToolModel
    -
    -class Multiplier(BaseToolModel):
    -    """
    -    Tips:
    -        default control Required, e.g.  key1 is not Required/key2 is Required
    -    """
    -
    -    name: str = "Multiplier"
    -    description: str = """useful for when you need to multiply two numbers together. \
    -    The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. \
    -    For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
    -
    -    class ToolInputArgs(BaseModel):
    -        """Input for Multiplier."""
    -
    -        # key: str = Field(..., description="用户在高德地图官网申请web服务API类型KEY")
    -        a: int = Field(..., description="num a")
    -        b: int = Field(..., description="num b")
    -
    -    class ToolOutputArgs(BaseModel):
    -        """Output for Multiplier."""
    -
    -        res: int = Field(..., description="the result of two nums")
    -    
    -    @staticmethod
    -    def run(a, b):
    -        return a * b
    -

    使用示例

    -
    from langchain.tools import StructuredTool
    -from muagent.tools import (
    -    WeatherInfo, Multiplier, toLangchainTools,
    -    TOOL_DICT, TOOL_SETS
    -)
    -
    -# 函数执行
    -tools =  [
    -    StructuredTool(
    -            name=Multiplier.name,
    -            func=Multiplier.run,
    -            description=Multiplier.description,
    -            args_schema=Multiplier.ToolInputArgs,
    -        ), 
    -        StructuredTool(
    -            name=WeatherInfo.name,
    -            func=WeatherInfo.run,
    -            description=WeatherInfo.description,
    -            args_schema=WeatherInfo.ToolInputArgs,
    -        )
    -        ]
    -
    -tools = toLangchainTools([TOOL_DICT["Multiplier"]])
    -
    -# tool run 测试
    -print(tools[0].func(1,2))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/embedding-model-config-zh/index.html b/docs/zh/muagent/embedding-model-config-zh/index.html deleted file mode 100644 index 988a29e..0000000 --- a/docs/zh/muagent/embedding-model-config-zh/index.html +++ /dev/null @@ -1,493 +0,0 @@ - - - - - - - - -Embedding 配置 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    Embedding 配置

    -
    -
    - - -

    准备相关参数

    -

    首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -

    构建LLM Config

    -
      -
    • 通过本地模型文件构建
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -
      -
    • 通过openai构建
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -embed_config = EmbedConfig(
    -    embed_engine="openai", api_key=api_key,  api_base_url=api_base_url,
    -)
    -
      -
    • 自定义langchain embeddings传入
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -
    -
    -class CustomizedEmbeddings(Embeddings):
    -
    -    def embed_documents(self, texts: List[str]) -> List[List[float]]:
    -        embeddings = []
    -        # add your embedding code
    -        return embeddings
    -
    -    def embed_query(self, text: str) -> List[float]:
    -        """Compute query embeddings using a HuggingFace transformer model.
    -
    -        Args:
    -            text: The text to embed.
    -
    -        Returns:
    -            Embeddings for the text.
    -        """
    -        # add your embedding code
    -        return embedding
    -
    -embeddings = CustomizedEmbeddings()
    -embed_config = EmbedConfig(
    -    embed_model="default",
    -    langchain_embeddings=embeddings
    -)
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/muagent/index.xml b/docs/zh/muagent/index.xml deleted file mode 100644 index da1ed20..0000000 --- a/docs/zh/muagent/index.xml +++ /dev/null @@ -1,102 +0,0 @@ - - - - Muagents on CodeFuse-AI - /zh/muagent/ - Recent content in Muagents on CodeFuse-AI - Hugo -- gohugo.io - en-CN - - - Agent 编排 - /zh/muagent/agent-%E7%BC%96%E6%8E%92/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/agent-%E7%BC%96%E6%8E%92/ - 核心Connector介绍 为了便于大家理解整个 muagent 的链路,我们采取 Flow 的形式来详细介绍如何通过配置构建 下面,我们先介绍相关的核心组件 Agent 在Agent设计层面,我们提供了四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用 BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 =&gt; 输出 ReactAgent:提供标准React的功能,根据问题实现当前任务 ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. 输出后将 message push 到 memory pool 之中,后续通过Memory Manager进行管理 Chain 基础链路:BaseChain,串联agent的交互,完成相关message和memory的管理 Phase 基础场景:BasePhase,串联chain的交互,完成相关message和memory的管理 Prompt Manager Mutli-Agent链路中每一个agent的prompt创建 通过对promtp_input_keys和promtp_output_keys对的简单设定,可以沿用预设 Prompt Context 创建逻辑,从而实现agent prompt快速配置 也可以对prompt manager模块进行新的 key-context 设计,实现个性化的 Agent Prompt Memory Manager 主要用于 chat history 的管理 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 - - - Connector Agent - /zh/muagent/connector-agent-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-agent-zh/ - 快速构建一个Agent 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or 
&#34;socks5://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 配置相关 LLM 和 Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent.connector.chains import BaseChain from muagent.connector.schema import Role, Message, ChainConfig from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, stop=&#34;**Observation:**&#34; ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, embed_model_path=embed_model_path ) Agent 配置 定义两个react agent,进行实际任务执行 # 这里采用了预定义的prompt,也可以参考上述prompt完成编写 from muagent. - - - Connector Chain - /zh/muagent/connector-chain-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-chain-zh/ - 快速构建一个Agent Chain 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 配置相关 LLM 和 Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent.connector.chains import BaseChain from muagent.connector.schema import Role, Message, ChainConfig from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0. 
- - - Connector Memory - /zh/muagent/connector-memory-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-memory-zh/ - Memory Manager 将chat history在数据库进行读写管理,包括user input、 llm output、doc retrieval、code retrieval、search retrieval 对 chat history 进行关键信息总结 summary context,作为 prompt context 提供检索功能,检索 chat history 或者 summary context 中与问题相关信息,辅助问答 使用示例 完整示例见 ~/tests/connector/memory_manager_test.py 创建 memory manager 实例 import os import openai from muagent.base_configs.env_config import KB_ROOT_PATH from muagent.connector.memory_manager import BaseMemoryManager, LocalMemoryManager from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.connector.schema import Message # OPENAI_API_BASE = &#34;https://api.openai.com/v1&#34; os.environ[&#34;API_BASE_URL&#34;] = OPENAI_API_BASE os.environ[&#34;OPENAI_API_KEY&#34;] = &#34;sk-xxx&#34; openai.api_key = &#34;sk-xxx&#34; os.environ[&#34;model_name&#34;] = &#34;gpt-3.5-turbo&#34; # os. - - - Connector Phase - /zh/muagent/connector-phase-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-phase-zh/ - 快速构建一个Agent Phase 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 然后设置LLM配置和向量模型配置 配置相关 LLM 和 Embedding Model from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.connector.agents import BaseAgent, ReactAgent, ExecutorAgent, SelectorAgent from muagent.connector.chains import BaseChain from muagent.connector.schema import Role, Message, ChainConfig from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS llm_config = LLMConfig( model_name=model_name, 
api_key=api_key, api_base_url=api_base_url, temperature=0. - - - Connector Prompt - /zh/muagent/connector-prompt-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/connector-prompt-zh/ - 提示管理器(Prompt Manager) 管理多智能体链路中的prompt创建 快速配置:采用预设的处理函数,用户仅需通过定义智能体的输入输出即可轻松配置,实现多智能体的prompt快速组装和配置。 自定义支持:允许用户自定义prompt内部各模块的处理逻辑,以达到个性化的智能体prompt实现。 Prompt预设模板结构 Agent Profile:此部分涉及到智能体的基础描述,包括但不限于代理的类型、功能和指令集。用户可以在这里设置智能体的基本属性,确保其行为与预期相符。 Context:上下文信息,给智能体做参考,帮助智能体更好的进行决策。 Tool Information:此部分为智能体提供了一套可用工具的清单,智能体可以根据当前的场景需求从中挑选合适的工具以辅助其执行任务。 Reference Documents:这里可以包含代理参考使用的文档或代码片段,以便于它在处理请求时能够参照相关资料。 Session Records:在进行多轮对话时,此部分会记录之前的交谈内容,确保智能体能够在上下文中保持连贯性。 Response Output Format:用户可以在此设置智能体的输出格式,以确保生成的响应满足特定的格式要求,包括结构、语法等。 Prompt 的标准结构 在整个Prompt的整个结构中,我们需要去定义三个部分 Agent Profil Input Format Response Output Format #### Agent Profile Agent Description ... #### Input Format **Origin Query:** the initial question or objective that the user wanted to achieve **Context:** the current status and history of the tasks to determine if Origin Query has been achieved. #### Response Output Format **Action Status:** finished or continued If it&#39;s &#39;finished&#39;, the context can answer the origin query. - - - Customed Examples - /zh/muagent/custom-examples-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/custom-examples-zh/ - 如何创建你个性化的 agent phase 场景 下面通过 代码库来实现代码转API文档的自动生成, 来详细演示如何自定义一个 agent phase 的构建 设计你的prompt结构 codeGenDocGroup_PROMPT, 构建 group Agent Prompt # update new agent configs codeGenDocGroup_PROMPT = &#34;&#34;&#34;#### Agent Profile Your goal is to response according the Context Data&#39;s information with the role that will best facilitate a solution, taking into account all relevant context (Context) provided. When you need to select the appropriate role for handling a user&#39;s query, carefully read the provided role names, role descriptions and tool list. 
- - - Embedding 配置 - /zh/muagent/embedding-model-config-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/embedding-model-config-zh/ - 准备相关参数 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; 构建LLM Config 通过本地模型文件构建 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, embed_model_path=embed_model_path ) 通过openai构建 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig embed_config = EmbedConfig( embed_engine=&#34;openai&#34;, api_key=api_key, api_base_url=api_base_url, ) 自定义langchain embeddings传入 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig class CustomizedEmbeddings(Embeddings): def embed_documents(self, texts: List[str]) -&gt; List[List[float]]: embeddings = [] # add your embedding code return embeddings def embed_query(self, text: str) -&gt; List[float]: &#34;&#34;&#34;Compute query embeddings using a HuggingFace transformer model. 
- - - LLM 配置 - /zh/muagent/llm-model-config-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/llm-model-config-zh/ - 准备相关参数 首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动) import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; 构建LLM Config 通过调用 类openai 传入 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, stop=&#34;**Observation:**&#34; ) 自定义 langchain LLM 传入 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from langchain.llms.base import BaseLLM, LLM class CustomizedModel(LLM): repetition_penalty = 1.1 temperature = 0.2 top_k = 40 top_p = 0.9 def predict(self, prompt: str, stop: Optional[List[str]] = None) -&gt; str: return self. - - - MuAgent 概览 - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - 简介 为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。 但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。 经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。 因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。 本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。 MuAgent框架 在MuAgent中,我们除了定义Agent交互链路和AgentBase基础执行流以外,还额外设计了 Prompt Manager 和 Memory Manager 两个基础组件,分别用于自动化构建Prompt和chat history管理。最终构建出一个可扩展、易于使用的Multi-Agent框架,包括以下内容 Agent Base:构建了四种基本的Agent类型BaseAgent、ReactAgent、ExecutorAgent、SelectorAgent,支撑各种场景的基础活动 Communication:通过Message和Parse Message 实体完成Agent间的信息传递,并与Memory Manager交互再Memory Pool完成记忆管理 Prompt Manager:通过Role Handler、Doc/Tool Handler、Session Handler、Customized Handler,来自动化组装Customized 的Agent Prompt Memory Manager: 用于支撑 
chat history 的存储管理、信息压缩、记忆检索等管理,最后通过Memory Pool在数据库、本地、向量数据库中完成存储 Component:用于构建Agent的辅助生态组件,包括Retrieval、Tool、Action、Sandbox等 Customized Model:支持私有化的LLM和Embedding的接入 Agent Base 在Agent层面,提供四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用。所有的Action都由Agent执行。 BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 =&gt; 输出 ReactAgent:提供标准React的功能,根据问题实现当前任务 ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 Agent接受到任务清单([List[task]),对这个任务清单Task进行循环执行(中间也可添加 Feedback Agent来进行任务重新优化),直到任务完成 SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答. Communication 为了让Agent之间进行更好的交互,以及能够让每一个Agent接受到足够的信息完成它们特定任务,我们将Message信息体分成了多个部分,System Content、Info Content、LLM Content和LLM Parsed Content等 System Content:用于存储管理当前LLM输出的时间,Role信息等 Info Content:LLM辅助信息,比如像知识库查询信息、代码库检索信息、工具信息、Agent信息等 LLM Content:直接存储和传递LLM 产生的信息 LLM Parsed Content:对LLM进行解析转成更易操作的key-value数据结构,方便对LLM内容进行过滤 Customized Content:用于管理自定义action产生的key-value数据内容,用于后续自定义Prompt模板的组装构建 通过对以上消息格式的定义,我们便可以完成通用消息的传递和管理。具体组装见Prompt Manager模块 - - - 快速开始 - /zh/muagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - Quick Start 完整示例见,examples/muagent_examples 首先,准备相关配置信息 import os, sys api_key = &#34;sk-xxx&#34; api_base_url= &#34;https://api.openai.com/v1&#34; model_name = &#34;gpt-3.5-turbo&#34; embed_model = &#34;{{embed_model_name}}&#34; embed_model_path = &#34;{{embed_model_path}}&#34; # os.environ[&#34;DUCKDUCKGO_PROXY&#34;] = os.environ.get(&#34;DUCKDUCKGO_PROXY&#34;) or &#34;socks5://127.0.0.1:13659&#34; 然后,设置LLM配置和Embedding模型配置 from muagent.base_configs.env_config import JUPYTER_WORK_PATH from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS from muagent.llm_models.llm_config import EmbedConfig, LLMConfig from muagent.connector.phase import BasePhase from muagent.connector.schema import Message llm_config = LLMConfig( model_name=model_name, api_key=api_key, api_base_url=api_base_url, temperature=0.3, 
stop=&#34;**Observation:**&#34; ) embed_config = EmbedConfig( embed_engine=&#34;model&#34;, embed_model=embed_model, embed_model_path=embed_model_path ) 最后选择一个已有场景进行执行 # if you want to analyze a data. - - - 自定义 Retrieval 接入 - /zh/muagent/custom-retrieval-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/custom-retrieval-zh/ - 基本介绍 Doc Retrieval 文档向量数据库是当前最主流的知识库构建方法,使用Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式,来挖掘数据中多种复杂关系。 Code Retrieval LLM在代码生成、修复以及组件理解的任务上,会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中,对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来,以及为LLM提供知识体系外的代码。 Search Retrieval 除了现成的文档和代码知识库以及之外,在日常中实践中会去浏览大量网页内容获取更多的知识,帮助我们理解新兴的场景、业务、技术等,于是我们接入了duckduckgosearch这款开源的搜索工具,能够为LLM提供知识储备以外的内容。 Rertrieval 结构 class IMRertrieval: def __init__(self,): &#39;&#39;&#39; init your personal attributes &#39;&#39;&#39; pass def run(self, ): &#39;&#39;&#39; execute interface, and can use init&#39; attributes &#39;&#39;&#39; pass class BaseDocRetrieval(IMRertrieval): def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH): self.knowledge_base_name = knowledge_base_name self.search_top = search_top self.score_threshold = score_threshold self.embed_config = embed_config self.kb_root_path = kb_root_path def run(self, query: str, search_top=None, score_threshold=None, ): docs = DocRetrieval. 
- - - 自定义 Tool 接入 - /zh/muagent/custom-tool-zh/ - Mon, 01 Jan 0001 00:00:00 +0000 - /zh/muagent/custom-tool-zh/ - 基本介绍 在MuAgent中也支持Agent完成Tool的注册,通过Python注册模板BaseToolModel类,编写 Tool_nam Tool_descriptio ToolInputArgs ToolOutputArgs run 等相关属性和方法即可实现工具的快速接入,同时支持langchain Tool接口的直接使用。 例如像上述 XXRetrieval 的功能也可以注册为Tool,最终由LLM执行调用。 BaseTool 结构 from langchain.agents import Tool from pydantic import BaseModel, Field from typing import List, Dict import json class BaseToolModel: name = &#34;BaseToolModel&#34; description = &#34;Tool Description&#34; class ToolInputArgs(BaseModel): &#34;&#34;&#34; Input for MoveFileTool. Tips: default control Required, e.g. key1 is not Required/key2 is Required &#34;&#34;&#34; key1: str = Field(default=None, description=&#34;hello world!&#34;) key2: str = Field(..., description=&#34;hello world!!&#34;) class ToolOutputArgs(BaseModel): &#34;&#34;&#34; Input for MoveFileTool. - - - diff --git a/docs/zh/muagent/llm-model-config-zh/index.html b/docs/zh/muagent/llm-model-config-zh/index.html deleted file mode 100644 index 4f008e5..0000000 --- a/docs/zh/muagent/llm-model-config-zh/index.html +++ /dev/null @@ -1,471 +0,0 @@ - - - - - - - - -LLM 配置 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    LLM 配置

    -
    -
    - - -

    准备相关参数

    -

    首先增加openai配置,也可以是其它类似于openai接口的模型(通过fastchat启动)

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -

    构建LLM Config

    -
      -
    • 通过调用 类openai 传入
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
      -
    • 自定义 langchain LLM 传入
    • -
    -
    from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from langchain.llms.base import BaseLLM, LLM
    -
    -class CustomizedModel(LLM):
    -        repetition_penalty = 1.1
    -        temperature = 0.2
    -        top_k = 40
    -        top_p = 0.9
    -
    -        def predict(self, prompt: str, stop: Optional[List[str]] = None) -> str:
    -            return self._call(prompt, stop)
    -
    -        def _call(self, prompt: str,
    -                  stop: Optional[List[str]] = None) -> str:
    -            """_call
    -            """
    -            return ""
    -
    -llm = CustomizedModel()
    -llm_config = LLMConfig(
    -    llm=llm
    -)
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/muagent/muagent-\346\246\202\350\247\210/index.html" "b/docs/zh/muagent/muagent-\346\246\202\350\247\210/index.html" deleted file mode 100644 index 867c501..0000000 --- "a/docs/zh/muagent/muagent-\346\246\202\350\247\210/index.html" +++ /dev/null @@ -1,481 +0,0 @@ - - - - - - - - -MuAgent 概览 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    MuAgent 概览

    -
    -
    - - -

    简介

    -

    为了提高大型模型在推理准确性方面的表现,业界出现了多种创新的大型语言模型(LLM)玩法。从最早的CoT、ToT到GoT,这些方法不断拓展了LLM的能力边界。在处理复杂问题时,我们可以通过ReAct过程来选择、调用和执行工具反馈,同时实现多轮工具使用和多步骤执行。

    -

    但对于更复杂的场景,例如复杂代码的开发,单一功能的LLM Agent显然难以胜任。因此,社区开始发展出多Agent的组合玩法,比如专注于metaGPT、GPT-Engineer、chatDev等开发领域的项目,以及专注于自动化构建Agent和Agent对话的AutoGen项目。

    -

    经过对这些框架的深入分析,发现大多数的Agent框架整体耦合度较高,其易用性和可扩展性较差。在预设场景中实现特定场景,但想要进行场景扩展却困难重重。

    -

    因此,我们希望构建一个可扩展、易于使用的Multi-Agent框架,以支持ChatBot在获取知识库信息的同时,能够辅助完成日常办公、数据分析、开发运维等各种通用任务。

    -

    本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。

    -
    - 图片 -
    -

    MuAgent框架

    -

    在MuAgent中,我们除了定义Agent交互链路和AgentBase基础执行流以外,还额外设计了 Prompt Manager 和 Memory Manager 两个基础组件,分别用于自动化构建Prompt和chat history管理。最终构建出一个可扩展、易于使用的Multi-Agent框架,包括以下内容

    -
      -
    • Agent Base:构建了四种基本的Agent类型BaseAgent、ReactAgent、ExecutorAgent、SelectorAgent,支撑各种场景的基础活动
    • -
    • Communication:通过Message和Parse Message 实体完成Agent间的信息传递,并与Memory Manager交互再Memory Pool完成记忆管理
    • -
    • Prompt Manager:通过Role Handler、Doc/Tool Handler、Session Handler、Customized Handler,来自动化组装Customized 的Agent Prompt
    • -
    • Memory Manager: 用于支撑 chat history 的存储管理、信息压缩、记忆检索等管理,最后通过Memory Pool在数据库、本地、向量数据库中完成存储
    • -
    • Component:用于构建Agent的辅助生态组件,包括Retrieval、Tool、Action、Sandbox等
    • -
    • Customized Model:支持私有化的LLM和Embedding的接入
    • -
    -

    Agent Base

    -

    在Agent层面,提供四种基本的Agent类型,对这些Agent进行Role的基础设定,可满足多种通用场景的交互和使用。所有的Action都由Agent执行。

    -
      -
    1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出
    2. -
    -
    - 图片 -
    -
      -
    1. ReactAgent:提供标准React的功能,根据问题实现当前任务
    2. -
    -
    - 图片 -
    -
      -
    1. ExecutorAgent:对任务清单进行顺序执行,根据 User 或 上一个Agent编排的计划,完成相关任务 -Agent接受到任务清单([List[task]),对这个任务清单Task进行循环执行(中间也可添加 Feedback Agent来进行任务重新优化),直到任务完成
    2. -
    -
    - 图片 -
    -
      -
    1. SelectorAgent:提供选择Agent的功能,根据User 或 上一个 Agent的问题选择合适的Agent来进行回答.
    2. -
    -
    - 图片 -
    -

    Communication

    -

    为了让Agent之间进行更好的交互,以及能够让每一个Agent接受到足够的信息完成它们特定任务,我们将Message信息体分成了多个部分,System Content、Info Content、LLM Content和LLM Parsed Content等

    -
      -
    • System Content:用于存储管理当前LLM输出的时间,Role信息等
    • -
    • Info Content:LLM辅助信息,比如像知识库查询信息、代码库检索信息、工具信息、Agent信息等
    • -
    • LLM Content:直接存储和传递LLM 产生的信息
    • -
    • LLM Parsed Content:对LLM进行解析转成更易操作的key-value数据结构,方便对LLM内容进行过滤
    • -
    • Customized Content:用于管理自定义action产生的key-value数据内容,用于后续自定义Prompt模板的组装构建
    • -
    -

    通过对以上消息格式的定义,我们便可以完成通用消息的传递和管理。具体组装见Prompt Manager模块

    -

    Context Manager

    -

    Memory Manager

    -

    主要用于 chat history 的管理

    -
      -
    • 存储管理:在数据库或本地实现对chat history进行save和load管理,包括user input、 llm output、observation ouput
    • -
    • 信息压缩:对 chat history 进行关键信息压缩总结 summary context,比如说单文本概况、侧重不同角度进行文本概况、关键信息提取、多文本概况,作为 Prompt context
    • -
    • 记忆检索:提供基础检索功能,检索 chat history 或者 Summary Context 中与问题相关信息,辅助问答
    • -
    • LLM自动触发:后续定义策略或通过LLM来 触发 压缩总结和检索的功能
    • -
    -

    Prompt Manager

    -

    提问LLM已经成为一种常见的实践,但如何让多个大模型分工并协调好LLM间的规划、调用工具、代码编写能力,来引导它们产生期望的输出,成为了一个关键的问题,其本质就是将业务问题抽象并拆解到可执行的Prompt,那与其说我们是在设计Agents,不如说是对当前需求的深入理解后进行框架设计。 -在LLM介入到实际业务场景(不涉及SFT过程),我们能通过设计Agent Prompt的内容来指定LLM完成相应任务得到相应输出。在MuAgent这个过程中,将这个Prompt分成了三个部分,System Prompt、Context Prompt、Customized Prompt

    -
      -
    • System Prompt 包括 Role Name、Role Description、Task等
    • -
    • Context Prompt 包括 Doc Context、Code Context、Tool Context、Agent Context、Session Context等
    • -
    • Customized Prompt 则是 自定义的一些 Input 和 Ouput,比如说 … -我们还可以要求模型输出结构化的文本,比如说tool的json串、code\ncode_content等来完成特定工作流。
    • -
    -

    Automatic Prompt Assemble -在按照上述结构定义后,我们便可以通过以下方式来完成Prompt的自动化组装,不需要每次去做大量的prompt调整工作

    -
      -
    1. 定义Agent时直接配置 Role Name、Role Description、Task等来决定Agent需要做的事情
    2. -
    3. 预封装一些可复用的Context Prompt 通用策略,比如说可筛选 Role 的 SessionContext、可配置的Tool、Code Retrieval、Doc Retrieval、Search Retrieval、Agent来完成对应的组装
    4. -
    5. 由于Agent的Prompt是相对个性化的操作,所以也支持在Prompt Manager 模块内新增新的 key-context 设计,实现个性化的 Agent Prompt。
    6. -
    -

    Automatic Prompt Design -能根据role description、task、query等来自动化设计出最优的prompt;待定义…

    -

    Multi Prompt Design -根据前面Prompt的定义,我们可以了解到Prompt 由 System Prompt、Context Prompt、Customized Prompt 三个部分组成,三个部分的任一变化都有可能会引起LLM最终输出结果的变化。 -对于同种任务而言,即它们的System Prompt是相同的。那么在不考虑Customized Prompt 变化时,就可实现不同上下文的组装差异,比如说Prompt A获取10轮的chat history,而Prompt B采用5轮的chat history,又或者是对chat history进行信息过滤、信息压缩等。 -待实现…

    -

    Component

    -

    Retrieval

    -

    在所有Prompt的Context中,除了Chat History的会话信息外,还需要依赖于从外界文档知识库、代码库、互联网搜索得来的相关信息,这些模型参数知识外的知识体系能够极大提升Agent完成复杂任务的能力。 -于是在MuAgent中我们集成了Doc、Internet Search、Code Retrieval三种检索信息的方式,并定义了一个抽象IMRetrieval类,可支持开发者自定义个性化的知识库,来完成Agent的知识库注册。

    -

    Doc Retrieval -文档向量数据库是当前最主流的知识库构建方法,使用Text Embedding 模型对文档进行向量化并在向量数据库中存储。未来我们也会去支持基于知识图谱查询以及通过大模型自动抽取实体和关系的方式,来挖掘数据中多种复杂关系。

    -

    Code Retrieval -LLM在代码生成、修复以及组件理解的任务上,会面临代码训练数据滞后、无法感知代码上下文依赖结构。以及在开发的过程中,对现有代码库和依赖包的理解、检索相关代码、查询元信息等会占用较长的时间。于是我们希望通过代码结构分析和代码检索生成来,以及为LLM提供知识体系外的代码。

    -

    Search Retrieval -除了现成的文档和代码知识库之外,在日常实践中会去浏览大量网页内容获取更多的知识,帮助我们理解新兴的场景、业务、技术等,于是我们接入了duckduckgosearch这款开源的搜索工具,能够为LLM提供知识储备以外的内容。

    -

    Tool

    -

    随着OpenAI推出了Function Call功能,通过LLM生成指定工具的参数并执行调用,使机器能更好地理解和回应人类的需求,从而解决实际问题和重复性的工作。现如今工具学习能力越来越作为开源模型的标配。那在MuAgent中也支持Agent完成Tool的注册,通过Python注册模板BaseToolModel类,编写Tool_name、Tool_description、ToolInputArgs、ToolOutputArgs、run等相关属性和方法即可实现工具的快速接入,同时支持langchain Tool接口的直接使用。 -例如像上述 XXRetrieval 的功能也可以注册为Tool,最终由LLM执行调用。

    -

    Action

    -

    在MuAgent的定义里,Action是作为LLM具体要执行的动作或动作流,会包括LLM信息处理、知识检索、工具调用以及代码执行等一个综合性的复杂过程,是一个动态过程。比如在React过程中,我们通过LLM获取到了一个Tool参数,接下来"将工具参数放入到Tool并执行调用"这个过程就是Action,它实践性地调用了Tool。又或者说我们定义了一个Agent,它编排在一个固定Agent的Action步骤之中,这个Agent的输入参数由Action特殊指定。也就是说无论是由LLM产生参数还是工程设定参数,只要涉及具体的执行过程,就是一个Action。

    -

    模块分类

    -
      -
    • connector 主要介绍这块Agent框架的工作
    • -
    • llm_models
    • -
    • retrieval
    • -
    • tools
    • -
    • sandbox
    • -
    • utils
    • -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git "a/docs/zh/muagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" "b/docs/zh/muagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" deleted file mode 100644 index 7701c8a..0000000 --- "a/docs/zh/muagent/\345\277\253\351\200\237\345\274\200\345\247\213/index.html" +++ /dev/null @@ -1,690 +0,0 @@ - - - - - - - - -快速开始 · CodeFuse-AI - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    -
    - -
    -

    快速开始

    -
    -
    - - -

    Quick Start

    -

    完整示例见,examples/muagent_examples

    -

    首先,准备相关配置信息

    -
    import os, sys
    -
    -api_key = "sk-xxx"
    -api_base_url= "https://api.openai.com/v1"
    -model_name = "gpt-3.5-turbo"
    -embed_model = "{{embed_model_name}}"
    -embed_model_path = "{{embed_model_path}}"
    -#
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
    -

    然后,设置LLM配置和Embedding模型配置

    -
    from muagent.base_configs.env_config import JUPYTER_WORK_PATH
    -from muagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
    -from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
    -from muagent.connector.phase import BasePhase
    -from muagent.connector.schema import Message
    -
    -
    -llm_config = LLMConfig(
    -    model_name=model_name, api_key=api_key,  api_base_url=api_base_url, temperature=0.3,
    -    stop="**Observation:**"
    -)
    -
    -embed_config = EmbedConfig(
    -    embed_engine="model", embed_model=embed_model, embed_model_path=embed_model_path
    -)
    -

    最后选择一个已有场景进行执行

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# 选择一个场景
    -phase_name = "baseGroupPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -# round-1 需要通过代码解释器来完成
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user", tools=[], input_query=query_content, 
    -)
    -
    -# phase.pre_print(query)  # 该功能用于预打印 Agents 执行链路的Prompt
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2 需要执行工具
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -
    -query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下"
    -query = Message(
    -    role_name="human", role_type="user", tools=tools, input_query=query_content, 
    -)
    -
    -# phase.pre_print(query)  # 该功能用于预打印 Agents 执行链路的Prompt
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    场景自定义

    -

    如何自定义场景

    -

    场景介绍和使用

    -

    下面是一些具体的场景介绍和使用。

    -

    也欢迎大家开脑洞构造一些有趣的case。

    -

    baseTaskPhase

    -

    xAgents的任务拆分及多步骤执行场景

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -# 
    -phase_name = "baseTaskPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, 
    -)
    -# round-1
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeReactPhase

    -

    基于 React 的代码解释器场景

    -
    # if you want to analyze a data.csv, please put the csv file into a jupyter_work_path (or your defined path)
    -import shutil
    -source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/book_data.csv'
    -shutil.copy(source_file, JUPYTER_WORK_PATH)
    -
    -# then, create a data analyze phase
    -phase_name = "codeReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -# round-1
    -query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    codeToolReactPhase

    -

    基于 React 模板的工具调用和代码解释器场景

    -
    TOOL_SETS = [
    -     "StockName", "StockInfo", 
    -    ]
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "codeToolReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -query_content = "查询贵州茅台的股票代码,并查询截止到当前日期(2023年12月24日)的最近10天的每日时序数据,然后用代码画出折线图并分析"
    -
    -query = Message(role_name="human", role_type="user", input_query=query_content, tools=tools)
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    docChatPhase

    -

    知识库检索问答链路

    -
      -
    • example 1
    • -
    -
    # create your knowledge base
    -from muagent.service.kb_api import create_kb, upload_files2kb
    -from muagent.utils.server_utils import run_async
    -from muagent.orm import create_tables
    -
    -
    -# use to test, don't create some directory
    -create_tables()
    -# create a knowledge base
    -kb_name = "example_test"
    -run_async(create_kb(knowledge_base_name=kb_name, vector_store_type="faiss", embed_config=embed_config, kb_root_path=KB_ROOT_PATH))
    -# add doc to knowledge base
    -file = os.path.join("D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/sources/docs/langchain_text_10.jsonl")
    -files = [file]
    -upload_files2kb(files, kb_name, embed_config, kb_root_path=KB_ROOT_PATH)
    -
    -
    -
    -## start to chat with knowledge base
    -# log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "0"
    -
    -## exmaple 1
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH,
    -)
    -
    -# round-1
    -query_content = "langchain有哪些模块"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content = "提示(prompts)有什么用?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -    doc_engine_name=kb_name, score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
      -
    • example 2
    • -
    -
    ustomized register demo
    -from muagent.tools import DocRetrieval
    -class BaseDocRetrieval(IMRertrieval):
    -
    -    def __init__(self, knowledge_base_name: str, search_top=5, score_threshold=1.0, embed_config: EmbedConfig=EmbedConfig(), kb_root_path: str=KB_ROOT_PATH):
    -        self.knowledge_base_name = knowledge_base_name
    -        self.search_top = search_top
    -        self.score_threshold = score_threshold
    -        self.embed_config = embed_config
    -        self.kb_root_path = kb_root_path
    -
    -    def run(self, query: str, search_top=None, score_threshold=None, ):
    -        docs = DocRetrieval.run(
    -            query=query, knowledge_base_name=self.knowledge_base_name,
    -            search_top=search_top or self.search_top,
    -            score_threshold=score_threshold or self.score_threshold,
    -            embed_config=self.embed_config,
    -            kb_root_path=self.kb_root_path
    -        )
    -        return docs
    -
    -
    -doc_retrieval = BaseDocRetrieval(knowledge_base_name=kb_name, score_threshold=1.0, search_top=3, embed_config=embed_config)
    -
    -# set chat phase
    -phase_name = "docChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config, kb_root_path=KB_ROOT_PATH,
    -    doc_retrieval=doc_retrieval
    -)
    -
    -# round-1
    -query_content = "langchain有哪些模块"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content = "提示(prompts)有什么用?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content,
    -)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    metagpt_code_devlop

    -

    metagpt的代码构造链路

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "metagpt_code_devlop"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -query_content = "create a snake game"
    -query = Message(role_name="human", role_type="user", input_query=query_content)
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    searchChatPhase

    -

    固定场景链路,先搜索后基于LLM直接回答

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -# 当duckduckgo连接不通的时候可以配置这个
    -os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5h://127.0.0.1:13659"
    -
    -phase_name = "searchChatPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -# round-1
    -query_content1 = "美国当前总统是谁?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content1,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -# round-2
    -query_content2 = "美国上一任总统是谁,两个人有什么关系没?"
    -query = Message(
    -    role_name="human", role_type="user", input_query=query_content2,
    -    search_engine_name="duckduckgo", score_threshold=1.0, top_k=3
    -    )
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -

    toolReactPhase

    -

    基于 React 模板的工具调用场景

    -
    # log-level,print prompt和llm predict
    -os.environ["log_verbose"] = "2"
    -
    -phase_name = "toolReactPhase"
    -phase = BasePhase(
    -    phase_name, embed_config=embed_config, llm_config=llm_config
    -)
    -
    -# round-1
    -tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])
    -query_content = "帮我确认下127.0.0.1这个服务器的在10点是否存在异常,请帮我判断一下"
    -query = Message(
    -    role_name="human", role_type="user", tools=tools, input_query=query_content,
    -    )
    -
    -# phase.pre_print(query)
    -output_message, output_memory = phase.step(query)
    -print(output_memory.to_str_messages(return_all=True, content_key="parsed_output_list"))
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - Prev - - - - Next - -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/zh/sitemap.xml b/docs/zh/sitemap.xml deleted file mode 100644 index 7917d59..0000000 --- a/docs/zh/sitemap.xml +++ /dev/null @@ -1,1019 +0,0 @@ - - - - /zh/docs/codefuse-query/1_abstract/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/devops_eval/tool_learning_evalution/ - 2024-04-09T13:54:32+08:00 - - /zh/docs/devops_eval/tool_learning_info_zh/ - 2024-04-09T13:54:32+08:00 - - /zh/docs/devops_eval/tutorial_zh/ - 2024-04-09T13:54:32+08:00 - - /zh/coagent/agent-%E7%BC%96%E6%8E%92/ - 2024-01-24T19:45:27+08:00 - - - - /zh/muagent/agent-%E7%BC%96%E6%8E%92/ - 2024-04-09T13:54:32+08:00 - - - - /zh/categories/ - - - - /zh/docs/chatbot-%E6%8A%80%E6%9C%AF%E8%B7%AF%E7%BA%BF/ - 2024-01-23T14:58:31+08:00 - - - - /zh/coagent/coagent-%E6%A6%82%E8%A7%88/ - 2024-01-24T19:45:27+08:00 - - - - /zh/coagent/ - 2024-01-25T21:08:32+08:00 - - - - /zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-chatbot-zh/ - 2024-01-09T14:29:00+08:00 - - - - /zh/docs/overview/codefuse-chatbot-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-devops/ - 2024-01-09T14:29:00+08:00 - - /zh/docs/codefuse-devops-eval-zh/ - 2024-01-09T14:29:00+08:00 - - - - /zh/docs/overview/codefuse-devops-eval-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-devops-model-zh/ - 2024-01-09T14:29:00+08:00 - - - - /zh/docs/overview/codefuse-devops-model-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/overview/codefuse-mft-vlm/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-modelcache-zh/ - 2024-01-09T14:29:00+08:00 - - - - /zh/docs/overview/codefuse-modelcache-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-query-zh/ - 2024-01-09T14:29:00+08:00 - - - - /zh/docs/overview/codefuse-query-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-query-introduction-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/overview/b10.codefuse-evalution/ - 2024-04-09T13:54:32+08:00 - - - - /zh/coagent/connector-agent-zh/ 
- 2024-01-25T20:54:37+08:00 - - - - /zh/muagent/connector-agent-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/coagent/connector-chain-zh/ - 2024-01-25T20:54:37+08:00 - - - - /zh/muagent/connector-chain-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/coagent/connector-memory-zh/ - 2024-01-25T20:54:37+08:00 - - - - /zh/muagent/connector-memory-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/coagent/connector-phase-zh/ - 2024-01-25T20:54:37+08:00 - - - - /zh/muagent/connector-phase-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/coagent/connector-prompt-zh/ - 2024-01-25T20:54:37+08:00 - - - - /zh/muagent/connector-prompt-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/contribution/ - 2024-04-09T13:54:32+08:00 - - - - /zh/coagent/customed-examples-zh/ - 2024-01-25T20:54:37+08:00 - - - - /zh/muagent/custom-examples-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/ - 2024-04-09T13:54:32+08:00 - - - - /zh/muagent/embedding-model-config-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/fastertransformer4codefuse-zh/ - 2024-01-09T14:29:00+08:00 - - - - /zh/docs/overview/fastertransformer4codefuse-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/muagent/llm-model-config-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/mftcoder-zh/ - 2024-01-09T14:29:00+08:00 - - - - /docs/mftcoder-introduction-zh/ - 2024-04-09T13:54:32+08:00 - - - - /docs/mftcoder-accelerate-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/overview/mftcoder-zh/ - 2024-04-09T13:54:32+08:00 - - - - /docs/mftcoder-atorch-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/muagent/muagent-%E6%A6%82%E8%A7%88/ - 2024-04-09T13:54:32+08:00 - - - - /zh/muagent/ - 2024-04-09T13:54:32+08:00 - - - - /zh/coagent/prompt-%E7%AE%A1%E7%90%86%E5%99%A8/ - 2024-01-25T21:08:32+08:00 - - - - /docs/codefuse-modelcache-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /docs/mftcoder-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/tags/ - - - - /zh/docs/test-agent-zh/ - 2024-01-09T14:29:00+08:00 - - - - /zh/docs/overview/test-agent-zh/ - 
2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-toolchain-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/%E6%9C%AC%E5%9C%B0%E7%A7%81%E6%9C%89%E5%8C%96%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%8F%A3%E6%8E%A5%E5%85%A5/ - 2024-01-25T20:54:37+08:00 - - - - /docs/codefuse-query-godellanguage-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/zh_overview/ - 2024-01-09T14:29:00+08:00 - - - - /docs/codefuse-modelcache-feature-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/contribution/%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97/ - 2024-01-23T20:52:15+08:00 - - - - /zh/coagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - 2024-01-25T20:54:37+08:00 - - - - /zh/docs/codefuse-chatbot-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/muagent/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-evalution-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-mft-vlm/%E5%BF%AB%E9%80%9F%E4%BD%BF%E7%94%A8/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-devops-model-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /docs/test-agent-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/codefuse-devops-eval-quickstart-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/%E5%90%AF%E5%8A%A8%E6%98%8E%E7%BB%86/ - 2024-04-09T13:54:32+08:00 - - - - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4issue/ - 2024-01-23T20:52:15+08:00 - - - - /zh/contribution/%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4pr/ - 2024-04-09T13:54:32+08:00 - - - - /zh/docs/%E6%95%B0%E6%8D%AE%E4%BB%8B%E7%BB%8D/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-devops-model-train-zh/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-query-usercase-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/contribution/%E8%87%B4%E8%B0%A2/ - 2024-01-23T20:52:15+08:00 - - - - /zh/muagent/custom-retrieval-zh/ - 2024-04-09T13:54:32+08:00 - - - - /zh/muagent/custom-tool-zh/ - 2024-04-09T13:54:32+08:00 - - - - 
/docs/codefuse-modelcache-config-zh/ - 2024-04-09T13:54:32+08:00 - - - - /docs/codefuse-modelcache-release-zh/ - 2024-04-09T13:54:32+08:00 - - diff --git a/docs/zh/tags/index.xml b/docs/zh/tags/index.xml deleted file mode 100644 index 79b6ae2..0000000 --- a/docs/zh/tags/index.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - Tags on CodeFuse-AI - /zh/tags/ - Recent content in Tags on CodeFuse-AI - Hugo -- gohugo.io - en-CN - - - diff --git a/hugo.yaml b/hugo.yaml deleted file mode 100644 index 89a74e1..0000000 --- a/hugo.yaml +++ /dev/null @@ -1,191 +0,0 @@ -theme: docura - -enableGitInfo: true - -showSidebar: true -siderbar: - collapsible: true - -markup: - highlight: - noClasses: false - goldmark: - renderer: - unsafe: true - -services: - googleAnalytics: - ID: G-xxxx - -defaultContentLanguage: en -languages: - en: - languageName: English - languageCode: en-US - contentDir: content/en - title: CodeFuse-AI - description: "CodeFuse aims to develop Code Large Language Models (Code LLMs) to support and enhance full-lifecycle AI native sotware developing, - covering crucial stages such as design requirements, coding, testing, building, deployment, operations, and insight analysis." 
- weight: 1 - - zh: - languageName: 中文 - languageCode: en-CN - contentDir: content/zh - title: CodeFuse-AI - description: "CodeFuse的使命是开发专门设计用于支持整个软件开发生命周期的大型代码语言模型(Code LLMs), - 涵盖设计、需求、编码、测试、部署、运维等关键阶段。我们致力于打造创新的解决方案,让软件开发者们在研发的过程中如丝般顺滑。" - weight: 1 - -params: - - author: - name: codefuse-ai - url: https://github.com/codefuse-ai - - themeColors: - light: '#ffffff' - dark: '#121212' - - years: - start: 2023 - present: 2024 - - social: - github: 'codefuse-ai' - youtube: '' - facebook: '' - x: '' - - ads: - googleAdSense: '' - - donate: - buyMeACoffee: '' - githubSponsor: '' - - algolia: - en: - container: '#site-header-search' - appId: '' - indexName: '' - apiKey: '' - - home: - - # repository: 'codefuse-ai/codefuse-chatbot' - - repositories: - - # - name: CodeFuse-Query - # repo: codefuse-ai/CodeFuse-Query - # icon: "" - # description: Query-Based Code Analysis Engine - - # - name: MFTCoder - # repo: codefuse-ai/MFTCoder - # icon: "" - # description: High Accuracy and efficiency multi-task fine-tuning framework for Code LLMs - - # - name: Test-Agent - # repo: codefuse-ai/Test-Agent - # icon: "" - # description: Agent that empowers software testing with LLMs; industrial-first in China - - # - name: CodeFuse-ModelCache - # repo: codefuse-ai/CodeFuse-ModelCache - # icon: "" - # description: A LLM semantic caching system aiming to reducing response time via cached query-result pairs. - - # repo: codefuse-ai/codefuse-chatbot - # icon: "" - # description: An intelligent assistant serving the entire software development lifecycle. - - # - name: DevOps-Eval - # repo: codefuse-ai/codefuse-devops-eval - # icon: "" - # description: Industrial-first evaluation benchmark for LLMs in the DevOps/AIOps domain. - - # - name: DevOps-Model - # repo: codefuse-ai/CodeFuse-DevOps-Model - # icon: "" - # description: A series of industrial-first LLMs for the DevOps domain. 
- - # - name :Codefuse-evaluation - # repo: codefuse-ai/codefuse-evaluation - # icon: "" - # description: Industrial-level evaluation benchmarks for Coding LLMs in the full life-cycle of AI native software developing - - - - name: CodeFuse-Query - icon: "" - description: Query-Based Code Analysis Engine - repos: - - repo: CodeFuse-Query - - - name: MFTCoder - repos: - - repo: MFTCoder - description: High Accuracy and efficiency multi-task fine-tuning framework for Code LLMs - - repo: CodeFuse-MFT-VLM - description: An intelligent assistant serving the entire software development lifecycle. - icon: "" - description: High Accuracy and efficiency multi-task fine-tuning framework for Code LLMs - - - name: Test-Agent - repos: - - repo: Test-Agent - icon: "" - description: Agent that empowers software testing with LLMs; industrial-first in China - - - name: CodeFuse-ModelCache - repos: - - repo: CodeFuse-ModelCache - icon: "" - description: A LLM semantic caching system aiming to reducing response time via cached query-result pairs. - - - name: DevOps-Series - description: An intelligent assistant serving the entire software development lifecycle. - icon: "" - repos: - - repo: codefuse-chatbot - description: An intelligent assistant serving the entire software development lifecycle. - - - repo: codefuse-devops-eval - description: Industrial-first evaluation benchmark for LLMs in the DevOps/AIOps domain. - - - repo: CodeFuse-DevOps-Model - description: A series of industrial-first LLMs for the DevOps domain. 
- - - name: Codefuse-evaluation - icon: "" - description: Industrial-level evaluation benchmarks for Coding LLMs in the full life-cycle of AI native software developing - repos: - - repo: codefuse-evaluation - -menu: - main: - - - url: / - name: Home - identifier: home - pre: "" - weight: 1 - - - url: /docs - name: Overview - identifier: docs - pre: "" - weight: 2 - - - url: /muagent - name: MuAgent - identifier: muagent - pre: "" - weight: 3 - - - url: /contribution - name: Contribution - identifier: contribution - pre: "" - weight: 4 \ No newline at end of file diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..a9f331f --- /dev/null +++ b/package-lock.json @@ -0,0 +1,33507 @@ +{ + "name": "CodeFuse-Docs", + "version": "0.0.1", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "CodeFuse-Docs", + "version": "0.0.1", + "license": "MIT", + "dependencies": { + "antd": "^5.16.5", + "react-slick": "^0.30.2", + "slick-carousel": "^1.8.1", + "styled-components": "^6.1.8" + }, + "devDependencies": { + "@commitlint/cli": "^17.1.2", + "@commitlint/config-conventional": "^17.1.0", + "@types/lodash": "^4.17.0", + "dumi": "^2.2.0", + "husky": "^8.0.1", + "lint-staged": "^13.0.3", + "prettier": "^2.7.1" + } + }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npm.alibaba-inc.com/@aashutoshrathi/word-wrap/download/@aashutoshrathi/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@ant-design/colors": { + "version": "7.0.2", + "license": 
"MIT", + "dependencies": { + "@ctrl/tinycolor": "^3.6.1" + } + }, + "node_modules/@ant-design/cssinjs": { + "version": "1.20.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@emotion/hash": "^0.8.0", + "@emotion/unitless": "^0.7.5", + "classnames": "^2.3.1", + "csstype": "^3.1.3", + "rc-util": "^5.35.0", + "stylis": "^4.0.13" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/icons": { + "version": "5.3.6", + "license": "MIT", + "dependencies": { + "@ant-design/colors": "^7.0.0", + "@ant-design/icons-svg": "^4.4.0", + "@babel/runtime": "^7.11.2", + "classnames": "^2.2.6", + "rc-util": "^5.31.1" + }, + "engines": { + "node": ">=8" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/icons-svg": { + "version": "4.4.2", + "license": "MIT" + }, + "node_modules/@ant-design/react-slick": { + "version": "1.1.2", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.4", + "classnames": "^2.2.5", + "json2mq": "^0.2.0", + "resize-observer-polyfill": "^1.5.1", + "throttle-debounce": "^5.0.0" + }, + "peerDependencies": { + "react": ">=16.9.0" + } + }, + "node_modules/@antfu/install-pkg": { + "version": "0.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.1.1", + "find-up": "^5.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@antfu/utils": { + "version": "0.7.7", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.24.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/highlight": "^7.24.2", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.24.4", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/core": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.24.5", + "@babel/helpers": "^7.24.5", + "@babel/parser": "^7.24.5", + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@babel/core/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/eslint-parser": { + "version": "7.23.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@nicolo-ribaudo/eslint-scope-5-internals": "5.1.1-v1", + "eslint-visitor-keys": "^2.1.0", + "semver": "^6.3.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || >=14.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.11.0", + "eslint": "^7.5.0 || ^8.0.0" + } + }, + "node_modules/@babel/eslint-parser/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.24.5", + 
"@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.23.6", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.24.3", + "@babel/helper-simple-access": "^7.24.5", + "@babel/helper-split-export-declaration": "^7.24.5", + 
"@babel/helper-validator-identifier": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.24.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.23.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.24.5", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": 
"2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" 
+ } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": 
"^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.23.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.24.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.24.5", + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.24.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/parser": "^7.24.0", + "@babel/types": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.24.5", + "@babel/parser": "^7.24.5", + "@babel/types": "^7.24.5", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse/node_modules/debug": { + 
"version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@babel/traverse/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/types": { + "version": "7.24.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.24.1", + "@babel/helper-validator-identifier": "^7.24.5", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bloomberg/record-tuple-polyfill": { + "version": "0.0.4", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/@commitlint/cli": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/format": "^17.8.1", + "@commitlint/lint": "^17.8.1", + "@commitlint/load": "^17.8.1", + "@commitlint/read": "^17.8.1", + "@commitlint/types": "^17.8.1", + "execa": "^5.0.0", + "lodash.isfunction": "^3.0.9", + "resolve-from": "5.0.0", + "resolve-global": "1.0.0", + "yargs": "^17.0.0" + }, + "bin": { + "commitlint": "cli.js" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/config-conventional": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "conventional-changelog-conventionalcommits": "^6.1.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/config-validator": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^17.8.1", + "ajv": "^8.11.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/config-validator/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npm.alibaba-inc.com/ajv/download/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dev": true, 
+ "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "node_modules/@commitlint/config-validator/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/json-schema-traverse/download/json-schema-traverse-1.0.0.tgz", + "integrity": "sha1-rnvLNlard6c7pcSb9lTzjmtoYOI=", + "dev": true + }, + "node_modules/@commitlint/ensure": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^17.8.1", + "lodash.camelcase": "^4.3.0", + "lodash.kebabcase": "^4.1.1", + "lodash.snakecase": "^4.1.1", + "lodash.startcase": "^4.4.0", + "lodash.upperfirst": "^4.3.1" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/execute-rule": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/format": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^17.8.1", + "chalk": "^4.1.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/is-ignored": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^17.8.1", + "semver": "7.5.4" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/lint": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/is-ignored": "^17.8.1", + "@commitlint/parse": "^17.8.1", + "@commitlint/rules": "^17.8.1", + "@commitlint/types": "^17.8.1" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/load": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/config-validator": "^17.8.1", + "@commitlint/execute-rule": "^17.8.1", + "@commitlint/resolve-extends": "^17.8.1", + "@commitlint/types": "^17.8.1", + "@types/node": "20.5.1", + 
"chalk": "^4.1.0", + "cosmiconfig": "^8.0.0", + "cosmiconfig-typescript-loader": "^4.0.0", + "lodash.isplainobject": "^4.0.6", + "lodash.merge": "^4.6.2", + "lodash.uniq": "^4.5.0", + "resolve-from": "^5.0.0", + "ts-node": "^10.8.1", + "typescript": "^4.6.4 || ^5.2.2" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/message": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/parse": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^17.8.1", + "conventional-changelog-angular": "^6.0.0", + "conventional-commits-parser": "^4.0.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/read": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/top-level": "^17.8.1", + "@commitlint/types": "^17.8.1", + "fs-extra": "^11.0.0", + "git-raw-commits": "^2.0.11", + "minimist": "^1.2.6" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/resolve-extends": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/config-validator": "^17.8.1", + "@commitlint/types": "^17.8.1", + "import-fresh": "^3.0.0", + "lodash.mergewith": "^4.6.2", + "resolve-from": "^5.0.0", + "resolve-global": "^1.0.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/rules": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/ensure": "^17.8.1", + "@commitlint/message": "^17.8.1", + "@commitlint/to-lines": "^17.8.1", + "@commitlint/types": "^17.8.1", + "execa": "^5.0.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/to-lines": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/top-level": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + 
"dependencies": { + "find-up": "^5.0.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@commitlint/types": { + "version": "17.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0" + }, + "engines": { + "node": ">=v14" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@csstools/postcss-color-function": { + "version": "1.1.1", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-font-format-keywords": { + "version": "1.0.1", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-hwb-function": { + "version": "1.0.2", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-ic-unit": { + "version": "1.0.1", + 
"dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-is-pseudo-class": { + "version": "2.0.7", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/selector-specificity": "^2.0.0", + "postcss-selector-parser": "^6.0.10" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-normalize-display-values": { + "version": "1.0.1", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-oklab-function": { + "version": "1.1.1", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-progressive-custom-properties": { + "version": "1.3.0", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.3" + } + }, + "node_modules/@csstools/postcss-stepped-value-functions": { + "version": "1.0.1", + "dev": true, + 
"license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/postcss-unset-value": { + "version": "1.0.2", + "dev": true, + "license": "CC0-1.0", + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/@csstools/selector-specificity": { + "version": "2.2.0", + "dev": true, + "license": "CC0-1.0", + "engines": { + "node": "^14 || ^16 || >=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss-selector-parser": "^6.0.10" + } + }, + "node_modules/@ctrl/tinycolor": { + "version": "3.6.1", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/@emotion/hash": { + "version": "0.8.0", + "license": "MIT" + }, + "node_modules/@emotion/is-prop-valid": { + "version": "1.2.1", + "license": "MIT", + "dependencies": { + "@emotion/memoize": "^0.8.1" + } + }, + "node_modules/@emotion/memoize": { + "version": "0.8.1", + "license": "MIT" + }, + "node_modules/@emotion/unitless": { + "version": "0.7.5", + "license": "MIT" + }, + "node_modules/@esbuild-kit/cjs-loader": { + "version": "2.4.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@esbuild-kit/core-utils": "^3.2.3", + "get-tsconfig": "^4.7.0" + } + }, + "node_modules/@esbuild-kit/core-utils": { + "version": "3.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "~0.18.20", + "source-map-support": "^0.5.21" + } + }, + "node_modules/@esbuild-kit/core-utils/node_modules/@esbuild/darwin-arm64": { + "version": "0.18.20", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": 
true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild-kit/core-utils/node_modules/esbuild": { + "version": "0.18.20", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.18.20", + "@esbuild/android-arm64": "0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + "@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" + } + }, + "node_modules/@esbuild-kit/esm-loader": { + "version": "2.6.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@esbuild-kit/core-utils": "^3.3.2", + "get-tsconfig": "^4.7.0" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.17.19", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "dev": true, + 
"license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.10.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npm.alibaba-inc.com/@eslint/eslintrc/download/@eslint/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "peer": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npm.alibaba-inc.com/globals/download/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "peer": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + 
"integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + }, + "node_modules/@eslint/eslintrc/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-json-comments/download/strip-json-comments-3.1.1.tgz", + "integrity": "sha1-MfEoGzgyYwQ0gxwxDAHMzajL4AY=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@eslint/eslintrc/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npm.alibaba-inc.com/type-fest/download/type-fest-0.20.2.tgz", + "integrity": "sha1-G/IH9LKPkVg2ZstfvTJ4hzAc1fQ=", + "dev": true, + "peer": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.0", + "resolved": "https://registry.npm.alibaba-inc.com/@eslint/js/download/@eslint/js-8.57.0.tgz", + "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", + "dev": true, + "peer": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "0.6.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@floating-ui/dom": { + "version": "0.4.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^0.6.2" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "0.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^0.4.5", + "use-isomorphic-layout-effect": "^1.1.1" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/react-dom-interactions": { + "version": "0.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^0.6.3", + "aria-hidden": "^1.1.3", + "point-in-polygon": "^1.1.0", + "use-isomorphic-layout-effect": "^1.1.1" + } + }, + "node_modules/@formatjs/ecma402-abstract": { + "version": "1.18.2", + "dev": true, + "license": 
"MIT", + "dependencies": { + "@formatjs/intl-localematcher": "0.5.4", + "tslib": "^2.4.0" + } + }, + "node_modules/@formatjs/fast-memoize": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@formatjs/icu-messageformat-parser": { + "version": "2.7.6", + "dev": true, + "license": "MIT", + "dependencies": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/icu-skeleton-parser": "1.8.0", + "tslib": "^2.4.0" + } + }, + "node_modules/@formatjs/icu-skeleton-parser": { + "version": "1.8.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@formatjs/ecma402-abstract": "1.18.2", + "tslib": "^2.4.0" + } + }, + "node_modules/@formatjs/intl": { + "version": "2.10.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/fast-memoize": "2.2.0", + "@formatjs/icu-messageformat-parser": "2.7.6", + "@formatjs/intl-displaynames": "6.6.6", + "@formatjs/intl-listformat": "7.5.5", + "intl-messageformat": "10.5.11", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "typescript": "^4.7 || 5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@formatjs/intl-displaynames": { + "version": "6.6.6", + "dev": true, + "license": "MIT", + "dependencies": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/intl-localematcher": "0.5.4", + "tslib": "^2.4.0" + } + }, + "node_modules/@formatjs/intl-listformat": { + "version": "7.5.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/intl-localematcher": "0.5.4", + "tslib": "^2.4.0" + } + }, + "node_modules/@formatjs/intl-localematcher": { + "version": "0.5.4", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": 
"https://registry.npm.alibaba-inc.com/@humanwhocodes/config-array/download/@humanwhocodes/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "dev": true, + "peer": true, + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + "integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/@humanwhocodes/module-importer/download/@humanwhocodes/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "peer": true, + "engines": { + "node": ">=12.22" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npm.alibaba-inc.com/@humanwhocodes/object-schema/download/@humanwhocodes/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "dev": true, + "peer": true + }, + "node_modules/@iconify/types": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + 
"node_modules/@iconify/utils": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@antfu/install-pkg": "^0.1.1", + "@antfu/utils": "^0.7.2", + "@iconify/types": "^2.0.0", + "debug": "^4.3.4", + "kolorist": "^1.6.0", + "local-pkg": "^0.4.2" + } + }, + "node_modules/@iconify/utils/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@iconify/utils/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "dev": true, + "license": "ISC", + 
"dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + 
"dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform/node_modules/@jest/types": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform/node_modules/@types/yargs": { + "version": "17.0.32", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@jest/types": { + "version": "27.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^16.0.0", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": 
"1.2.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.6", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@loadable/component": { + "version": "5.15.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.7.7", + "hoist-non-react-statics": "^3.3.1", + "react-is": "^16.12.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "react": ">=16.3.0" + } + }, + "node_modules/@loadable/component/node_modules/react-is": { + "version": "16.13.1", + "dev": true, + "license": "MIT" + }, + "node_modules/@makotot/ghostui": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": ">=16" + } + }, + "node_modules/@nicolo-ribaudo/eslint-scope-5-internals": { + "version": "5.1.1-v1", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-scope": "5.1.1" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": 
"2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@pkgr/utils": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "fast-glob": "^3.3.0", + "is-glob": "^4.0.3", + "open": "^9.1.0", + "picocolors": "^1.0.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": "^12.20.0 || ^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, + "node_modules/@pkgr/utils/node_modules/fast-glob": { + "version": "3.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/@pkgr/utils/node_modules/is-docker": { + "version": "2.2.1", + "dev": true, + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@pkgr/utils/node_modules/is-wsl": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@pkgr/utils/node_modules/open": { + "version": "9.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "default-browser": "^4.0.0", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@rc-component/color-picker": { + "version": "1.5.3", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.6", + "@ctrl/tinycolor": "^3.6.1", + "classnames": "^2.2.6", + "rc-util": "^5.38.1" + }, + 
"peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/context": { + "version": "1.4.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/mini-decimal": { + "version": "1.1.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0" + }, + "engines": { + "node": ">=8.x" + } + }, + "node_modules/@rc-component/mutate-observer": { + "version": "1.1.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/portal": { + "version": "1.1.2", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/tour": { + "version": "1.14.2", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "@rc-component/portal": "^1.0.0-9", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/trigger": { + "version": "2.1.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.2", + "@rc-component/portal": "^1.1.0", + "classnames": "^2.3.2", + "rc-motion": "^2.0.0", + "rc-resize-observer": "^1.3.1", + "rc-util": "^5.38.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@selderee/plugin-htmlparser2": { + "version": "0.11.0", + 
"dev": true, + "license": "MIT", + "dependencies": { + "domhandler": "^5.0.3", + "selderee": "^0.11.0" + }, + "funding": { + "url": "https://ko-fi.com/killymxi" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "dev": true, + "license": "MIT" + }, + "node_modules/@sketch-hq/sketch-file-format-ts": { + "version": "6.5.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@stackblitz/sdk": { + "version": "1.9.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@stylelint/postcss-css-in-js": { + "version": "0.38.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.17.9" + }, + "peerDependencies": { + "postcss": ">=7.0.0", + "postcss-syntax": ">=0.36.2" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", + "@svgr/babel-plugin-remove-jsx-attribute": "*", + "@svgr/babel-plugin-remove-jsx-empty-expression": "*", + "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", + "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", + "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", + "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", + "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { 
+ "version": "6.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "camelcase": "^6.2.0", + "cosmiconfig": "^7.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/core/node_modules/camelcase": { + "version": "6.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@svgr/core/node_modules/cosmiconfig": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@svgr/core/node_modules/yaml": { + "version": "1.10.2", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.20.0", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/hast-util-to-babel-ast": "^6.5.1", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "^6.0.0" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "6.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "cosmiconfig": "^7.0.1", + "deepmerge": "^4.2.2", + "svgo": "^2.8.0" + }, + "engines": { + 
"node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/plugin-svgo/node_modules/cosmiconfig": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@svgr/plugin-svgo/node_modules/yaml": { + "version": "1.10.2", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, + "node_modules/@swc/core": { + "version": "1.4.2", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.2", + "@swc/types": "^0.1.5" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.4.2", + "@swc/core-darwin-x64": "1.4.2", + "@swc/core-linux-arm-gnueabihf": "1.4.2", + "@swc/core-linux-arm64-gnu": "1.4.2", + "@swc/core-linux-arm64-musl": "1.4.2", + "@swc/core-linux-x64-gnu": "1.4.2", + "@swc/core-linux-x64-musl": "1.4.2", + "@swc/core-win32-arm64-msvc": "1.4.2", + "@swc/core-win32-ia32-msvc": "1.4.2", + "@swc/core-win32-x64-msvc": "1.4.2" + }, + "peerDependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } + } + }, + "node_modules/@swc/core-darwin-arm64": { + "version": "1.4.2", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/@swc/types": { + "version": "0.1.6", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3" + } 
+ }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.6.8", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/eslint": { + "version": "8.56.10", + "resolved": "https://registry.npm.alibaba-inc.com/@types/eslint/download/@types/eslint-8.56.10.tgz", + "integrity": "sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", + "dev": true, + "peer": true, + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.5", + "resolved": 
"https://registry.npm.alibaba-inc.com/@types/eslint-scope/download/@types/eslint-scope-3.7.5.tgz", + "integrity": "sha512-JNvhIEyxVW6EoMIFIvj93ZOywYFatlpu9deeH6eSx6PE3WHYvHaQtmHmQeNw7aA81bYGBPPQqdtBm6b1SsQMmA==", + "dev": true, + "peer": true, + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/fs-extra": { + "version": "11.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/jsonfile": "*", + "@types/node": "*" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/hapi__joi": { + "version": "17.1.9", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "2.3.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/@types/hoist-non-react-statics": { + "version": "3.3.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/react": "*", + "hoist-non-react-statics": "^3.3.0" + } + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "dev": true, + "license": "MIT" + }, + 
"node_modules/@types/jsonfile": { + "version": "6.1.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/lodash": { + "version": "4.17.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mdast": { + "version": "3.0.15", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/@types/minimist": { + "version": "1.2.5", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.5.1", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/normalize-package-data": { + "version": "2.4.4", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/parse5": { + "version": "6.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.12", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/q": { + "version": "1.5.8", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/ramda": { + "version": "0.29.3", + "dev": true, + "license": "MIT", + "dependencies": { + "types-ramda": "^0.29.4" + } + }, + "node_modules/@types/react": { + "version": "18.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/semver": { + "version": "7.5.8", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/stylis": { + "version": "4.2.0", + "license": "MIT" + }, + "node_modules/@types/unist": { + "version": "2.0.10", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": "16.0.9", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.62.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.4.0", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/type-utils": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "natural-compare-lite": "^1.4.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/parser": { + "version": "5.62.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + 
"node_modules/@typescript-eslint/parser/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "5.62.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "5.62.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/types": { + "version": "5.62.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "5.62.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/utils": { + "version": "5.62.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@types/json-schema": "^7.0.9", + "@types/semver": "^7.3.12", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "eslint-scope": "^5.1.1", + "semver": "^7.3.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "5.62.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || 
^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@umijs/ast": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@umijs/bundler-utils": "4.1.10" + } + }, + "node_modules/@umijs/babel-preset-umi": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "7.23.6", + "@bloomberg/record-tuple-polyfill": "0.0.4", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "core-js": "3.34.0" + } + }, + "node_modules/@umijs/babel-preset-umi/node_modules/@babel/runtime": { + "version": "7.23.6", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@umijs/bundler-esbuild": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "enhanced-resolve": "5.9.3", + "postcss": "^8.4.21", + "postcss-flexbugs-fixes": "5.0.2", + "postcss-preset-env": "7.5.0" + }, + "bin": { + "bundler-esbuild": "bin/bundler-esbuild.js" + } + }, + "node_modules/@umijs/bundler-esbuild/node_modules/enhanced-resolve": { + "version": "5.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@umijs/bundler-utils": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@umijs/utils": "4.1.10", + "esbuild": "0.17.19", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "10.1.1", + "spdy": "^4.0.2" + } + }, 
+ "node_modules/@umijs/bundler-vite": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@svgr/core": "6.5.1", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "@vitejs/plugin-react": "4.0.0", + "core-js": "3.34.0", + "less": "4.1.3", + "postcss-preset-env": "7.5.0", + "rollup-plugin-visualizer": "5.9.0", + "systemjs": "^6.14.1", + "vite": "4.5.2" + }, + "bin": { + "bundler-vite": "bin/bundler-vite.js" + } + }, + "node_modules/@umijs/bundler-webpack": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@svgr/core": "6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "@svgr/plugin-svgo": "^6.5.1", + "@types/hapi__joi": "17.1.9", + "@umijs/babel-preset-umi": "4.1.10", + "@umijs/bundler-utils": "4.1.10", + "@umijs/case-sensitive-paths-webpack-plugin": "^1.0.1", + "@umijs/mfsu": "4.1.10", + "@umijs/react-refresh-webpack-plugin": "0.5.11", + "@umijs/utils": "4.1.10", + "cors": "^2.8.5", + "css-loader": "6.7.1", + "es5-imcompatible-versions": "^0.1.78", + "fork-ts-checker-webpack-plugin": "8.0.0", + "jest-worker": "29.4.3", + "lightningcss": "1.22.1", + "node-libs-browser": "2.2.1", + "postcss": "^8.4.21", + "postcss-preset-env": "7.5.0", + "react-error-overlay": "6.0.9", + "react-refresh": "0.14.0" + }, + "bin": { + "bundler-webpack": "bin/bundler-webpack.js" + } + }, + "node_modules/@umijs/case-sensitive-paths-webpack-plugin": { + "version": "1.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/@umijs/core": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10" + } + }, + "node_modules/@umijs/did-you-know": { + "version": "1.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@umijs/es-module-parser": { + "version": "0.0.7", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@umijs/es-module-parser-darwin-arm64": "0.0.7", + 
"@umijs/es-module-parser-darwin-x64": "0.0.7", + "@umijs/es-module-parser-linux-arm-gnueabihf": "0.0.7", + "@umijs/es-module-parser-linux-arm64-gnu": "0.0.7", + "@umijs/es-module-parser-linux-arm64-musl": "0.0.7", + "@umijs/es-module-parser-linux-x64-gnu": "0.0.7", + "@umijs/es-module-parser-linux-x64-musl": "0.0.7", + "@umijs/es-module-parser-win32-arm64-msvc": "0.0.7", + "@umijs/es-module-parser-win32-x64-msvc": "0.0.7" + } + }, + "node_modules/@umijs/es-module-parser-darwin-arm64": { + "version": "0.0.7", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@umijs/history": { + "version": "5.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.7.6", + "query-string": "^6.13.6" + } + }, + "node_modules/@umijs/lint": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "7.23.6", + "@babel/eslint-parser": "7.23.3", + "@stylelint/postcss-css-in-js": "^0.38.0", + "@typescript-eslint/eslint-plugin": "^5.62.0", + "@typescript-eslint/parser": "^5.62.0", + "@umijs/babel-preset-umi": "4.1.10", + "eslint-plugin-jest": "27.2.3", + "eslint-plugin-react": "7.33.2", + "eslint-plugin-react-hooks": "4.6.0", + "postcss": "^8.4.21", + "postcss-syntax": "0.36.2", + "stylelint-config-standard": "25.0.0" + } + }, + "node_modules/@umijs/lint/node_modules/@babel/core": { + "version": "7.23.6", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helpers": "^7.23.6", + "@babel/parser": "^7.23.6", + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.6", + "@babel/types": "^7.23.6", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", 
+ "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@umijs/lint/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@umijs/lint/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/@umijs/lint/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@umijs/mfsu": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@umijs/bundler-esbuild": "4.1.10", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "enhanced-resolve": "5.9.3", + "is-equal": "^1.6.4" + } + }, + "node_modules/@umijs/mfsu/node_modules/enhanced-resolve": { + "version": "5.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@umijs/plugin-run": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "tsx": "3.12.2" + } + }, + "node_modules/@umijs/preset-umi": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@iconify/utils": "2.1.1", + "@svgr/core": "6.5.1", + "@umijs/ast": "4.1.10", + "@umijs/babel-preset-umi": "4.1.10", + "@umijs/bundler-esbuild": "4.1.10", + "@umijs/bundler-utils": "4.1.10", + "@umijs/bundler-vite": "4.1.10", + "@umijs/bundler-webpack": "4.1.10", + "@umijs/core": "4.1.10", + "@umijs/did-you-know": "1.0.3", + "@umijs/es-module-parser": "0.0.7", + "@umijs/history": "5.3.1", + "@umijs/mfsu": "4.1.10", + "@umijs/plugin-run": "4.1.10", + "@umijs/renderer-react": "4.1.10", + "@umijs/server": "4.1.10", + 
"@umijs/ui": "3.0.1", + "@umijs/utils": "4.1.10", + "@umijs/zod2ts": "4.1.10", + "babel-plugin-dynamic-import-node": "2.3.3", + "click-to-react-component": "^1.0.8", + "core-js": "3.34.0", + "current-script-polyfill": "1.0.0", + "enhanced-resolve": "5.9.3", + "fast-glob": "3.2.12", + "html-webpack-plugin": "5.5.0", + "less-plugin-resolve": "1.0.2", + "path-to-regexp": "1.7.0", + "postcss": "^8.4.21", + "postcss-prefix-selector": "1.16.0", + "react": "18.1.0", + "react-dom": "18.1.0", + "react-router": "6.3.0", + "react-router-dom": "6.3.0", + "regenerator-runtime": "0.13.11" + } + }, + "node_modules/@umijs/preset-umi/node_modules/enhanced-resolve": { + "version": "5.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@umijs/preset-umi/node_modules/regenerator-runtime": { + "version": "0.13.11", + "dev": true, + "license": "MIT" + }, + "node_modules/@umijs/react-refresh-webpack-plugin": { + "version": "0.5.11", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-html-community": "^0.0.8", + "common-path-prefix": "^3.0.0", + "core-js-pure": "^3.23.3", + "error-stack-parser": "^2.0.6", + "find-up": "^5.0.0", + "html-entities": "^2.1.0", + "loader-utils": "^2.0.4", + "schema-utils": "^3.0.0", + "source-map": "^0.7.3" + }, + "engines": { + "node": ">= 10.13" + }, + "peerDependencies": { + "@types/webpack": "4.x || 5.x", + "react-refresh": ">=0.10.0 <1.0.0", + "sockjs-client": "^1.4.0", + "type-fest": ">=0.17.0 <5.0.0", + "webpack": ">=4.43.0 <6.0.0", + "webpack-dev-server": "3.x || 4.x", + "webpack-hot-middleware": "2.x", + "webpack-plugin-serve": "0.x || 1.x" + }, + "peerDependenciesMeta": { + "@types/webpack": { + "optional": true + }, + "sockjs-client": { + "optional": true + }, + "type-fest": { + "optional": true + }, + "webpack-dev-server": { + "optional": true + }, + "webpack-hot-middleware": { + "optional": true + }, + 
"webpack-plugin-serve": { + "optional": true + } + } + }, + "node_modules/@umijs/renderer-react": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "7.23.6", + "@loadable/component": "5.15.2", + "history": "5.3.0", + "react-helmet-async": "1.3.0", + "react-router-dom": "6.3.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/@umijs/renderer-react/node_modules/@babel/runtime": { + "version": "7.23.6", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@umijs/server": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@umijs/bundler-utils": "4.1.10", + "history": "5.3.0", + "react": "18.1.0", + "react-dom": "18.1.0", + "react-router-dom": "6.3.0" + } + }, + "node_modules/@umijs/test": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-transform-modules-commonjs": "7.23.3", + "@jest/types": "27.5.1", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "babel-jest": "^29.7.0", + "esbuild": "0.17.19", + "identity-obj-proxy": "3.0.0", + "isomorphic-unfetch": "4.0.2" + } + }, + "node_modules/@umijs/ui": { + "version": "3.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/@umijs/utils": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "chokidar": "3.5.3", + "pino": "7.11.0" + } + }, + "node_modules/@umijs/zod2ts": { + "version": "4.1.10", + "dev": true, + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/@ungap/structured-clone/download/@ungap/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true, + "peer": true + }, + 
"node_modules/@vitejs/plugin-react": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.21.4", + "@babel/plugin-transform-react-jsx-self": "^7.21.0", + "@babel/plugin-transform-react-jsx-source": "^7.19.6", + "react-refresh": "^0.14.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0" + } + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.12.1", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.12.1.tgz", + "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/floating-point-hex-parser/download/@webassemblyjs/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==", + "dev": true, + "peer": true + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-api-error/download/@webassemblyjs/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==", + "dev": true, + "peer": true + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-buffer/download/@webassemblyjs/helper-buffer-1.11.6.tgz", + "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==", + "dev": true, + "peer": true + }, + "node_modules/@webassemblyjs/helper-numbers": { + 
"version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-numbers/download/@webassemblyjs/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-wasm-bytecode/download/@webassemblyjs/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==", + "dev": true, + "peer": true + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-wasm-section/download/@webassemblyjs/helper-wasm-section-1.11.6.tgz", + "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6" + } + }, + "node_modules/@webassemblyjs/helper-wasm-section/node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": 
"https://registry.npm.alibaba-inc.com/@webassemblyjs/ieee754/download/@webassemblyjs/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dev": true, + "peer": true, + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/leb128/download/@webassemblyjs/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dev": true, + "peer": true, + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/utf8/download/@webassemblyjs/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==", + "dev": true, + "peer": true + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-edit/download/@webassemblyjs/wasm-edit-1.11.6.tgz", + "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-opt": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6", + "@webassemblyjs/wast-printer": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-edit/node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": 
"sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-edit/node_modules/@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-parser/download/@webassemblyjs/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-gen/download/@webassemblyjs/wasm-gen-1.11.6.tgz", + "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-gen/node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": 
"1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-opt/download/@webassemblyjs/wasm-opt-1.11.6.tgz", + "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt/node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt/node_modules/@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-parser/download/@webassemblyjs/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.12.1", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-parser/download/@webassemblyjs/wasm-parser-1.12.1.tgz", + "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + 
"@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wast-printer/download/@webassemblyjs/wast-printer-1.11.6.tgz", + "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/wast-printer/node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/@xtuc/ieee754/download/@xtuc/ieee754-1.2.0.tgz", + "integrity": "sha1-7vAUoxRa5Hehy8AM0eVSM23Ot5A=", + "dev": true, + "peer": true + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npm.alibaba-inc.com/@xtuc/long/download/@xtuc/long-4.2.2.tgz", + "integrity": "sha1-0pHGpOl5ibXGHZrPOWrk/hM6cY0=", + "dev": true, + "peer": true + }, + "node_modules/acorn": { + "version": "8.11.3", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npm.alibaba-inc.com/acorn-import-assertions/download/acorn-import-assertions-1.9.0.tgz", + "integrity": 
"sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "dev": true, + "peer": true, + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npm.alibaba-inc.com/acorn-jsx/download/acorn-jsx-5.3.2.tgz", + "integrity": "sha1-ftW7VZCLOy8bxVxq8WU7rafweTc=", + "dev": true, + "peer": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/agent-base": { + "version": "4.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "es6-promisify": "^5.0.0" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/agentkeepalive": { + "version": "3.5.2", + "dev": true, + "license": "MIT", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npm.alibaba-inc.com/ajv/download/ajv-6.12.6.tgz", + "integrity": "sha1-uvWmLoArB9l3A0WG+MO69a3ybfQ=", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "node_modules/ajv-keywords": { + "version": "3.5.2", + "dev": true, + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/animated-scroll-to": { + "version": "2.3.0", + "dev": true, + "license": "MIT" + }, + "node_modules/ansi-align": { + "version": "2.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^2.0.0" + } + }, + "node_modules/ansi-escapes": { + "version": "3.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "dev": true, + "engines": [ + "node >= 0.8.0" + ], + "license": "Apache-2.0", + 
"bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "4.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/antd": { + "version": "5.16.5", + "license": "MIT", + "dependencies": { + "@ant-design/colors": "^7.0.2", + "@ant-design/cssinjs": "^1.18.5", + "@ant-design/icons": "^5.3.6", + "@ant-design/react-slick": "~1.1.2", + "@babel/runtime": "^7.24.4", + "@ctrl/tinycolor": "^3.6.1", + "@rc-component/color-picker": "~1.5.3", + "@rc-component/mutate-observer": "^1.1.0", + "@rc-component/tour": "~1.14.2", + "@rc-component/trigger": "^2.1.1", + "classnames": "^2.5.1", + "copy-to-clipboard": "^3.3.3", + "dayjs": "^1.11.10", + "qrcode.react": "^3.1.0", + "rc-cascader": "~3.24.1", + "rc-checkbox": "~3.2.0", + "rc-collapse": "~3.7.3", + "rc-dialog": "~9.4.0", + "rc-drawer": "~7.1.0", + "rc-dropdown": "~4.2.0", + "rc-field-form": "~1.44.0", + "rc-image": "~7.6.0", + "rc-input": "~1.4.5", + "rc-input-number": "~9.0.0", + "rc-mentions": "~2.11.1", + "rc-menu": "~9.13.0", + "rc-motion": "^2.9.0", + "rc-notification": "~5.4.0", + "rc-pagination": "~4.0.4", + "rc-picker": "~4.4.2", + "rc-progress": "~4.0.0", + "rc-rate": "~2.12.0", + "rc-resize-observer": "^1.4.0", + "rc-segmented": "~2.3.0", + "rc-select": "~14.13.1", + "rc-slider": "~10.6.2", + "rc-steps": "~6.0.1", + "rc-switch": "~4.1.0", + "rc-table": "~7.45.4", + "rc-tabs": "~14.1.1", + "rc-textarea": "~1.6.3", + "rc-tooltip": "~6.2.0", + "rc-tree": "~5.8.5", + "rc-tree-select": "~5.19.0", + "rc-upload": "~4.5.2", + "rc-util": "^5.39.1", + "scroll-into-view-if-needed": "^3.1.0", + "throttle-debounce": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/ant-design" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/aproba": { + "version": "1.2.0", + "dev": true, + "license": "ISC" + }, + "node_modules/arg": { + "version": "5.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/aria-hidden": { + "version": "1.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.5", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-ify": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/array-includes": { + "version": "3.1.8", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "is-string": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-tree-filter": { + "version": "2.1.0", + "license": "MIT" + }, + "node_modules/array-union": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": 
"^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.reduce": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-array-method-boxes-properly": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "is-string": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.1.0", + "es-shim-unscopables": "^1.0.2" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.2.1", + "get-intrinsic": "^1.2.3", + "is-array-buffer": "^3.0.4", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/arrify": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/asn1.js": { + "version": "4.10.1", + "dev": true, + "license": "MIT", + 
"dependencies": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/asn1.js/node_modules/bn.js": { + "version": "4.12.0", + "dev": true, + "license": "MIT" + }, + "node_modules/assert": { + "version": "1.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "object.assign": "^4.1.4", + "util": "^0.10.4" + } + }, + "node_modules/assert/node_modules/inherits": { + "version": "2.0.3", + "dev": true, + "license": "ISC" + }, + "node_modules/assert/node_modules/util": { + "version": "0.10.4", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "2.0.3" + } + }, + "node_modules/astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/astral-regex/download/astral-regex-2.0.0.tgz", + "integrity": "sha1-SDFDxWeu7UeFdZwIZXhtx319LjE=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/astring": { + "version": "1.8.6", + "dev": true, + "license": "MIT", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/async-validator": { + "version": "4.2.5", + "license": "MIT" + }, + "node_modules/atob": { + "version": "2.1.2", + "dev": true, + "license": "(MIT OR Apache-2.0)", + "bin": { + "atob": "bin/atob.js" + }, + "engines": { + "node": ">= 4.5.0" + } + }, + "node_modules/atomic-sleep": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.19", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-lite": "^1.0.30001599", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + 
"postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axios": { + "version": "0.18.1", + "dev": true, + "license": "MIT", + "dependencies": { + "follow-redirects": "1.5.10", + "is-buffer": "^2.0.2" + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "dev": true, + "license": "MIT", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + 
"version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/big-integer": { + "version": "1.6.52", + "dev": true, + "license": "Unlicense", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/big.js": { + "version": "5.2.2", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + 
"node_modules/binary-extensions": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/binaryextensions": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/bl": { + "version": "1.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/bluebird": { + "version": "3.7.2", + "dev": true, + "license": "MIT" + }, + "node_modules/bn.js": { + "version": "5.2.1", + "dev": true, + "license": "MIT" + }, + "node_modules/boolbase": { + "version": "1.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/boxen": { + "version": "1.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-align": "^2.0.0", + "camelcase": "^4.0.0", + "chalk": "^2.0.1", + "cli-boxes": "^1.0.0", + "string-width": "^2.0.0", + "term-size": "^1.2.0", + "widest-line": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/boxen/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/boxen/node_modules/camelcase": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/boxen/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/boxen/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/boxen/node_modules/color-name": { + "version": "1.1.3", + 
"dev": true, + "license": "MIT" + }, + "node_modules/boxen/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/boxen/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/bplist-parser": { + "version": "0.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "big-integer": "^1.6.44" + }, + "engines": { + "node": ">= 5.10.0" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/brorand": { + "version": "1.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/browserify-aes": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/browserify-cipher": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "node_modules/browserify-des": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/browserify-rsa": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^5.0.0", + "randombytes": "^2.0.1" + } + }, + "node_modules/browserify-sign": { + "version": "4.2.3", + "dev": true, + "license": "ISC", + 
"dependencies": { + "bn.js": "^5.2.1", + "browserify-rsa": "^4.1.0", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "elliptic": "^6.5.5", + "hash-base": "~3.0", + "inherits": "^2.0.4", + "parse-asn1": "^5.1.7", + "readable-stream": "^2.3.8", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/browserify-zlib": { + "version": "0.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "pako": "~1.0.5" + } + }, + "node_modules/browserslist": { + "version": "4.23.0", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001587", + "electron-to-chromium": "^1.4.668", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer": { + "version": "4.9.2", + "dev": true, + "license": "MIT", + "dependencies": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "node_modules/buffer-alloc": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "node_modules/buffer-alloc-unsafe": { + "version": "1.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/buffer-fill": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/buffer-xor": { + "version": "1.0.3", + "dev": true, + 
"license": "MIT" + }, + "node_modules/builtin-status-codes": { + "version": "3.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/builtins": { + "version": "1.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/bundle-name": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "run-applescript": "^5.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cacache": { + "version": "9.3.0", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "bluebird": "^3.5.0", + "chownr": "^1.0.1", + "glob": "^7.1.2", + "graceful-fs": "^4.1.11", + "lru-cache": "^4.1.1", + "mississippi": "^1.3.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.1", + "ssri": "^4.1.6", + "unique-filename": "^1.1.0", + "y18n": "^3.2.1" + } + }, + "node_modules/cacache/node_modules/lru-cache": { + "version": "4.1.5", + "dev": true, + "license": "ISC", + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/cacache/node_modules/yallist": { + "version": "2.1.2", + "dev": true, + "license": "ISC" + }, + "node_modules/call-bind": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + 
"node_modules/camelcase-keys": { + "version": "6.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^5.3.1", + "map-obj": "^4.0.0", + "quick-lru": "^4.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/camelize": { + "version": "1.0.1", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001614", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/capture-stack-trace": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ccount": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "funding": { + "type": 
"github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chardet": { + "version": "0.7.0", + "dev": true, + "license": "MIT" + }, + "node_modules/chokidar": { + "version": "3.5.3", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chownr": { + "version": "1.1.4", + "dev": true, + "license": "ISC" + }, + "node_modules/chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npm.alibaba-inc.com/chrome-trace-event/download/chrome-trace-event-1.0.3.tgz", + "integrity": "sha1-EBXs7UdB4V0GZkqVfbv1DQQeJqw=", + "dev": true, + "peer": true, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cipher-base": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/classnames": { + "version": "2.5.1", + "license": "MIT" + }, + "node_modules/clean-css": { + "version": "5.3.3", + "dev": true, + "license": "MIT", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-css/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + 
"engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cli-boxes": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cli-cursor": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cli-spinners": { + "version": "1.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/cli-truncate": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "slice-ansi": "^5.0.0", + "string-width": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-truncate/node_modules/ansi-regex": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/cli-truncate/node_modules/string-width": { + "version": "5.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-truncate/node_modules/strip-ansi": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/cli-width": { + "version": "2.2.1", + "dev": true, + "license": "ISC" + }, + "node_modules/click-to-react-component": { + "version": "1.1.0", + "dev": true, + "license": "ISC", + "dependencies": { + "@floating-ui/react-dom-interactions": "^0.3.1", + "htm": "^3.1.0", + "react-merge-refs": "^1.1.0" + }, + "peerDependencies": { + "react": 
">=16.8.0" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "8.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/coa": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/q": "^1.5.1", + "chalk": "^2.4.1", + "q": "^1.1.2" + }, + "engines": { + "node": ">= 4.0" + } + }, + "node_modules/coa/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/coa/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + 
"ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/coa/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/coa/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/coa/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/coa/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/codesandbox": { + "version": "2.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "axios": "^0.18.1", + "chalk": "^2.4.1", + "codesandbox-import-util-types": "^2.2.3", + "codesandbox-import-utils": "^2.2.3", + "commander": "^2.9.0", + "datauri": "^3.0.0", + "filesize": "^3.6.1", + "fs-extra": "^3.0.1", + "git-branch": "^1.0.0", + "git-repo-name": "^0.6.0", + "git-username": "^0.5.0", + "humps": "^2.0.1", + "inquirer": "^6.2.2", + "lodash": "^4.17.5", + "lz-string": "^1.4.4", + "ms": "^2.0.0", + "open": "^6.3.0", + "ora": "^1.3.0", + "pacote": "^2.7.36", + "shortid": "^2.2.8", + "update-notifier": "^2.2.0" + }, + "bin": { + "codesandbox": "lib/index.js" + } + }, + "node_modules/codesandbox-import-util-types": { + "version": "2.2.3", + "dev": true + }, + "node_modules/codesandbox-import-utils": { + "version": "2.2.3", + "dev": true, + "dependencies": { + "codesandbox-import-util-types": "^2.2.3", + "istextorbinary": "^2.2.1", + "lz-string": "^1.4.4" + } + }, + "node_modules/codesandbox/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/codesandbox/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/codesandbox/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/codesandbox/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/codesandbox/node_modules/fs-extra": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.2", + "jsonfile": "^3.0.0", + "universalify": "^0.1.0" + } + }, + "node_modules/codesandbox/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/codesandbox/node_modules/jsonfile": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/codesandbox/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/codesandbox/node_modules/universalify": { + "version": "0.1.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/color": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.3", + "color-string": "^1.6.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "dev": true, + "license": "MIT" + }, + "node_modules/color-string": { + "version": "1.9.1", + "dev": true, + "license": "MIT", + 
"dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "node_modules/color/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npm.alibaba-inc.com/colord/download/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", + "dev": true, + "peer": true + }, + "node_modules/colorette": { + "version": "2.0.20", + "dev": true, + "license": "MIT" + }, + "node_modules/comlink": { + "version": "4.4.1", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "2.20.3", + "dev": true, + "license": "MIT" + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/compare-func": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "array-ify": "^1.0.0", + "dot-prop": "^5.1.0" + } + }, + "node_modules/compute-scroll-into-view": { + "version": "3.1.0", + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-stream": { + "version": "1.6.2", + "dev": true, + "engines": [ + "node >= 0.8" + ], + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/configstore": { + "version": "3.1.5", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "dot-prop": "^4.2.1", + "graceful-fs": "^4.1.2", + 
"make-dir": "^1.0.0", + "unique-string": "^1.0.0", + "write-file-atomic": "^2.0.0", + "xdg-basedir": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/configstore/node_modules/dot-prop": { + "version": "4.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-obj": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/configstore/node_modules/is-obj": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/configstore/node_modules/make-dir": { + "version": "1.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/configstore/node_modules/pify": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/configstore/node_modules/write-file-atomic": { + "version": "2.4.3", + "dev": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.11", + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.2" + } + }, + "node_modules/console-browserify": { + "version": "1.2.0", + "dev": true + }, + "node_modules/constants-browserify": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/conventional-changelog-angular": { + "version": "6.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "compare-func": "^2.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/conventional-changelog-conventionalcommits": { + "version": "6.1.0", + "dev": true, + "license": "ISC", + "dependencies": { + "compare-func": "^2.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/conventional-commits-parser": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "is-text-path": "^1.0.1", + "JSONStream": "^1.3.5", + "meow": "^8.1.2", + "split2": "^3.2.2" + }, + "bin": { + "conventional-commits-parser": "cli.js" + }, + "engines": { + "node": ">=14" + } + }, + 
"node_modules/convert-source-map": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/copy-anything": { + "version": "2.0.6", + "dev": true, + "license": "MIT", + "dependencies": { + "is-what": "^3.14.1" + }, + "funding": { + "url": "https://github.com/sponsors/mesqueeb" + } + }, + "node_modules/copy-concurrently": { + "version": "1.0.5", + "dev": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.1.1", + "fs-write-stream-atomic": "^1.0.8", + "iferr": "^0.1.5", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.0" + } + }, + "node_modules/copy-to-clipboard": { + "version": "3.3.3", + "license": "MIT", + "dependencies": { + "toggle-selection": "^1.0.6" + } + }, + "node_modules/core-js": { + "version": "3.34.0", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.37.0", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/cors": { + "version": "2.8.5", + "dev": true, + "license": "MIT", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "dev": true, + "license": "MIT", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cosmiconfig-typescript-loader": { + "version": "4.4.0", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=v14.21.3" + }, + "peerDependencies": { + "@types/node": "*", + "cosmiconfig": ">=7", + "ts-node": ">=10", + "typescript": ">=4" + } + }, + "node_modules/create-ecdh": { + "version": "4.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.1.0", + "elliptic": "^6.5.3" + } + }, + "node_modules/create-ecdh/node_modules/bn.js": { + "version": "4.12.0", + "dev": true, + "license": "MIT" + }, + "node_modules/create-error-class": { + "version": "3.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "capture-stack-trace": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/create-hash": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "node_modules/create-hmac": { + "version": "1.1.7", + "dev": true, + "license": "MIT", + "dependencies": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "node_modules/create-require": { + "version": "1.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-browserify": { + "version": "3.12.0", + "dev": true, + "license": "MIT", + "dependencies": { + "browserify-cipher": "^1.0.0", + "browserify-sign": "^4.0.0", + "create-ecdh": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.0", + "diffie-hellman": "^5.0.0", + "inherits": "^2.0.1", + "pbkdf2": "^3.0.3", + "public-encrypt": "^4.0.0", + "randombytes": "^2.0.0", + "randomfill": "^1.0.3" + }, + "engines": { + "node": "*" + } + }, + "node_modules/crypto-random-string": { + "version": "1.0.0", + 
"dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/css": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.4", + "source-map": "^0.6.1", + "source-map-resolve": "^0.6.0" + } + }, + "node_modules/css-blank-pseudo": { + "version": "3.0.3", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-selector-parser": "^6.0.9" + }, + "bin": { + "css-blank-pseudo": "dist/cli.cjs" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-color-keywords": { + "version": "1.0.0", + "license": "ISC", + "engines": { + "node": ">=4" + } + }, + "node_modules/css-functions-list": { + "version": "3.2.1", + "resolved": "https://registry.npm.alibaba-inc.com/css-functions-list/download/css-functions-list-3.2.1.tgz", + "integrity": "sha512-Nj5YcaGgBtuUmn1D7oHqPW0c9iui7xsTsj5lIX8ZgevdfhmjFfKB3r8moHJtNJnctnYXJyYX5I1pp90HM4TPgQ==", + "dev": true, + "peer": true, + "engines": { + "node": ">=12 || >=16" + } + }, + "node_modules/css-has-pseudo": { + "version": "3.0.4", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-selector-parser": "^6.0.9" + }, + "bin": { + "css-has-pseudo": "dist/cli.cjs" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-loader": { + "version": "6.7.1", + "dev": true, + "license": "MIT", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.7", + "postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.0", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.3.5" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + 
"node_modules/css-prefers-color-scheme": { + "version": "6.0.3", + "dev": true, + "license": "CC0-1.0", + "bin": { + "css-prefers-color-scheme": "dist/cli.cjs" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-select": { + "version": "2.1.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^3.2.1", + "domutils": "^1.7.0", + "nth-check": "^1.0.2" + } + }, + "node_modules/css-select-base-adapter": { + "version": "0.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/css-select/node_modules/dom-serializer": { + "version": "0.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "entities": "^2.0.0" + } + }, + "node_modules/css-select/node_modules/domutils": { + "version": "1.7.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "0", + "domelementtype": "1" + } + }, + "node_modules/css-select/node_modules/domutils/node_modules/domelementtype": { + "version": "1.3.1", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/css-select/node_modules/entities": { + "version": "2.2.0", + "dev": true, + "license": "BSD-2-Clause", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/css-to-react-native": { + "version": "3.2.0", + "license": "MIT", + "dependencies": { + "camelize": "^1.0.0", + "css-color-keywords": "^1.0.0", + "postcss-value-parser": "^4.0.2" + } + }, + "node_modules/css-tree": { + "version": "1.0.0-alpha.37", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.4", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/css-tree/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/css-what": { + "version": "3.4.2", + "dev": true, + "license": 
"BSD-2-Clause", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cssdb": { + "version": "6.6.3", + "dev": true, + "license": "CC0-1.0", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csso": { + "version": "4.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "css-tree": "^1.1.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "1.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.14", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/csso/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "license": "MIT" + }, + "node_modules/current-script-polyfill": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/cwd": { + "version": "0.9.1", + "dev": true, + "license": "MIT", + "dependencies": { + "find-pkg": "^0.1.0" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/cyclist": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/dargs": { + "version": "7.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, 
+ "node_modules/data-view-buffer": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/datauri": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "image-size": "0.8.3", + "mimer": "1.1.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/dayjs": { + "version": "1.11.11", + "license": "MIT" + }, + "node_modules/debug": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/decamelize": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decamelize-keys": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "decamelize": "^1.1.0", + "map-obj": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decamelize-keys/node_modules/map-obj": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, 
+ "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decode-uri-component": { + "version": "0.2.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npm.alibaba-inc.com/deep-is/download/deep-is-0.1.4.tgz", + "integrity": "sha1-pvLc5hL63S7x9Rm3NVHxfoUZmDE=", + "dev": true, + "peer": true + }, + "node_modules/deep-rename-keys": { + "version": "0.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "kind-of": "^3.0.2", + "rename-keys": "^1.1.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deep-rename-keys/node_modules/is-buffer": { + "version": "1.1.6", + "dev": true, + "license": "MIT" + }, + "node_modules/deep-rename-keys/node_modules/kind-of": { + "version": "3.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-browser": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "bundle-name": "^3.0.0", + "default-browser-id": "^3.0.0", + "execa": "^7.1.1", + "titleize": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "bplist-parser": "^0.2.0", + "untildify": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser/node_modules/execa": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.1", + "human-signals": "^4.3.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^3.0.7", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": "^14.18.0 || ^16.14.0 || >=18.0.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/default-browser/node_modules/human-signals": { + "version": "4.3.1", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=14.18.0" + } + }, + "node_modules/default-browser/node_modules/is-stream": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser/node_modules/mimic-fn": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser/node_modules/npm-run-path": { + "version": "5.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser/node_modules/onetime": { + "version": "6.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser/node_modules/path-key": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { 
+ "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser/node_modules/strip-final-newline": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/des.js": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/detect-indent": { + "version": "7.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/detect-libc": { + "version": "1.0.3", + "dev": true, + "license": "Apache-2.0", + "bin": { + "detect-libc": "bin/detect-libc.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/detect-newline": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } 
+ }, + "node_modules/detect-node": { + "version": "2.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/diff": { + "version": "4.0.2", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/diffie-hellman": { + "version": "5.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + } + }, + "node_modules/diffie-hellman/node_modules/bn.js": { + "version": "4.12.0", + "dev": true, + "license": "MIT" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "2.1.0", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domain-browser": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4", + "npm": ">=1.2" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "BSD-2-Clause" + }, + "node_modules/domhandler": { + "version": "5.0.3", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.1.0", + 
"dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "5.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dumi": { + "version": "2.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@ant-design/icons-svg": "^4.2.1", + "@makotot/ghostui": "^2.0.0", + "@stackblitz/sdk": "^1.9.0", + "@swc/core": "1.4.2", + "@types/hast": "^2.3.5", + "@types/mdast": "^3.0.12", + "@umijs/bundler-utils": "^4.0.84", + "@umijs/core": "^4.0.84", + "@umijs/utils": "^4.0.84", + "animated-scroll-to": "^2.3.0", + "classnames": "2.3.2", + "codesandbox": "^2.2.3", + "comlink": "^4.4.1", + "copy-to-clipboard": "^3.3.3", + "deepmerge": "^4.3.1", + "dumi-afx-deps": "^1.0.0-alpha.19", + "dumi-assets-types": "2.3.0", + "enhanced-resolve": "^5.15.0", + "estree-util-to-js": "^1.2.0", + "estree-util-visit": "^1.2.1", + "file-system-cache": "^2.4.3", + "github-slugger": "^1.5.0", + "hast-util-is-element": "^2.1.3", + "hast-util-raw": "^8.0.0", + "hast-util-to-estree": "^2.3.3", + "hast-util-to-string": "^2.0.0", + "heti": "^0.9.4", + "hosted-git-info": "^6.1.1", + "html-to-text": "^9.0.5", + "html2sketch": "^1.0.2", + "js-yaml": "^4.1.0", + "lodash.throttle": "^4.1.1", + "mdast-util-find-and-replace": "^2.2.2", + "mdast-util-to-string": "^3.2.0", + "nprogress": "^0.2.0", + "pluralize": "^8.0.0", + "prism-react-renderer": "^1.3.5", + "prism-themes": "^1.9.0", + "prismjs": "^1.29.0", + "raw-loader": "^4.0.2", + "rc-motion": "^2.7.3", + "rc-tabs": "^12.10.0", + "rc-tooltip": "^6.1.3", + "rc-tree": "^5.7.9", + "rc-util": "^5.38.0", + 
"react-copy-to-clipboard": "^5.1.0", + "react-error-boundary": "^4.0.10", + "react-intl": "^6.4.4", + "react-loading-skeleton": "^3.1.1", + "react-simple-code-editor": "^0.13.1", + "rehype-autolink-headings": "^6.1.1", + "rehype-remove-comments": "^5.0.0", + "rehype-stringify": "^9.0.3", + "remark-directive": "^2.0.1", + "remark-frontmatter": "^4.0.1", + "remark-gfm": "^3.0.1", + "remark-parse": "^10.0.2", + "remark-rehype": "^10.1.0", + "sass": "^1.64.1", + "sitemap": "^7.1.1", + "sucrase": "^3.34.0", + "umi": "^4.0.84", + "unified": "^10.1.2", + "unist-util-visit": "^4.1.2", + "unist-util-visit-parents": "^5.1.3", + "url": "^0.11.1", + "v8-compile-cache": "2.3.0", + "vfile": "^5.3.7" + }, + "bin": { + "dumi": "bin/dumi.js" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/dumi-afx-deps": { + "version": "1.0.0-alpha.20", + "dev": true, + "license": "MIT" + }, + "node_modules/dumi-assets-types": { + "version": "2.3.0", + "dev": true, + "license": "MIT" + }, + "node_modules/dumi/node_modules/@rc-component/trigger": { + "version": "1.18.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.2", + "@rc-component/portal": "^1.1.0", + "classnames": "^2.3.2", + "rc-motion": "^2.0.0", + "rc-resize-observer": "^1.3.1", + "rc-util": "^5.38.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/dumi/node_modules/classnames": { + "version": "2.3.2", + "dev": true, + "license": "MIT" + }, + "node_modules/dumi/node_modules/rc-dropdown": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@rc-component/trigger": "^1.7.0", + "classnames": "^2.2.6", + "rc-util": "^5.17.0" + }, + "peerDependencies": { + "react": ">=16.11.0", + "react-dom": ">=16.11.0" + } + }, + "node_modules/dumi/node_modules/rc-menu": { + "version": "9.12.4", + "dev": true, + "license": 
"MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^1.17.0", + "classnames": "2.x", + "rc-motion": "^2.4.3", + "rc-overflow": "^1.3.1", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/dumi/node_modules/rc-tabs": { + "version": "12.15.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "classnames": "2.x", + "rc-dropdown": "~4.1.0", + "rc-menu": "~9.12.0", + "rc-motion": "^2.6.2", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.34.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/duplexer3": { + "version": "0.1.5", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/duplexify": { + "version": "3.7.1", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "dev": true, + "license": "MIT" + }, + "node_modules/editions": { + "version": "2.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "errlop": "^2.0.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=0.8" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/editions/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.751", + "dev": true, + "license": "ISC" + }, + "node_modules/elliptic": { + "version": "6.5.5", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.11.9", + "brorand": "^1.1.0", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.1", + "inherits": "^2.0.4", + "minimalistic-assert": "^1.0.1", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + 
"node_modules/elliptic/node_modules/bn.js": { + "version": "4.12.0", + "dev": true, + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "dev": true, + "license": "MIT" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/encoding": { + "version": "0.1.13", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "^0.6.2" + } + }, + "node_modules/encoding/node_modules/iconv-lite": { + "version": "0.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "dev": true, + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.16.0", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/enquire.js": { + "version": "2.1.6", + "resolved": "https://registry.npm.alibaba-inc.com/enquire.js/download/enquire.js-2.1.6.tgz", + "integrity": "sha1-PoeAybi4NQhMP2DhZtvDwqPImBQ=" + }, + "node_modules/entities": { + "version": "4.5.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/err-code": { + "version": "1.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/errlop": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/errno": { + "version": "0.1.8", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "prr": "~1.0.1" + }, + "bin": { + "errno": "cli.js" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "dev": true, + "license": "MIT", 
+ "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/error-stack-parser": { + "version": "2.1.4", + "dev": true, + "license": "MIT", + "dependencies": { + "stackframe": "^1.3.4" + } + }, + "node_modules/es-abstract": { + "version": "1.23.3", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "arraybuffer.prototype.slice": "^1.0.3", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "data-view-buffer": "^1.0.1", + "data-view-byte-length": "^1.0.1", + "data-view-byte-offset": "^1.0.0", + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-set-tostringtag": "^2.0.3", + "es-to-primitive": "^1.2.1", + "function.prototype.name": "^1.1.6", + "get-intrinsic": "^1.2.4", + "get-symbol-description": "^1.0.2", + "globalthis": "^1.0.3", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "hasown": "^2.0.2", + "internal-slot": "^1.0.7", + "is-array-buffer": "^3.0.4", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.1", + "is-negative-zero": "^2.0.3", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.3", + "is-string": "^1.0.7", + "is-typed-array": "^1.1.13", + "is-weakref": "^1.0.2", + "object-inspect": "^1.13.1", + "object-keys": "^1.1.1", + "object.assign": "^4.1.5", + "regexp.prototype.flags": "^1.5.2", + "safe-array-concat": "^1.1.2", + "safe-regex-test": "^1.0.3", + "string.prototype.trim": "^1.2.9", + "string.prototype.trimend": "^1.0.8", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.2", + "typed-array-byte-length": "^1.0.1", + "typed-array-byte-offset": "^1.0.2", + "typed-array-length": "^1.0.6", + "unbox-primitive": "^1.0.2", + "which-typed-array": "^1.1.15" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-array-method-boxes-properly": { + "version": "1.0.0", + "dev": true, 
+ "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-get-iterator": { + "version": "1.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.3", + "has-symbols": "^1.0.3", + "is-arguments": "^1.1.1", + "is-map": "^2.0.2", + "is-set": "^2.0.2", + "is-string": "^1.0.7", + "isarray": "^2.0.5", + "stop-iteration-iterator": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-get-iterator/node_modules/isarray": { + "version": "2.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/es-iterator-helpers": { + "version": "1.0.19", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "iterator.prototype": "^1.1.2", + "safe-array-concat": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/es-module-lexer/download/es-module-lexer-1.4.1.tgz", + "integrity": "sha512-cXLGjP0c4T3flZJKQSuziYoq7MlT+rnvfZjfp7h+I7K9BNX54kP9nyWvdbwjQ4u1iWbOL4u96fgeZLToQlZC7w==", + "dev": true, + "peer": true + }, + "node_modules/es-object-atoms": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + 
"node_modules/es-set-tostringtag": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.0" + } + }, + "node_modules/es-to-primitive": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es5-imcompatible-versions": { + "version": "0.1.89", + "dev": true, + "license": "MIT" + }, + "node_modules/es6-promise": { + "version": "4.2.8", + "dev": true, + "license": "MIT" + }, + "node_modules/es6-promisify": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "es6-promise": "^4.0.3" + } + }, + "node_modules/esbuild": { + "version": "0.17.19", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.17.19", + "@esbuild/android-arm64": "0.17.19", + "@esbuild/android-x64": "0.17.19", + "@esbuild/darwin-arm64": "0.17.19", + "@esbuild/darwin-x64": "0.17.19", + "@esbuild/freebsd-arm64": "0.17.19", + "@esbuild/freebsd-x64": "0.17.19", + "@esbuild/linux-arm": "0.17.19", + "@esbuild/linux-arm64": "0.17.19", + "@esbuild/linux-ia32": "0.17.19", + "@esbuild/linux-loong64": "0.17.19", + "@esbuild/linux-mips64el": "0.17.19", + "@esbuild/linux-ppc64": "0.17.19", + "@esbuild/linux-riscv64": "0.17.19", + "@esbuild/linux-s390x": "0.17.19", + "@esbuild/linux-x64": "0.17.19", + "@esbuild/netbsd-x64": "0.17.19", + "@esbuild/openbsd-x64": "0.17.19", + "@esbuild/sunos-x64": "0.17.19", + "@esbuild/win32-arm64": 
"0.17.19", + "@esbuild/win32-ia32": "0.17.19", + "@esbuild/win32-x64": "0.17.19" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/eslint": { + "version": "8.57.0", + "resolved": "https://registry.npm.alibaba-inc.com/eslint/download/eslint-8.57.0.tgz", + "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", + "dev": true, + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/eslint-plugin-jest": { + "version": "27.2.3", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@typescript-eslint/utils": "^5.10.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@typescript-eslint/eslint-plugin": "^5.0.0 || ^6.0.0", + "eslint": "^7.0.0 || ^8.0.0", + "jest": "*" + }, + "peerDependenciesMeta": { + "@typescript-eslint/eslint-plugin": { + "optional": true + }, + "jest": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.33.2", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flatmap": "^1.3.1", + "array.prototype.tosorted": "^1.1.1", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.12", + "estraverse": "^5.3.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.6", + "object.fromentries": "^2.0.6", + "object.hasown": "^1.1.2", + "object.values": "^1.1.6", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.4", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.8" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/eslint-scope/node_modules/estraverse": { + "version": "4.3.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "2.1.0", + 
"dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10" + } + }, + "node_modules/eslint/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/ansi-regex/download/ansi-regex-5.0.1.tgz", + "integrity": "sha1-CCyyyJyf6GWaMRpTvWpNxTAdswQ=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/eslint/node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/doctrine/download/doctrine-3.0.0.tgz", + "integrity": "sha1-rd6+rXKmV023g2OdyHoSF3OXOWE=", + "dev": true, + "peer": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/eslint/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/escape-string-regexp/download/escape-string-regexp-4.0.0.tgz", + "integrity": "sha1-FLqDpdNz49MR5a/KKc9b+tllvzQ=", + "dev": true, + "peer": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/eslint/node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npm.alibaba-inc.com/eslint-scope/download/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "peer": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + 
"node_modules/eslint/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npm.alibaba-inc.com/eslint-visitor-keys/download/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "peer": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/eslint/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npm.alibaba-inc.com/glob-parent/download/glob-parent-6.0.2.tgz", + "integrity": "sha1-bSN9mQg5UMeSkPJMdkKj3poo+eM=", + "dev": true, + "peer": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npm.alibaba-inc.com/globals/download/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "peer": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npm.alibaba-inc.com/is-path-inside/download/is-path-inside-3.0.3.tgz", + "integrity": "sha1-0jE2LlOgf/Kw4Op/7QSRYf/RYoM=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + "integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + }, + "node_modules/eslint/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-ansi/download/strip-ansi-6.0.1.tgz", + "integrity": "sha1-nibGPTD1NEPpSJSVshBdN7Z6hdk=", + "dev": true, + "peer": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": 
{ + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npm.alibaba-inc.com/type-fest/download/type-fest-0.20.2.tgz", + "integrity": "sha1-G/IH9LKPkVg2ZstfvTJ4hzAc1fQ=", + "dev": true, + "peer": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npm.alibaba-inc.com/espree/download/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "peer": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npm.alibaba-inc.com/eslint-visitor-keys/download/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "peer": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npm.alibaba-inc.com/esquery/download/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "peer": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + 
"version": "5.3.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-visit": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eventemitter3": { + "version": "5.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/events": { + "version": "3.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/evp_bytestokey": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + 
"strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/expand-tilde": { + "version": "1.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "os-homedir": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/external-editor": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.2.12", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npm.alibaba-inc.com/fast-levenshtein/download/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true, + "peer": true + }, + "node_modules/fast-redact": { + "version": "3.5.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://registry.npm.alibaba-inc.com/fastest-levenshtein/download/fastest-levenshtein-1.0.16.tgz", + "integrity": 
"sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "dev": true, + "peer": true, + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fetch-blob": { + "version": "3.2.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, + "node_modules/figures": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/file-entry-cache/download/file-entry-cache-6.0.1.tgz", + "integrity": "sha1-IRst2WWcsDlLBz5zI6w8kz1SICc=", + "dev": true, + "peer": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/file-name": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/file-system-cache": { + "version": "2.4.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/fs-extra": "11.0.1", + "@types/ramda": "0.29.3", + "fs-extra": "11.1.1", + "ramda": "0.29.0" + } + }, + 
"node_modules/file-system-cache/node_modules/fs-extra": { + "version": "11.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/filesize": { + "version": "3.6.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/filter-obj": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/find-file-up": { + "version": "0.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "fs-exists-sync": "^0.1.0", + "resolve-dir": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/find-pkg": { + "version": "0.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "find-file-up": "^0.1.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/flat-cache/download/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "peer": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flat-cache/node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npm.alibaba-inc.com/rimraf/download/rimraf-3.0.2.tgz", + 
"integrity": "sha1-8aVAK6YiCtUswSgrrBrjqkn9Bho=", + "dev": true, + "peer": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/flatted": { + "version": "3.2.9", + "resolved": "https://registry.npm.alibaba-inc.com/flatted/download/flatted-3.2.9.tgz", + "integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==", + "dev": true, + "peer": true + }, + "node_modules/flush-write-stream": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" + } + }, + "node_modules/follow-redirects": { + "version": "1.5.10", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "=3.1.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/for-each": { + "version": "0.3.3", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/foreground-child": { + "version": "3.1.1", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "8.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.16.7", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "cosmiconfig": "^7.0.1", + "deepmerge": "^4.2.2", + "fs-extra": "^10.0.0", + "memfs": "^3.4.1", + "minimatch": "^3.0.4", + "node-abort-controller": "^3.0.1", + "schema-utils": "^3.1.1", + "semver": "^7.3.5", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">=12.13.0", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + 
"typescript": ">3.6.0", + "webpack": "^5.11.0" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "10.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/yaml": { + "version": "1.10.2", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, + "node_modules/format": { + "version": "0.2.2", + "dev": true, + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "dev": true, + "license": "MIT", + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/from2": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/fs-exists-sync": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fs-extra": { + "version": "11.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + 
"node_modules/fs-monkey": { + "version": "1.0.5", + "dev": true, + "license": "Unlicense" + }, + "node_modules/fs-write-stream-atomic": { + "version": "1.0.10", + "dev": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.6", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/genfun": { + "version": "4.0.1", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-symbol-description": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-tsconfig": { + "version": "4.7.3", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/get-value": { + "version": "2.0.6", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/git-branch": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/git-config-path": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "extend-shallow": "^2.0.1", + "fs-exists-sync": "^0.1.0", + "homedir-polyfill": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/git-hooks-list": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/fisker/git-hooks-list?sponsor=1" + } + }, + "node_modules/git-raw-commits": { + "version": "2.0.11", + "dev": true, + "license": "MIT", + "dependencies": { + "dargs": "^7.0.0", + "lodash": "^4.17.15", + "meow": "^8.0.0", + "split2": "^3.0.0", + "through2": "^4.0.0" + }, + "bin": { + "git-raw-commits": "cli.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/git-repo-name": { + "version": "0.6.0", 
+ "dev": true, + "license": "MIT", + "dependencies": { + "cwd": "^0.9.1", + "file-name": "^0.1.0", + "lazy-cache": "^1.0.4", + "remote-origin-url": "^0.5.1" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/git-username": { + "version": "0.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "remote-origin-url": "^0.4.0" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/git-username/node_modules/parse-git-config": { + "version": "0.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/git-username/node_modules/remote-origin-url": { + "version": "0.4.0", + "dev": true, + "license": "MIT", + "dependencies": { + "parse-git-config": "^0.2.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "dev": true, + "license": "ISC" + }, + "node_modules/glob": { + "version": "7.2.3", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/glob-to-regexp/download/glob-to-regexp-0.4.1.tgz", + "integrity": "sha1-x1KXCHyFG5pXi9IX3VmpL1n+VG4=", + "dev": true, + "peer": true + }, + "node_modules/global-dirs": { + "version": "0.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.4" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/global-modules": { + "version": "0.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + 
"global-prefix": "^0.1.4", + "is-windows": "^0.2.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/global-prefix": { + "version": "0.1.5", + "dev": true, + "license": "MIT", + "dependencies": { + "homedir-polyfill": "^1.0.0", + "ini": "^1.3.4", + "is-windows": "^0.2.0", + "which": "^1.2.12" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/globalthis": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globjoin": { + "version": "0.1.4", + "resolved": "https://registry.npm.alibaba-inc.com/globjoin/download/globjoin-0.1.4.tgz", + "integrity": "sha1-L0SUrIkZ43Z8XLtpHp9GMyQoXUM=", + "dev": true, + "peer": true + }, + "node_modules/gopd": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "6.7.1", + "dev": true, + "license": "MIT", + "dependencies": { + "create-error-class": "^3.0.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "is-redirect": "^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": 
"^1.0.0", + "lowercase-keys": "^1.0.0", + "safe-buffer": "^5.0.1", + "timed-out": "^4.0.0", + "unzip-response": "^2.0.1", + "url-parse-lax": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/got/node_modules/get-stream": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/got/node_modules/is-stream": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "dev": true, + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/hard-rejection": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/harmony-reflect": { + "version": "1.6.2", + "dev": true, + "license": "(Apache-2.0 OR MPL-1.1)" + }, + "node_modules/has-bigints": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "dev": true, 
+ "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-value": { + "version": "0.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-value/node_modules/isobject": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "isarray": "1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values": { + "version": "0.1.4", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/hash-base": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/hash.js": { + "version": "1.1.7", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "7.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "hastscript": "^7.0.0", + "property-information": "^6.0.0", + "vfile": "^5.0.0", + "vfile-location": "^4.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-has-property": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-heading-rank": { + "version": "2.1.1", + "dev": true, + 
"license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-is-conditional-comment": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-is-element": { + "version": "2.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "8.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "extend": "^3.0.0", + "hast-util-from-parse5": "^7.0.0", + "hast-util-to-parse5": "^7.0.0", + "html-void-elements": "^2.0.0", + "mdast-util-to-hast": "^12.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0", + "vfile": "^5.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "2.3.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "estree-util-attach-comments": "^2.0.0", + "estree-util-is-identifier-name": "^2.0.0", + "hast-util-whitespace": "^2.0.0", + "mdast-util-mdx-expression": "^1.0.0", + "mdast-util-mdxjs-esm": "^1.0.0", + 
"property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.1", + "unist-util-position": "^4.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html": { + "version": "8.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-raw": "^7.0.0", + "hast-util-whitespace": "^2.0.0", + "html-void-elements": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html/node_modules/hast-util-raw": { + "version": "7.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/parse5": "^6.0.0", + "hast-util-from-parse5": "^7.0.0", + "hast-util-to-parse5": "^7.0.0", + "html-void-elements": "^2.0.0", + "parse5": "^6.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0", + "vfile": "^5.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html/node_modules/parse5": { + "version": "6.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/hast-util-to-parse5": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-string": { + "version": "2.0.0", + "dev": true, + 
"license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^3.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/heti": { + "version": "0.9.4", + "dev": true, + "license": "MIT", + "dependencies": { + "heti-findandreplacedomtext": "^0.5.0" + } + }, + "node_modules/heti-findandreplacedomtext": { + "version": "0.5.0", + "dev": true, + "license": "unlicense" + }, + "node_modules/history": { + "version": "5.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.7.6" + } + }, + "node_modules/hmac-drbg": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hoist-non-react-statics/node_modules/react-is": { + "version": "16.13.1", + "dev": true, + "license": "MIT" + }, + "node_modules/homedir-polyfill": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "parse-passwd": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/hosted-git-info": { + "version": "6.1.1", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^7.5.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/htm": { + "version": "3.1.1", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/html-entities": { + "version": "2.5.2", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ], + "license": "MIT" + }, + "node_modules/html-minifier-terser": { + "version": "6.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "8.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npm.alibaba-inc.com/html-tags/download/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/html-to-text": { + "version": "9.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@selderee/plugin-htmlparser2": "^0.11.0", + "deepmerge": "^4.3.1", + "dom-serializer": "^2.0.0", + "htmlparser2": "^8.0.2", + "selderee": "^0.11.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/html-void-elements": { + 
"version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "webpack": "^5.20.0" + } + }, + "node_modules/html2sketch": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@sketch-hq/sketch-file-format-ts": "^6", + "color": "^3.1.2", + "css": "^3.0.0", + "svg-pathdata": "^5.0.5", + "svgo-browser": "^1.3.7", + "svgson": "^4.1.0", + "transformation-matrix": "^2.11.1", + "uuid": "^8.2.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "dev": true, + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "3.8.1", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "dev": true, + "license": "MIT" + }, + "node_modules/http-proxy-agent": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "4", + "debug": "3.1.0" + }, + "engines": { + "node": ">= 4.5.0" + } + }, + "node_modules/https-browserify": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/https-proxy-agent": { + "version": "2.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + 
"agent-base": "^4.3.0", + "debug": "^3.1.0" + }, + "engines": { + "node": ">= 4.5.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/humps": { + "version": "2.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/husky": { + "version": "8.0.3", + "dev": true, + "license": "MIT", + "bin": { + "husky": "lib/bin.js" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/typicode" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "dev": true, + "license": "ISC", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/identity-obj-proxy": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "harmony-reflect": "^1.4.6" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/iferr": { + "version": "0.1.5", + "dev": true, + "license": "MIT" + }, + "node_modules/ignore": { + "version": "5.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "0.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "queue": "6.0.1" + }, + "bin": { + "image-size": "bin/image-size.js" + 
}, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/immutable": { + "version": "4.3.5", + "dev": true, + "license": "MIT" + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-fresh/node_modules/resolve-from": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/import-lazy": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "dev": true, + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "dev": true, + "license": "ISC" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/inquirer": { + "version": "6.5.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-escapes": "^3.2.0", + "chalk": "^2.4.2", + "cli-cursor": "^2.1.0", + "cli-width": "^2.0.0", + "external-editor": "^3.0.3", + "figures": "^2.0.0", + "lodash": "^4.17.12", + "mute-stream": "0.0.7", + "run-async": "^2.2.0", + "rxjs": "^6.4.0", + "string-width": "^2.1.0", + "strip-ansi": "^5.1.0", + "through": "^2.3.6" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/inquirer/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + 
"dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/inquirer/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/inquirer/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/inquirer/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/inquirer/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/inquirer/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/internal-slot": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.0", + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/intl-messageformat": { + "version": "10.5.11", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/fast-memoize": "2.2.0", + "@formatjs/icu-messageformat-parser": "2.7.6", + "tslib": "^2.4.0" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ip": { + "version": "1.1.9", + "dev": true, + "license": "MIT" + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "dev": true, + "license": 
"MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arguments": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "dev": true, + "license": "MIT" + }, + "node_modules/is-arrow-function": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-async-function": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-boolean-object": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-buffer": { + 
"version": "2.0.5", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-ci": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ci-info": "^1.5.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-ci/node_modules/ci-info": { + "version": "1.6.0", + "dev": true, + "license": "MIT" + }, + "node_modules/is-core-module": { + "version": "2.13.1", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-equal": 
{ + "version": "1.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "es-get-iterator": "^1.1.3", + "es-to-primitive": "^1.2.1", + "functions-have-names": "^1.2.3", + "has-bigints": "^1.0.2", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0", + "is-arrow-function": "^2.0.3", + "is-bigint": "^1.0.4", + "is-boolean-object": "^1.1.2", + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-generator-function": "^1.0.10", + "is-number-object": "^1.0.7", + "is-regex": "^1.1.4", + "is-string": "^1.0.7", + "is-symbol": "^1.0.4", + "isarray": "^2.0.5", + "object-inspect": "^1.13.1", + "object.entries": "^1.1.7", + "object.getprototypeof": "^1.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-equal/node_modules/isarray": { + "version": "2.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "dev": true, + 
"license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-installed-globally": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "global-dirs": "^0.1.0", + "is-path-inside": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/is-map": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-npm": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-obj": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-path-inside": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "path-is-inside": 
"^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-obj": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-redirect": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-regex": { + "version": "1.1.4", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-retry-allowed": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-set": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-string": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 
0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-text-path": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "text-extensions": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.13", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-what": { + "version": "3.14.1", + "dev": true, + "license": "MIT" + }, + "node_modules/is-windows": { + "version": "0.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-wsl": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/isobject": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isomorphic-unfetch": { + "version": "4.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "node-fetch": 
"^3.2.0", + "unfetch": "^5.0.0" + } + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/istextorbinary": { + "version": "2.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "binaryextensions": "^2.1.2", + "editions": "^2.2.0", + "textextensions": "^2.5.0" + }, + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/iterator.prototype": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.4", + "set-function-name": "^2.0.1" + } + }, + "node_modules/jackspeak": { + "version": "2.3.6", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, 
+ "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-haste-map/node_modules/@jest/types": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map/node_modules/@types/yargs": { + "version": "17.0.32", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/jest-haste-map/node_modules/jest-worker": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map/node_modules/supports-color": { + "version": "8.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util/node_modules/@jest/types": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": 
"^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util/node_modules/@types/yargs": { + "version": "17.0.32", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/jest-worker": { + "version": "29.4.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.4.3", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jquery": { + "version": "3.7.1", + "resolved": "https://registry.npm.alibaba-inc.com/jquery/download/jquery-3.7.1.tgz", + "integrity": "sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==", + "peer": true + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/json-buffer/download/json-buffer-3.0.1.tgz", + "integrity": "sha1-kziAKjDTtmBfvgYT4JQAjKjAWhM=", + "dev": true, + "peer": true + }, + "node_modules/json-parse-better-errors": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, + 
"node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz", + "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/json-stable-stringify-without-jsonify/download/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true, + "peer": true + }, + "node_modules/json2mq": { + "version": "0.2.0", + "license": "MIT", + "dependencies": { + "string-convert": "^0.2.0" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsonparse": { + "version": "1.3.1", + "dev": true, + "engines": [ + "node >= 0.2.0" + ], + "license": "MIT" + }, + "node_modules/JSONStream": { + "version": "1.3.5", + "dev": true, + "license": "(MIT OR Apache-2.0)", + "dependencies": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + }, + "bin": { + "JSONStream": "bin.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npm.alibaba-inc.com/keyv/download/keyv-4.5.4.tgz", + "integrity": 
"sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "peer": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "4.1.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/known-css-properties": { + "version": "0.26.0", + "resolved": "https://registry.npm.alibaba-inc.com/known-css-properties/download/known-css-properties-0.26.0.tgz", + "integrity": "sha512-5FZRzrZzNTBruuurWpvZnvP9pum+fe0HcK8z/ooo+U+Hmp4vtbyp1/QDsqmufirXy4egGzbaH/y2uCZf+6W5Kg==", + "dev": true, + "peer": true + }, + "node_modules/kolorist": { + "version": "1.8.0", + "dev": true, + "license": "MIT" + }, + "node_modules/latest-version": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "package-json": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/lazy-cache": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/leac": { + "version": "0.6.0", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://ko-fi.com/killymxi" + } + }, + "node_modules/less": { + "version": "4.1.3", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "copy-anything": "^2.0.1", + "parse-node-version": "^1.0.1", + "tslib": "^2.3.0" + }, + "bin": { + "lessc": "bin/lessc" + }, + "engines": { + "node": ">=6" + }, + "optionalDependencies": { + "errno": "^0.1.1", + "graceful-fs": "^4.1.2", + "image-size": "~0.5.0", + "make-dir": "^2.1.0", + "mime": "^1.4.1", + "needle": "^3.1.0", + "source-map": "~0.6.0" + } + }, + "node_modules/less-plugin-resolve": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "enhanced-resolve": "^5.15.0" + } + }, + "node_modules/less/node_modules/image-size": { + 
"version": "0.5.5", + "dev": true, + "license": "MIT", + "optional": true, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/less/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/levn/download/levn-0.4.1.tgz", + "integrity": "sha1-rkViwAdHO5MqYgDUAyaN0v/8at4=", + "dev": true, + "peer": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lightningcss": { + "version": "1.22.1", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^1.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-darwin-arm64": "1.22.1", + "lightningcss-darwin-x64": "1.22.1", + "lightningcss-freebsd-x64": "1.22.1", + "lightningcss-linux-arm-gnueabihf": "1.22.1", + "lightningcss-linux-arm64-gnu": "1.22.1", + "lightningcss-linux-arm64-musl": "1.22.1", + "lightningcss-linux-x64-gnu": "1.22.1", + "lightningcss-linux-x64-musl": "1.22.1", + "lightningcss-win32-x64-msvc": "1.22.1" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.22.1", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "dev": true, + "license": "MIT" + }, + "node_modules/lint-staged": { + "version": "13.3.0", + "dev": 
true, + "license": "MIT", + "dependencies": { + "chalk": "5.3.0", + "commander": "11.0.0", + "debug": "4.3.4", + "execa": "7.2.0", + "lilconfig": "2.1.0", + "listr2": "6.6.1", + "micromatch": "4.0.5", + "pidtree": "0.6.0", + "string-argv": "0.3.2", + "yaml": "2.3.1" + }, + "bin": { + "lint-staged": "bin/lint-staged.js" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + }, + "funding": { + "url": "https://opencollective.com/lint-staged" + } + }, + "node_modules/lint-staged/node_modules/chalk": { + "version": "5.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/lint-staged/node_modules/commander": { + "version": "11.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + } + }, + "node_modules/lint-staged/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/lint-staged/node_modules/execa": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.1", + "human-signals": "^4.3.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^3.0.7", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": "^14.18.0 || ^16.14.0 || >=18.0.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/lint-staged/node_modules/human-signals": { + "version": "4.3.1", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=14.18.0" + } + }, + "node_modules/lint-staged/node_modules/is-stream": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + 
"funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lint-staged/node_modules/mimic-fn": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lint-staged/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/lint-staged/node_modules/npm-run-path": { + "version": "5.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lint-staged/node_modules/onetime": { + "version": "6.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lint-staged/node_modules/path-key": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lint-staged/node_modules/strip-final-newline": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/listr2": { + "version": "6.6.1", + "dev": true, + "license": "MIT", + "dependencies": { + "cli-truncate": "^3.1.0", + "colorette": "^2.0.20", + "eventemitter3": "^5.0.1", + "log-update": "^5.0.1", + "rfdc": "^1.3.0", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=16.0.0" + }, + "peerDependencies": { + "enquirer": ">= 2.3.0 < 3" + }, + "peerDependenciesMeta": { + "enquirer": { + "optional": true + } + } + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": 
"https://registry.npm.alibaba-inc.com/loader-runner/download/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "dev": true, + "peer": true, + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/local-pkg": { + "version": "0.4.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npm.alibaba-inc.com/lodash.debounce/download/lodash.debounce-4.0.8.tgz", + "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" + }, + "node_modules/lodash.isfunction": { + "version": "3.0.9", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.kebabcase": { + "version": "4.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.mergewith": { + "version": "4.6.2", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.snakecase": { + "version": "4.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.startcase": { + "version": 
"4.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.throttle": { + "version": "4.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.truncate": { + "version": "4.4.2", + "resolved": "https://registry.npm.alibaba-inc.com/lodash.truncate/download/lodash.truncate-4.4.2.tgz", + "integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=", + "dev": true, + "peer": true + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.upperfirst": { + "version": "4.3.1", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-symbols/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-symbols/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-symbols/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/log-symbols/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/log-symbols/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + 
"ansi-escapes": "^5.0.0", + "cli-cursor": "^4.0.0", + "slice-ansi": "^5.0.0", + "strip-ansi": "^7.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/ansi-escapes": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^1.0.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/ansi-regex": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/log-update/node_modules/cli-cursor": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/restore-cursor": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/strip-ansi": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/log-update/node_modules/type-fest": { + "version": "1.4.0", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/longest-streak": { + "version": "3.1.0", + 
"dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/lru-cache": { + "version": "7.18.3", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/lz-string": { + "version": "1.5.0", + "dev": true, + "license": "MIT", + "bin": { + "lz-string": "bin/bin.js" + } + }, + "node_modules/make-dir": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "5.7.2", + "dev": true, + "license": "ISC", + "optional": true, + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "dev": true, + "license": "ISC" + }, + "node_modules/make-fetch-happen": { + "version": "2.6.0", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "agentkeepalive": "^3.3.0", + "cacache": "^10.0.0", + "http-cache-semantics": "^3.8.0", + "http-proxy-agent": "^2.0.0", + "https-proxy-agent": "^2.1.0", + "lru-cache": "^4.1.1", + "mississippi": "^1.2.0", + "node-fetch-npm": "^2.0.2", + "promise-retry": "^1.1.1", + "socks-proxy-agent": "^3.0.1", + "ssri": "^5.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/cacache": { + "version": "10.0.4", + "dev": true, + "license": "ISC", + "dependencies": { + "bluebird": "^3.5.1", + "chownr": "^1.0.1", + "glob": "^7.1.2", + "graceful-fs": "^4.1.11", + 
"lru-cache": "^4.1.1", + "mississippi": "^2.0.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.2", + "ssri": "^5.2.4", + "unique-filename": "^1.1.0", + "y18n": "^4.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/cacache/node_modules/mississippi": { + "version": "2.0.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^2.0.1", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/lru-cache": { + "version": "4.1.5", + "dev": true, + "license": "ISC", + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/make-fetch-happen/node_modules/pump": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/make-fetch-happen/node_modules/ssri": { + "version": "5.3.0", + "dev": true, + "license": "ISC", + "dependencies": { + "safe-buffer": "^5.1.1" + } + }, + "node_modules/make-fetch-happen/node_modules/through2": { + "version": "2.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/make-fetch-happen/node_modules/y18n": { + "version": "4.0.3", + "dev": true, + "license": "ISC" + }, + "node_modules/make-fetch-happen/node_modules/yallist": { + "version": "2.1.2", + "dev": true, + "license": "ISC" + }, + "node_modules/makeerror": { + "version": "1.0.12", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/map-obj": { + "version": "4.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mathml-tag-names": { + "version": "2.1.3", + "resolved": "https://registry.npm.alibaba-inc.com/mathml-tag-names/download/mathml-tag-names-2.1.3.tgz", + "integrity": "sha1-TdrdZzCOeAzxakdoWHjuJ7c2oKM=", + "dev": true, + "peer": true + }, + "node_modules/md5.js": { + "version": "1.3.5", + "dev": true, + "license": "MIT", + "dependencies": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/mdast-util-definitions": { + "version": "5.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "unist-util-visit": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-directive": { + "version": "2.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "mdast-util-from-markdown": "^1.3.0", + "mdast-util-to-markdown": "^1.5.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^5.1.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "2.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "1.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "mdast-util-to-string": "^3.1.0", + "micromark": "^3.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-decode-string": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0", + "micromark-extension-frontmatter": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-gfm-autolink-literal": "^1.0.0", + "mdast-util-gfm-footnote": "^1.0.0", + "mdast-util-gfm-strikethrough": "^1.0.0", + "mdast-util-gfm-table": "^1.0.0", + "mdast-util-gfm-task-list-item": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "ccount": "^2.0.0", + "mdast-util-find-and-replace": "^2.0.0", + "micromark-util-character": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + 
"version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0", + "micromark-util-normalize-identifier": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.3.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "1.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "1.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "unist-util-is": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "12.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-definitions": "^5.0.0", + "micromark-util-sanitize-uri": "^1.1.0", + "trim-lines": "^3.0.0", + "unist-util-generated": "^2.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "1.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^3.0.0", + "mdast-util-to-string": "^3.0.0", + "micromark-util-decode-string": "^1.0.0", + "unist-util-visit": "^4.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "3.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.4", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/memfs": { + "version": "3.5.3", + "dev": true, + "license": "Unlicense", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/meow": { + "version": "8.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/minimist": "^1.2.0", + "camelcase-keys": "^6.2.2", + "decamelize-keys": 
"^1.1.0", + "hard-rejection": "^2.1.0", + "minimist-options": "4.1.0", + "normalize-package-data": "^3.0.0", + "read-pkg-up": "^7.0.1", + "redent": "^3.0.0", + "trim-newlines": "^3.0.0", + "type-fest": "^0.18.0", + "yargs-parser": "^20.2.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromark": { + "version": "3.2.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "micromark-core-commonmark": "^1.0.1", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-factory-destination": "^1.0.0", + "micromark-factory-label": 
"^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-factory-title": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-html-tag-name": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-extension-directive": { + "version": "2.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "parse-entities": "^4.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-frontmatter": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "2.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^1.0.0", + "micromark-extension-gfm-footnote": "^1.0.0", + "micromark-extension-gfm-strikethrough": "^1.0.0", + "micromark-extension-gfm-table": "^1.0.0", + "micromark-extension-gfm-tagfilter": "^1.0.0", + "micromark-extension-gfm-task-list-item": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-gfm-autolink-literal": { + "version": "1.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-core-commonmark": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "1.0.7", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "1.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "1.1.0", + "dev": true, 
+ "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "1.2.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + 
"type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "1.2.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + 
"node_modules/micromark/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/micromark/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/miller-rabin": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + }, + "bin": { + "miller-rabin": "bin/miller-rabin" + } + }, + "node_modules/miller-rabin/node_modules/bn.js": { + "version": "4.12.0", + "dev": true, + "license": "MIT" + }, + "node_modules/mime": { + "version": "1.6.0", + "dev": true, + "license": "MIT", + "optional": true, + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npm.alibaba-inc.com/mime-db/download/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "peer": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npm.alibaba-inc.com/mime-types/download/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "peer": true, + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimer": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "bin": { + "mimer": "bin/mimer" + }, + "engines": { + "node": ">= 6.0" + } + }, + "node_modules/mimic-fn": { + "version": 
"2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/min-indent": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "dev": true, + "license": "ISC" + }, + "node_modules/minimalistic-crypto-utils": { + "version": "1.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minimist-options": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "arrify": "^1.0.1", + "is-plain-obj": "^1.1.0", + "kind-of": "^6.0.3" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/minipass": { + "version": "7.0.4", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mississippi": { + "version": "1.3.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^1.0.0", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + } + }, + "node_modules/mississippi/node_modules/through2": { + "version": "2.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/mkdirp": { + "version": "0.5.6", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/move-concurrently": { + "version": "1.0.1", + "dev": true, + "license": "ISC", + 
"dependencies": { + "aproba": "^1.1.1", + "copy-concurrently": "^1.0.0", + "fs-write-stream-atomic": "^1.0.8", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.3" + } + }, + "node_modules/mri": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/mute-stream": { + "version": "0.0.7", + "dev": true, + "license": "ISC" + }, + "node_modules/mz": { + "version": "2.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "2.1.11", + "dev": true, + "license": "MIT" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npm.alibaba-inc.com/natural-compare/download/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", + "dev": true, + "peer": true + }, + "node_modules/natural-compare-lite": { + "version": "1.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/needle": { + "version": "3.3.1", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "iconv-lite": "^0.6.3", + "sax": "^1.2.4" + }, + "bin": { + "needle": "bin/needle" + }, + "engines": { + "node": ">= 4.4.x" + } + }, + "node_modules/needle/node_modules/iconv-lite": { + "version": "0.6.3", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npm.alibaba-inc.com/neo-async/download/neo-async-2.6.2.tgz", + "integrity": "sha1-tKr7k+OustgXTKU88WOrfXMIMF8=", + "dev": true, + "peer": true + }, + "node_modules/no-case": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } 
+ }, + "node_modules/node-abort-controller": { + "version": "3.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "3.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, + "node_modules/node-fetch-npm": { + "version": "2.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "encoding": "^0.1.11", + "json-parse-better-errors": "^1.0.0", + "safe-buffer": "^5.1.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/node-libs-browser": { + "version": "2.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "assert": "^1.1.1", + "browserify-zlib": "^0.2.0", + "buffer": "^4.3.0", + "console-browserify": "^1.1.0", + "constants-browserify": "^1.0.0", + "crypto-browserify": "^3.11.0", + "domain-browser": "^1.1.1", + "events": "^3.0.0", + "https-browserify": "^1.0.0", + "os-browserify": "^0.3.0", + "path-browserify": "0.0.1", + "process": "^0.11.10", + "punycode": "^1.2.4", + "querystring-es3": "^0.2.0", + "readable-stream": "^2.3.3", + "stream-browserify": "^2.0.1", + "stream-http": "^2.7.2", + "string_decoder": "^1.0.0", + "timers-browserify": "^2.0.4", + "tty-browserify": "0.0.0", + "url": "^0.11.0", + "util": "^0.11.0", + "vm-browserify": "^1.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.14", + "dev": true, + "license": "MIT" 
+ }, + "node_modules/normalize-package-data": { + "version": "3.0.3", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^4.0.1", + "is-core-module": "^2.5.0", + "semver": "^7.3.4", + "validate-npm-package-license": "^3.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/normalize-package-data/node_modules/hosted-git-info": { + "version": "4.1.0", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/normalize-package-data/node_modules/lru-cache": { + "version": "6.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/normalize-package-data/node_modules/yallist": { + "version": "4.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-package-arg": { + "version": "5.1.2", + "dev": true, + "license": "ISC", + "dependencies": { + "hosted-git-info": "^2.4.2", + "osenv": "^0.1.4", + "semver": "^5.1.0", + "validate-npm-package-name": "^3.0.0" + } + }, + "node_modules/npm-package-arg/node_modules/hosted-git-info": { + "version": "2.8.9", + "dev": true, + "license": "ISC" + }, + "node_modules/npm-package-arg/node_modules/semver": { + "version": "5.7.2", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/npm-pick-manifest": { + "version": "1.0.4", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "npm-package-arg": "^5.1.2", + "semver": "^5.3.0" + } + }, + "node_modules/npm-pick-manifest/node_modules/semver": { + "version": "5.7.2", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + 
"node_modules/npm-run-path": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "dev": true, + "license": "MIT" + }, + "node_modules/nth-check": { + "version": "1.0.2", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "~1.0.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.1", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.8", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.8", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.getownpropertydescriptors": { + "version": "2.1.8", + "dev": true, + "license": "MIT", + "dependencies": { + "array.prototype.reduce": "^1.0.6", + "call-bind": "^1.0.7", + "define-properties": 
"^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "gopd": "^1.0.1", + "safe-array-concat": "^1.1.2" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.getprototypeof": { + "version": "1.0.6", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "reflect.getprototypeof": "^1.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.hasown": { + "version": "1.1.4", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.values": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/omit-deep": { + "version": "0.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "is-plain-object": "^2.0.1", + "unset-value": "^0.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/on-exit-leak-free": { + "version": "0.2.0", + "dev": true, + "license": "MIT" + }, + "node_modules/once": { + "version": "1.4.0", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/open": { + "version": "6.4.0", + "dev": true, + "license": "MIT", + "dependencies": { + "is-wsl": "^1.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/optionator": { + "version": "0.9.3", + "resolved": "https://registry.npm.alibaba-inc.com/optionator/download/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "dev": true, + "peer": true, + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "1.4.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^2.1.0", + "cli-cursor": "^2.1.0", + "cli-spinners": "^1.0.1", + "log-symbols": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/ora/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/ora/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/ora/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/ora/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/ora/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/ora/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" 
+ } + }, + "node_modules/os-browserify": { + "version": "0.3.0", + "dev": true, + "license": "MIT" + }, + "node_modules/os-homedir": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/osenv": { + "version": "0.1.5", + "dev": true, + "license": "ISC", + "dependencies": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "node_modules/p-finally": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "got": "^6.7.1", + "registry-auth-token": "^3.0.1", + "registry-url": "^3.0.3", + "semver": "^5.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/package-json/node_modules/semver": { + "version": "5.7.2", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/pacote": { + "version": "2.7.38", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "bluebird": "^3.5.0", + "cacache": "^9.2.9", + "glob": "^7.1.2", + "lru-cache": "^4.1.1", + "make-fetch-happen": "^2.4.13", + "minimatch": "^3.0.4", + "mississippi": "^1.2.0", + 
"normalize-package-data": "^2.4.0", + "npm-package-arg": "^5.1.2", + "npm-pick-manifest": "^1.0.4", + "osenv": "^0.1.4", + "promise-inflight": "^1.0.1", + "promise-retry": "^1.1.1", + "protoduck": "^4.0.0", + "safe-buffer": "^5.1.1", + "semver": "^5.3.0", + "ssri": "^4.1.6", + "tar-fs": "^1.15.3", + "tar-stream": "^1.5.4", + "unique-filename": "^1.1.0", + "which": "^1.2.12" + } + }, + "node_modules/pacote/node_modules/hosted-git-info": { + "version": "2.8.9", + "dev": true, + "license": "ISC" + }, + "node_modules/pacote/node_modules/lru-cache": { + "version": "4.1.5", + "dev": true, + "license": "ISC", + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/pacote/node_modules/normalize-package-data": { + "version": "2.5.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/pacote/node_modules/resolve": { + "version": "1.22.8", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/pacote/node_modules/semver": { + "version": "5.7.2", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/pacote/node_modules/which": { + "version": "1.3.1", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/pacote/node_modules/yallist": { + "version": "2.1.2", + "dev": true, + "license": "ISC" + }, + "node_modules/pako": { + "version": "1.0.11", + "dev": true, + "license": "(MIT AND Zlib)" + }, + "node_modules/parallel-transform": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "cyclist": 
"^1.0.1", + "inherits": "^2.0.3", + "readable-stream": "^2.1.5" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-asn1": { + "version": "5.1.7", + "dev": true, + "license": "ISC", + "dependencies": { + "asn1.js": "^4.10.1", + "browserify-aes": "^1.2.0", + "evp_bytestokey": "^1.0.3", + "hash-base": "~3.0", + "pbkdf2": "^3.1.2", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/parse-entities": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-git-config": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "extend-shallow": "^2.0.1", + "fs-exists-sync": "^0.1.0", + "git-config-path": "^1.0.1", + "ini": "^1.3.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-node-version": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + 
"node_modules/parse-passwd": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/parse5": { + "version": "7.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parseley": { + "version": "0.12.1", + "dev": true, + "license": "MIT", + "dependencies": { + "leac": "^0.6.0", + "peberminta": "^0.9.0" + }, + "funding": { + "url": "https://ko-fi.com/killymxi" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-browserify": { + "version": "0.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "dev": true, + "license": "(WTFPL OR MIT)" + }, + "node_modules/path-key": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "dev": true, + "license": "MIT" + }, + "node_modules/path-scurry": { + "version": "1.10.2", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.2.2", + "dev": true, + "license": "ISC", + "engines": { + "node": "14 || >=16.14" + } + }, + "node_modules/path-to-regexp": { + "version": "1.7.0", + "dev": true, + "license": "MIT", + 
"dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-to-regexp/node_modules/isarray": { + "version": "0.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/pbkdf2": { + "version": "3.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "create-hash": "^1.1.2", + "create-hmac": "^1.1.4", + "ripemd160": "^2.0.1", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/peberminta": { + "version": "0.9.0", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://ko-fi.com/killymxi" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pidtree": { + "version": "0.6.0", + "dev": true, + "license": "MIT", + "bin": { + "pidtree": "bin/pidtree.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/pify": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pino": { + "version": "7.11.0", + "dev": true, + "license": "MIT", + "dependencies": { + "atomic-sleep": "^1.0.0", + "fast-redact": "^3.0.0", + "on-exit-leak-free": "^0.2.0", + "pino-abstract-transport": "v0.5.0", + "pino-std-serializers": "^4.0.0", + "process-warning": "^1.0.0", + "quick-format-unescaped": "^4.0.3", + "real-require": "^0.1.0", + "safe-stable-stringify": "^2.1.0", + "sonic-boom": "^2.2.1", + "thread-stream": "^0.15.1" + }, + "bin": { + "pino": "bin.js" + } + }, + "node_modules/pino-abstract-transport": { + "version": "0.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "duplexify": "^4.1.2", + "split2": "^4.0.0" + } + }, + 
"node_modules/pino-abstract-transport/node_modules/duplexify": { + "version": "4.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.4.1", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1", + "stream-shift": "^1.0.2" + } + }, + "node_modules/pino-abstract-transport/node_modules/readable-stream": { + "version": "3.6.2", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pino-abstract-transport/node_modules/split2": { + "version": "4.2.0", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 10.x" + } + }, + "node_modules/pino-std-serializers": { + "version": "4.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/pirates": { + "version": "4.0.6", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pluralize": { + "version": "8.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/point-in-polygon": { + "version": "1.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/possible-typed-array-names": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.4.31", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-attribute-case-insensitive": { + "version": "5.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.10" + }, + "engines": { + "node": 
"^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-clamp": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=7.6.0" + }, + "peerDependencies": { + "postcss": "^8.4.6" + } + }, + "node_modules/postcss-color-functional-notation": { + "version": "4.2.4", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-color-hex-alpha": { + "version": "8.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-color-rebeccapurple": { + "version": "7.1.1", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-custom-media": { + "version": "8.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.3" + } + }, + "node_modules/postcss-custom-properties": { + "version": "12.1.11", + "dev": true, + "license": "MIT", + 
"dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-custom-selectors": { + "version": "6.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.3" + } + }, + "node_modules/postcss-dir-pseudo-class": { + "version": "6.0.5", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-selector-parser": "^6.0.10" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-double-position-gradients": { + "version": "3.1.2", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-env-function": { + "version": "4.0.6", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-flexbugs-fixes": { + "version": "5.0.2", + "dev": true, + "license": "MIT", + "peerDependencies": { + "postcss": "^8.1.4" + } + }, + "node_modules/postcss-focus-visible": { + "version": "6.0.4", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-selector-parser": 
"^6.0.9" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-focus-within": { + "version": "5.0.4", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-selector-parser": "^6.0.9" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-font-variant": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-gap-properties": { + "version": "3.0.5", + "dev": true, + "license": "CC0-1.0", + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-image-set-function": { + "version": "4.0.7", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-initial": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-lab-function": { + "version": "4.2.1", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-logical": { + "version": "5.0.4", + "dev": true, + "license": "CC0-1.0", + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + 
"node_modules/postcss-media-minmax": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-media-query-parser": { + "version": "0.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/postcss-media-query-parser/download/postcss-media-query-parser-0.2.3.tgz", + "integrity": "sha1-J7Ocb02U+Bsac7j3Y1HGCeXO8kQ=", + "dev": true, + "peer": true + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.1.0", + "dev": true, + "license": "ISC", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.2.0", + "dev": true, + "license": "ISC", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-nesting": { + "version": "10.2.0", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/selector-specificity": "^2.0.0", + "postcss-selector-parser": "^6.0.10" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + 
"node_modules/postcss-opacity-percentage": { + "version": "1.1.3", + "dev": true, + "funding": [ + { + "type": "kofi", + "url": "https://ko-fi.com/mrcgrtz" + }, + { + "type": "liberapay", + "url": "https://liberapay.com/mrcgrtz" + } + ], + "license": "MIT", + "engines": { + "node": "^12 || ^14 || >=16" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-overflow-shorthand": { + "version": "3.0.4", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-page-break": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "peerDependencies": { + "postcss": "^8" + } + }, + "node_modules/postcss-place": { + "version": "7.0.5", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-prefix-selector": { + "version": "1.16.0", + "dev": true, + "license": "MIT", + "peerDependencies": { + "postcss": ">4 <9" + } + }, + "node_modules/postcss-preset-env": { + "version": "7.5.0", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "@csstools/postcss-color-function": "^1.1.0", + "@csstools/postcss-font-format-keywords": "^1.0.0", + "@csstools/postcss-hwb-function": "^1.0.0", + "@csstools/postcss-ic-unit": "^1.0.0", + "@csstools/postcss-is-pseudo-class": "^2.0.2", + "@csstools/postcss-normalize-display-values": "^1.0.0", + "@csstools/postcss-oklab-function": "^1.1.0", + "@csstools/postcss-progressive-custom-properties": "^1.3.0", + "@csstools/postcss-stepped-value-functions": "^1.0.0", + 
"@csstools/postcss-unset-value": "^1.0.0", + "autoprefixer": "^10.4.6", + "browserslist": "^4.20.3", + "css-blank-pseudo": "^3.0.3", + "css-has-pseudo": "^3.0.4", + "css-prefers-color-scheme": "^6.0.3", + "cssdb": "^6.6.1", + "postcss-attribute-case-insensitive": "^5.0.0", + "postcss-clamp": "^4.1.0", + "postcss-color-functional-notation": "^4.2.2", + "postcss-color-hex-alpha": "^8.0.3", + "postcss-color-rebeccapurple": "^7.0.2", + "postcss-custom-media": "^8.0.0", + "postcss-custom-properties": "^12.1.7", + "postcss-custom-selectors": "^6.0.0", + "postcss-dir-pseudo-class": "^6.0.4", + "postcss-double-position-gradients": "^3.1.1", + "postcss-env-function": "^4.0.6", + "postcss-focus-visible": "^6.0.4", + "postcss-focus-within": "^5.0.4", + "postcss-font-variant": "^5.0.0", + "postcss-gap-properties": "^3.0.3", + "postcss-image-set-function": "^4.0.6", + "postcss-initial": "^4.0.1", + "postcss-lab-function": "^4.2.0", + "postcss-logical": "^5.0.4", + "postcss-media-minmax": "^5.0.0", + "postcss-nesting": "^10.1.4", + "postcss-opacity-percentage": "^1.1.2", + "postcss-overflow-shorthand": "^3.0.3", + "postcss-page-break": "^3.0.4", + "postcss-place": "^7.0.4", + "postcss-pseudo-class-any-link": "^7.1.2", + "postcss-replace-overflow-wrap": "^4.0.0", + "postcss-selector-not": "^5.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-pseudo-class-any-link": { + "version": "7.1.6", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "postcss-selector-parser": "^6.0.10" + }, + "engines": { + "node": "^12 || ^14 || >=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + "peerDependencies": { + "postcss": "^8.2" + } + }, + "node_modules/postcss-replace-overflow-wrap": { + "version": "4.0.0", + "dev": 
true, + "license": "MIT", + "peerDependencies": { + "postcss": "^8.0.3" + } + }, + "node_modules/postcss-resolve-nested-selector": { + "version": "0.1.1", + "resolved": "https://registry.npm.alibaba-inc.com/postcss-resolve-nested-selector/download/postcss-resolve-nested-selector-0.1.1.tgz", + "integrity": "sha1-Kcy8fDfe36wwTp//C/FZaz9qDk4=", + "dev": true, + "peer": true + }, + "node_modules/postcss-safe-parser": { + "version": "6.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/postcss-safe-parser/download/postcss-safe-parser-6.0.0.tgz", + "integrity": "sha1-u0wpiUFxqUvFyZa5owMX70Aq2qE=", + "dev": true, + "peer": true, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.3.3" + } + }, + "node_modules/postcss-selector-not": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.16", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-syntax": { + "version": "0.36.2", + "dev": true, + "license": "MIT", + "peerDependencies": { + "postcss": ">=5.0.0" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "license": "MIT" + }, + "node_modules/postcss/node_modules/nanoid": { + "version": "3.3.7", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npm.alibaba-inc.com/prelude-ls/download/prelude-ls-1.2.1.tgz", + "integrity": "sha1-3rxkidem5rDnYRiIzsiAM30xY5Y=", + "dev": true, + "peer": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prepend-http": 
{ + "version": "1.0.4", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/prettier": { + "version": "2.8.8", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin-prettier.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier-plugin-organize-imports": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@volar/vue-language-plugin-pug": "^1.0.4", + "@volar/vue-typescript": "^1.0.4", + "prettier": ">=2.0", + "typescript": ">=2.9" + }, + "peerDependenciesMeta": { + "@volar/vue-language-plugin-pug": { + "optional": true + }, + "@volar/vue-typescript": { + "optional": true + } + } + }, + "node_modules/prettier-plugin-packagejson": { + "version": "2.4.3", + "dev": true, + "license": "MIT", + "dependencies": { + "sort-package-json": "2.4.1", + "synckit": "0.8.5" + }, + "peerDependencies": { + "prettier": ">= 1.16.0" + }, + "peerDependenciesMeta": { + "prettier": { + "optional": true + } + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/prism-react-renderer": { + "version": "1.3.5", + "dev": true, + "license": "MIT", + "peerDependencies": { + "react": ">=0.14.9" + } + }, + "node_modules/prism-themes": { + "version": "1.9.0", + "dev": true, + "license": "MIT" + }, + "node_modules/prismjs": { + "version": "1.29.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/process": { + "version": "0.11.10", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/process-warning": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + 
"node_modules/promise-inflight": { + "version": "1.0.1", + "dev": true, + "license": "ISC" + }, + "node_modules/promise-retry": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "err-code": "^1.0.0", + "retry": "^0.10.0" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "dev": true, + "license": "MIT" + }, + "node_modules/property-information": { + "version": "6.5.0", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/protoduck": { + "version": "4.0.0", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "genfun": "^4.0.1" + } + }, + "node_modules/prr": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/pseudomap": { + "version": "1.0.2", + "dev": true, + "license": "ISC" + }, + "node_modules/public-encrypt": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/public-encrypt/node_modules/bn.js": { + "version": "4.12.0", + "dev": true, + "license": "MIT" + }, + "node_modules/pump": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/pumpify": { + "version": "1.5.1", + "dev": true, + "license": "MIT", + "dependencies": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + } + }, + "node_modules/pumpify/node_modules/pump": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/punycode": { + "version": "1.4.1", + "dev": true, + "license": "MIT" + }, + "node_modules/q": { + "version": "1.5.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.6.0", + "teleport": ">=0.2.0" + } + }, + "node_modules/qrcode.react": { + "version": "3.1.0", + "license": "ISC", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/qs": { + "version": "6.12.1", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/query-string": { + "version": "6.14.1", + "dev": true, + "license": "MIT", + "dependencies": { + "decode-uri-component": "^0.2.0", + "filter-obj": "^1.1.0", + "split-on-first": "^1.0.0", + "strict-uri-encode": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/querystring-es3": { + "version": "0.2.1", + "dev": true, + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/queue": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "~2.0.3" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/quick-format-unescaped": { + "version": "4.0.4", + "dev": true, + "license": "MIT" + }, + "node_modules/quick-lru": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ramda": { + "version": "0.29.0", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/ramda" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/randomfill": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "randombytes": "^2.0.5", + "safe-buffer": "^5.1.0" + } + }, + "node_modules/raw-loader": { + "version": "4.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "dev": true, + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc-cascader": { + "version": "3.24.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5", + "array-tree-filter": "^2.1.0", + "classnames": "^2.3.1", + "rc-select": "~14.13.0", + "rc-tree": "~5.8.1", + "rc-util": "^5.37.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-checkbox": { + "version": "3.2.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.25.2" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-collapse": { + "version": "3.7.3", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.3.4", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dialog": { + "version": "9.4.0", + "license": "MIT", + "dependencies": { + 
"@babel/runtime": "^7.10.1", + "@rc-component/portal": "^1.0.0-8", + "classnames": "^2.2.6", + "rc-motion": "^2.3.0", + "rc-util": "^5.21.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-drawer": { + "version": "7.1.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.9", + "@rc-component/portal": "^1.1.1", + "classnames": "^2.2.6", + "rc-motion": "^2.6.1", + "rc-util": "^5.38.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dropdown": { + "version": "4.2.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-util": "^5.17.0" + }, + "peerDependencies": { + "react": ">=16.11.0", + "react-dom": ">=16.11.0" + } + }, + "node_modules/rc-field-form": { + "version": "1.44.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "async-validator": "^4.1.0", + "rc-util": "^5.32.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-image": { + "version": "7.6.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "@rc-component/portal": "^1.0.2", + "classnames": "^2.2.6", + "rc-dialog": "~9.4.0", + "rc-motion": "^2.6.2", + "rc-util": "^5.34.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-input": { + "version": "1.4.5", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.18.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-input-number": { + "version": "9.0.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/mini-decimal": "^1.0.1", + "classnames": "^2.2.5", + "rc-input": "~1.4.0", + "rc-util": 
"^5.28.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-mentions": { + "version": "2.11.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.22.5", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-input": "~1.4.0", + "rc-menu": "~9.13.0", + "rc-textarea": "~1.6.1", + "rc-util": "^5.34.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-menu": { + "version": "9.13.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.0.0", + "classnames": "2.x", + "rc-motion": "^2.4.3", + "rc-overflow": "^1.3.1", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-motion": { + "version": "2.9.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.21.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-notification": { + "version": "5.4.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.9.0", + "rc-util": "^5.20.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-overflow": { + "version": "1.3.2", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.37.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-pagination": { + "version": "4.0.4", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.38.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-picker": { + "version": 
"4.4.2", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.1", + "rc-overflow": "^1.3.2", + "rc-resize-observer": "^1.4.0", + "rc-util": "^5.38.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "date-fns": ">= 2.x", + "dayjs": ">= 1.x", + "luxon": ">= 3.x", + "moment": ">= 2.x", + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + }, + "peerDependenciesMeta": { + "date-fns": { + "optional": true + }, + "dayjs": { + "optional": true + }, + "luxon": { + "optional": true + }, + "moment": { + "optional": true + } + } + }, + "node_modules/rc-progress": { + "version": "4.0.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.6", + "rc-util": "^5.16.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-rate": { + "version": "2.12.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.0.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-resize-observer": { + "version": "1.4.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.7", + "classnames": "^2.2.1", + "rc-util": "^5.38.0", + "resize-observer-polyfill": "^1.5.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-segmented": { + "version": "2.3.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-motion": "^2.4.4", + "rc-util": "^5.17.0" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-select": { + "version": "14.13.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.1.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + 
"rc-overflow": "^1.3.1", + "rc-util": "^5.16.1", + "rc-virtual-list": "^3.5.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-slider": { + "version": "10.6.2", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.36.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-steps": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.16.7", + "classnames": "^2.2.3", + "rc-util": "^5.16.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-switch": { + "version": "4.1.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.21.0", + "classnames": "^2.2.1", + "rc-util": "^5.30.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-table": { + "version": "7.45.4", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/context": "^1.4.0", + "classnames": "^2.2.5", + "rc-resize-observer": "^1.1.0", + "rc-util": "^5.37.0", + "rc-virtual-list": "^3.11.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tabs": { + "version": "14.1.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "classnames": "2.x", + "rc-dropdown": "~4.2.0", + "rc-menu": "~9.13.0", + "rc-motion": "^2.6.2", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.34.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-textarea": { + "version": "1.6.3", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.1", + 
"rc-input": "~1.4.0", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tooltip": { + "version": "6.2.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tree": { + "version": "5.8.5", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + "rc-util": "^5.16.1", + "rc-virtual-list": "^3.5.1" + }, + "engines": { + "node": ">=10.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-tree-select": { + "version": "5.19.0", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-select": "~14.13.0", + "rc-tree": "~5.8.1", + "rc-util": "^5.16.1" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-upload": { + "version": "4.5.2", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "classnames": "^2.2.5", + "rc-util": "^5.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-util": { + "version": "5.39.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "react-is": "^18.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-virtual-list": { + "version": "3.11.5", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.0", + "classnames": "^2.2.6", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.36.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/react": { + "version": "18.1.0", + "license": "MIT", + "dependencies": { + 
"loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-copy-to-clipboard": { + "version": "5.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "copy-to-clipboard": "^3.3.1", + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "react": "^15.3.0 || 16 || 17 || 18" + } + }, + "node_modules/react-dom": { + "version": "18.1.0", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.22.0" + }, + "peerDependencies": { + "react": "^18.1.0" + } + }, + "node_modules/react-error-boundary": { + "version": "4.0.13", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "peerDependencies": { + "react": ">=16.13.1" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.9", + "dev": true, + "license": "MIT" + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "dev": true, + "license": "MIT" + }, + "node_modules/react-helmet-async": { + "version": "1.3.0", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-intl": { + "version": "6.6.5", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/icu-messageformat-parser": "2.7.6", + "@formatjs/intl": "2.10.1", + "@formatjs/intl-displaynames": "6.6.6", + "@formatjs/intl-listformat": "7.5.5", + "@types/hoist-non-react-statics": "^3.3.1", + "@types/react": "16 || 17 || 18", + "hoist-non-react-statics": "^3.3.2", + "intl-messageformat": "10.5.11", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "react": "^16.6.0 || 17 || 18", + "typescript": "^4.7 || 5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": 
true + } + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "license": "MIT" + }, + "node_modules/react-loading-skeleton": { + "version": "3.4.0", + "dev": true, + "license": "MIT", + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/react-merge-refs": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/react-refresh": { + "version": "0.14.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-router": { + "version": "6.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "history": "^5.2.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "node_modules/react-router-dom": { + "version": "6.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "history": "^5.2.0", + "react-router": "6.3.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/react-simple-code-editor": { + "version": "0.13.1", + "dev": true, + "license": "MIT", + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/react-slick": { + "version": "0.30.2", + "resolved": "https://registry.npm.alibaba-inc.com/react-slick/download/react-slick-0.30.2.tgz", + "integrity": "sha512-XvQJi7mRHuiU3b9irsqS9SGIgftIfdV5/tNcURTb5LdIokRA5kIIx3l4rlq2XYHfxcSntXapoRg/GxaVOM1yfg==", + "dependencies": { + "classnames": "^2.2.5", + "enquire.js": "^2.1.6", + "json2mq": "^0.2.0", + "lodash.debounce": "^4.0.8", + "resize-observer-polyfill": "^1.5.0" + }, + "peerDependencies": { + "react": "^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/read-pkg": { + "version": "5.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", 
+ "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up": { + "version": "7.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg-up/node_modules/find-up": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up/node_modules/locate-path": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up/node_modules/p-limit": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg-up/node_modules/p-locate": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up/node_modules/type-fest": { + "version": "0.8.1", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg/node_modules/hosted-git-info": { + "version": "2.8.9", + "dev": true, + "license": "ISC" + }, + "node_modules/read-pkg/node_modules/normalize-package-data": { + "version": "2.5.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/read-pkg/node_modules/resolve": { + "version": "1.22.8", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + 
"path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/read-pkg/node_modules/semver": { + "version": "5.7.2", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/read-pkg/node_modules/type-fest": { + "version": "0.6.0", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/readable-stream": { + "version": "2.3.8", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/readable-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/readdirp": { + "version": "3.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/real-require": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12.13.0" + } + }, + "node_modules/redent": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.6", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.1", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "dev": true, + "license": "MIT" + }, + 
"node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "license": "MIT" + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "set-function-name": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/registry-auth-token": { + "version": "3.4.0", + "dev": true, + "license": "MIT", + "dependencies": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/registry-url": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "rc": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/rehype-autolink-headings": { + "version": "6.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "extend": "^3.0.0", + "hast-util-has-property": "^2.0.0", + "hast-util-heading-rank": "^2.0.0", + "hast-util-is-element": "^2.0.0", + "unified": "^10.0.0", + "unist-util-visit": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-remove-comments": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "hast-util-is-conditional-comment": "^2.0.0", + "unified": "^10.0.0", + "unist-util-filter": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-stringify": { + "version": "9.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "hast-util-to-html": "^8.0.0", + "unified": "^10.0.0" + }, + "funding": { + 
"type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-directive": "^2.0.0", + "micromark-extension-directive": "^2.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-frontmatter": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-frontmatter": "^1.0.0", + "micromark-extension-frontmatter": "^1.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-gfm": "^2.0.0", + "micromark-extension-gfm": "^2.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "10.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "10.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-to-hast": "^12.1.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remote-origin-url": { + "version": "0.5.3", + "dev": true, + "license": "MIT", + "dependencies": { + 
"parse-git-config": "^1.1.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/rename-keys": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/ansi-regex": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/css-what": { + "version": "6.1.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + "version": "1.4.1", + "dev": true, + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + 
"funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "dev": true, + "license": "BSD-2-Clause", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "dev": true, + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/renderkid/node_modules/nth-check": { + "version": "2.1.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/strip-ansi": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npm.alibaba-inc.com/require-from-string/download/require-from-string-2.0.2.tgz", + "integrity": "sha1-iaf92TgmEmcxjq/hT5wy5ZjDaQk=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resize-observer-polyfill": { + "version": "1.5.1", + "license": "MIT" + }, + "node_modules/resolve": { + "version": "2.0.0-next.5", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-dir": { + 
"version": "0.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "expand-tilde": "^1.2.2", + "global-modules": "^0.2.3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-global": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "global-dirs": "^0.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/restore-cursor": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^2.0.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/restore-cursor/node_modules/mimic-fn": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/restore-cursor/node_modules/onetime": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/retry": { + "version": "0.10.1", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rfdc": { + "version": "1.3.1", + "dev": true, + "license": "MIT" + }, + "node_modules/rimraf": { + "version": "2.7.1", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/ripemd160": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "node_modules/rollup": { + "version": "3.29.4", + "dev": 
true, + "license": "MIT", + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=14.18.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/rollup-plugin-visualizer": { + "version": "5.9.0", + "dev": true, + "license": "MIT", + "dependencies": { + "open": "^8.4.0", + "picomatch": "^2.3.1", + "source-map": "^0.7.4", + "yargs": "^17.5.1" + }, + "bin": { + "rollup-plugin-visualizer": "dist/bin/cli.js" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "rollup": "2.x || 3.x" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/rollup-plugin-visualizer/node_modules/define-lazy-prop": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/rollup-plugin-visualizer/node_modules/is-docker": { + "version": "2.2.1", + "dev": true, + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/rollup-plugin-visualizer/node_modules/is-wsl": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/rollup-plugin-visualizer/node_modules/open": { + "version": "8.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/run-applescript": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/run-async": { + "version": "2.4.1", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=0.12.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/run-queue": { + "version": "1.0.3", + "dev": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.1.1" + } + }, + "node_modules/rxjs": { + "version": "6.6.7", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "tslib": "^1.9.0" + }, + "engines": { + "npm": ">=2.0.0" + } + }, + "node_modules/rxjs/node_modules/tslib": { + "version": "1.14.1", + "dev": true, + "license": "0BSD" + }, + "node_modules/sade": { + "version": "1.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/safe-array-concat": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-array-concat/node_modules/isarray": { + "version": "2.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safe-regex-test": { + "version": "1.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-regex": "^1.1.4" + }, + "engines": { + "node": 
">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-stable-stringify": { + "version": "2.4.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/sass": { + "version": "1.75.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": "sass.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sax": { + "version": "1.3.0", + "dev": true, + "license": "ISC" + }, + "node_modules/scheduler": { + "version": "0.22.0", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/schema-utils": { + "version": "3.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/scroll-into-view-if-needed": { + "version": "3.1.0", + "license": "MIT", + "dependencies": { + "compute-scroll-into-view": "^3.0.2" + } + }, + "node_modules/selderee": { + "version": "0.11.0", + "dev": true, + "license": "MIT", + "dependencies": { + "parseley": "^0.12.0" + }, + "funding": { + "url": "https://ko-fi.com/killymxi" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.5.4", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^5.0.3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/semver-diff/node_modules/semver": { + "version": "5.7.2", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/yallist": { + "version": "4.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npm.alibaba-inc.com/serialize-javascript/download/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "peer": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setimmediate": { + "version": "1.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/sha.js": { + "version": "2.4.11", + "dev": true, + "license": "(MIT AND BSD-3-Clause)", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + }, + "bin": { + "sha.js": "bin.js" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "license": "MIT" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + 
"shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/shortid": { + "version": "2.2.16", + "dev": true, + "license": "MIT", + "dependencies": { + "nanoid": "^2.1.0" + } + }, + "node_modules/side-channel": { + "version": "1.0.6", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "dev": true, + "license": "ISC" + }, + "node_modules/simple-swizzle": { + "version": "0.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/simple-swizzle/node_modules/is-arrayish": { + "version": "0.3.2", + "dev": true, + "license": "MIT" + }, + "node_modules/sitemap": { + "version": "7.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "dev": true, + "license": "MIT" + }, + "node_modules/slash": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/slice-ansi": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.0.0", + "is-fullwidth-code-point": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/slice-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/slick-carousel": { + "version": "1.8.1", + "resolved": "https://registry.npm.alibaba-inc.com/slick-carousel/download/slick-carousel-1.8.1.tgz", + "integrity": "sha1-pL+ykBSIe7Zs5Si5C9DNomLMj40=", + "peerDependencies": { + "jquery": ">=1.8.0" + } + }, + "node_modules/smart-buffer": { + "version": "1.1.15", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.10.15", + "npm": ">= 1.3.5" + } + }, + "node_modules/socks": { + "version": "1.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "ip": "^1.1.4", + "smart-buffer": "^1.0.13" + }, + "engines": { + "node": ">= 0.10.0", + "npm": ">= 1.3.5" + } + }, + "node_modules/socks-proxy-agent": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^4.1.0", + "socks": "^1.1.10" + } + }, + "node_modules/sonic-boom": { + "version": "2.8.0", + "dev": true, + "license": "MIT", + "dependencies": { + "atomic-sleep": "^1.0.0" + } + }, + "node_modules/sort-object-keys": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/sort-package-json": { + "version": "2.4.1", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-indent": "^7.0.1", + "detect-newline": "^4.0.0", + "git-hooks-list": "^3.0.0", + "globby": "^13.1.2", + "is-plain-obj": "^4.1.0", + "sort-object-keys": "^1.1.3" + }, + "bin": { + "sort-package-json": "cli.js" + } + }, + "node_modules/sort-package-json/node_modules/fast-glob": { + "version": "3.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/sort-package-json/node_modules/globby": { + "version": "13.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "dir-glob": 
"^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/sort-package-json/node_modules/is-plain-obj": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/sort-package-json/node_modules/slash": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-resolve": { + "version": "0.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdx-correct": { + "version": "3.2.0", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "dev": true, + "license": "CC-BY-3.0" + }, + 
"node_modules/spdx-expression-parse": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.17", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/spdy": { + "version": "4.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/spdy-transport/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/spdy-transport/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/spdy-transport/node_modules/readable-stream": { + "version": "3.6.2", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/spdy/node_modules/debug": { + "version": "4.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/spdy/node_modules/ms": { + "version": "2.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/split-on-first": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + 
"node_modules/split2": { + "version": "3.2.2", + "dev": true, + "license": "ISC", + "dependencies": { + "readable-stream": "^3.0.0" + } + }, + "node_modules/split2/node_modules/readable-stream": { + "version": "3.6.2", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/ssri": { + "version": "4.1.6", + "dev": true, + "license": "CC0-1.0", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/stable": { + "version": "0.1.8", + "dev": true, + "license": "MIT" + }, + "node_modules/stackframe": { + "version": "1.3.4", + "dev": true, + "license": "MIT" + }, + "node_modules/stop-iteration-iterator": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "internal-slot": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/stream-browserify": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "~2.0.1", + "readable-stream": "^2.0.2" + } + }, + "node_modules/stream-each": { + "version": "1.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/stream-http": { + "version": "2.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "builtin-status-codes": "^3.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.3.6", + "to-arraybuffer": "^1.0.0", + "xtend": "^4.0.0" + } + }, + "node_modules/stream-shift": { + "version": "1.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/strict-uri-encode": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" 
+ } + }, + "node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/string-argv": { + "version": "0.3.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.6.19" + } + }, + "node_modules/string-convert": { + "version": "0.2.1", + "license": "MIT" + }, + "node_modules/string-width": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width-cjs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/string-width/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { 
+ "node": ">=4" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.11", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "regexp.prototype.flags": "^1.5.2", + "set-function-name": "^2.0.2", + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.9", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.8", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.8", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "5.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^4.1.0" + }, + 
"engines": { + "node": ">=6" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-eof": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/style-search": { + "version": "0.1.0", + "resolved": "https://registry.npm.alibaba-inc.com/style-search/download/style-search-0.1.0.tgz", + "integrity": "sha1-eVjHk+R+MuB9K1yv5cC/jhLneQI=", + "dev": true, + "peer": true + }, + "node_modules/style-to-object": { + "version": "0.4.4", + "dev": true, + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.1.1" + } + }, + "node_modules/styled-components": { + "version": "6.1.8", + "license": "MIT", + "dependencies": { + "@emotion/is-prop-valid": "1.2.1", + "@emotion/unitless": "0.8.0", + "@types/stylis": "4.2.0", + "css-to-react-native": "3.2.0", + "csstype": "3.1.2", + "postcss": "8.4.31", + "shallowequal": "1.1.0", + "stylis": "4.3.1", + "tslib": "2.5.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/styled-components" + }, + "peerDependencies": { + "react": ">= 16.8.0", + "react-dom": ">= 16.8.0" + } + }, + 
"node_modules/styled-components/node_modules/@emotion/unitless": { + "version": "0.8.0", + "license": "MIT" + }, + "node_modules/styled-components/node_modules/csstype": { + "version": "3.1.2", + "license": "MIT" + }, + "node_modules/styled-components/node_modules/stylis": { + "version": "4.3.1", + "license": "MIT" + }, + "node_modules/styled-components/node_modules/tslib": { + "version": "2.5.0", + "license": "0BSD" + }, + "node_modules/stylelint": { + "version": "14.16.1", + "resolved": "https://registry.npm.alibaba-inc.com/stylelint/download/stylelint-14.16.1.tgz", + "integrity": "sha512-ErlzR/T3hhbV+a925/gbfc3f3Fep9/bnspMiJPorfGEmcBbXdS+oo6LrVtoUZ/w9fqD6o6k7PtUlCOsCRdjX/A==", + "dev": true, + "peer": true, + "dependencies": { + "@csstools/selector-specificity": "^2.0.2", + "balanced-match": "^2.0.0", + "colord": "^2.9.3", + "cosmiconfig": "^7.1.0", + "css-functions-list": "^3.1.0", + "debug": "^4.3.4", + "fast-glob": "^3.2.12", + "fastest-levenshtein": "^1.0.16", + "file-entry-cache": "^6.0.1", + "global-modules": "^2.0.0", + "globby": "^11.1.0", + "globjoin": "^0.1.4", + "html-tags": "^3.2.0", + "ignore": "^5.2.1", + "import-lazy": "^4.0.0", + "imurmurhash": "^0.1.4", + "is-plain-object": "^5.0.0", + "known-css-properties": "^0.26.0", + "mathml-tag-names": "^2.1.3", + "meow": "^9.0.0", + "micromatch": "^4.0.5", + "normalize-path": "^3.0.0", + "picocolors": "^1.0.0", + "postcss": "^8.4.19", + "postcss-media-query-parser": "^0.2.3", + "postcss-resolve-nested-selector": "^0.1.1", + "postcss-safe-parser": "^6.0.0", + "postcss-selector-parser": "^6.0.11", + "postcss-value-parser": "^4.2.0", + "resolve-from": "^5.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "style-search": "^0.1.0", + "supports-hyperlinks": "^2.3.0", + "svg-tags": "^1.0.0", + "table": "^6.8.1", + "v8-compile-cache": "^2.3.0", + "write-file-atomic": "^4.0.2" + }, + "bin": { + "stylelint": "bin/stylelint.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + 
"node_modules/stylelint-config-recommended": { + "version": "7.0.0", + "dev": true, + "license": "MIT", + "peerDependencies": { + "stylelint": "^14.4.0" + } + }, + "node_modules/stylelint-config-standard": { + "version": "25.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "stylelint-config-recommended": "^7.0.0" + }, + "peerDependencies": { + "stylelint": "^14.4.0" + } + }, + "node_modules/stylelint/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/ansi-regex/download/ansi-regex-5.0.1.tgz", + "integrity": "sha1-CCyyyJyf6GWaMRpTvWpNxTAdswQ=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/balanced-match": { + "version": "2.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/balanced-match/download/balanced-match-2.0.0.tgz", + "integrity": "sha1-3HD5INeNuLhYU1eVhnv0j4IGM9k=", + "dev": true, + "peer": true + }, + "node_modules/stylelint/node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npm.alibaba-inc.com/cosmiconfig/download/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "dev": true, + "peer": true, + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stylelint/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/stylelint/node_modules/emoji-regex": { + 
"version": "8.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/emoji-regex/download/emoji-regex-8.0.0.tgz", + "integrity": "sha1-6Bj9ac5cz8tARZT4QpY79TFkzDc=", + "dev": true, + "peer": true + }, + "node_modules/stylelint/node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/global-modules/download/global-modules-2.0.0.tgz", + "integrity": "sha1-mXYFrSNF8n9RU5vqJldEISFcd4A=", + "dev": true, + "peer": true, + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/stylelint/node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/global-prefix/download/global-prefix-3.0.0.tgz", + "integrity": "sha1-/IX3MGTfafUEIfR/iD/luRO6m5c=", + "dev": true, + "peer": true, + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/stylelint/node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/import-lazy/download/import-lazy-4.0.0.tgz", + "integrity": "sha1-6OtidIOgpD2jwD8+NVSL5csMwVM=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/is-fullwidth-code-point/download/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha1-8Rb4Bk/pCz94RKOJl8C3UFEmnx0=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/is-plain-object/download/is-plain-object-5.0.0.tgz", + "integrity": "sha1-RCf1CrNCnpAl6n1S6QQ6nvQVk0Q=", + "dev": true, + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stylelint/node_modules/meow": { + "version": "9.0.0", + "resolved": 
"https://registry.npm.alibaba-inc.com/meow/download/meow-9.0.0.tgz", + "integrity": "sha1-zZUQvFysne59A8c+4fmtlZ9Oo2Q=", + "dev": true, + "peer": true, + "dependencies": { + "@types/minimist": "^1.2.0", + "camelcase-keys": "^6.2.2", + "decamelize": "^1.2.0", + "decamelize-keys": "^1.1.0", + "hard-rejection": "^2.1.0", + "minimist-options": "4.1.0", + "normalize-package-data": "^3.0.0", + "read-pkg-up": "^7.0.1", + "redent": "^3.0.0", + "trim-newlines": "^3.0.0", + "type-fest": "^0.18.0", + "yargs-parser": "^20.2.3" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stylelint/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + "integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + }, + "node_modules/stylelint/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/string-width/download/string-width-4.2.3.tgz", + "integrity": "sha1-JpxxF9J7Ba0uU2gwqOyJXvnG0BA=", + "dev": true, + "peer": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-ansi/download/strip-ansi-6.0.1.tgz", + "integrity": "sha1-nibGPTD1NEPpSJSVshBdN7Z6hdk=", + "dev": true, + "peer": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npm.alibaba-inc.com/which/download/which-1.3.1.tgz", + "integrity": "sha1-pFBD1U9YBTFtqNYvn1CRjT2nCwo=", + "dev": true, + "peer": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/stylelint/node_modules/yaml": { + "version": "1.10.2", + "resolved": 
"https://registry.npm.alibaba-inc.com/yaml/download/yaml-1.10.2.tgz", + "integrity": "sha1-IwHF/78StGfejaIzOkWeKeeSDks=", + "dev": true, + "peer": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/stylis": { + "version": "4.3.2", + "license": "MIT" + }, + "node_modules/sucrase": { + "version": "3.35.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sucrase/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/sucrase/node_modules/commander": { + "version": "4.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/sucrase/node_modules/glob": { + "version": "10.3.12", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.6", + "minimatch": "^9.0.1", + "minipass": "^7.0.4", + "path-scurry": "^1.10.2" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sucrase/node_modules/minimatch": { + "version": "9.0.4", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks": { + "version": "2.3.0", + "resolved": 
"https://registry.npm.alibaba-inc.com/supports-hyperlinks/download/supports-hyperlinks-2.3.0.tgz", + "integrity": "sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==", + "dev": true, + "peer": true, + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "dev": true, + "license": "MIT" + }, + "node_modules/svg-pathdata": { + "version": "5.0.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.5" + } + }, + "node_modules/svg-tags": { + "version": "1.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/svg-tags/download/svg-tags-1.0.0.tgz", + "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=", + "dev": true, + "peer": true + }, + "node_modules/svgo": { + "version": "2.8.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/svgo-browser": { + "version": "1.3.8", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^2.4.1", + "coa": "^2.0.2", + "css-select": "^2.0.0", + "css-select-base-adapter": "^0.1.1", + "css-tree": "1.0.0-alpha.37", + "csso": "^4.0.2", + "js-yaml": "^3.13.1", + "mkdirp": "~0.5.1", + "sax": "~1.2.4", + "stable": "^0.1.8", + "unquote": "~1.1.1", + "util.promisify": "~1.0.0" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/svgo-browser/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": 
"MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/svgo-browser/node_modules/argparse": { + "version": "1.0.10", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/svgo-browser/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/svgo-browser/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/svgo-browser/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/svgo-browser/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/svgo-browser/node_modules/js-yaml": { + "version": "3.14.1", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/svgo-browser/node_modules/sax": { + "version": "1.2.4", + "dev": true, + "license": "ISC" + }, + "node_modules/svgo-browser/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/svgo/node_modules/css-select": { + "version": "4.3.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, 
+ "node_modules/svgo/node_modules/css-tree": { + "version": "1.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/svgo/node_modules/css-what": { + "version": "6.1.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/svgo/node_modules/dom-serializer": { + "version": "1.4.1", + "dev": true, + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/svgo/node_modules/domhandler": { + "version": "4.3.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/svgo/node_modules/domutils": { + "version": "2.8.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/svgo/node_modules/entities": { + "version": "2.2.0", + "dev": true, + "license": "BSD-2-Clause", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/svgo/node_modules/mdn-data": { + "version": "2.0.14", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/svgo/node_modules/nth-check": { + "version": "2.1.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/svgo/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + 
"engines": { + "node": ">=0.10.0" + } + }, + "node_modules/svgson": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-rename-keys": "^0.2.1", + "omit-deep": "0.3.0", + "xml-reader": "2.4.3" + } + }, + "node_modules/synckit": { + "version": "0.8.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@pkgr/utils": "^2.3.1", + "tslib": "^2.5.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, + "node_modules/systemjs": { + "version": "6.15.1", + "dev": true, + "license": "MIT" + }, + "node_modules/table": { + "version": "6.8.2", + "resolved": "https://registry.npm.alibaba-inc.com/table/download/table-6.8.2.tgz", + "integrity": "sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==", + "dev": true, + "peer": true, + "dependencies": { + "ajv": "^8.0.1", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/table/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npm.alibaba-inc.com/ajv/download/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dev": true, + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "node_modules/table/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/ansi-regex/download/ansi-regex-5.0.1.tgz", + "integrity": "sha1-CCyyyJyf6GWaMRpTvWpNxTAdswQ=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/table/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/emoji-regex/download/emoji-regex-8.0.0.tgz", + 
"integrity": "sha1-6Bj9ac5cz8tARZT4QpY79TFkzDc=", + "dev": true, + "peer": true + }, + "node_modules/table/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/is-fullwidth-code-point/download/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha1-8Rb4Bk/pCz94RKOJl8C3UFEmnx0=", + "dev": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/table/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/json-schema-traverse/download/json-schema-traverse-1.0.0.tgz", + "integrity": "sha1-rnvLNlard6c7pcSb9lTzjmtoYOI=", + "dev": true, + "peer": true + }, + "node_modules/table/node_modules/slice-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/slice-ansi/download/slice-ansi-4.0.0.tgz", + "integrity": "sha1-UA6N0P1VsFgVCGJVsxla3ypF/ms=", + "dev": true, + "peer": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/table/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/string-width/download/string-width-4.2.3.tgz", + "integrity": "sha1-JpxxF9J7Ba0uU2gwqOyJXvnG0BA=", + "dev": true, + "peer": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/table/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-ansi/download/strip-ansi-6.0.1.tgz", + "integrity": "sha1-nibGPTD1NEPpSJSVshBdN7Z6hdk=", + "dev": true, + "peer": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + 
"node_modules/tar-fs": { + "version": "1.16.3", + "dev": true, + "license": "MIT", + "dependencies": { + "chownr": "^1.0.1", + "mkdirp": "^0.5.1", + "pump": "^1.0.0", + "tar-stream": "^1.1.2" + } + }, + "node_modules/tar-stream": { + "version": "1.6.2", + "dev": true, + "license": "MIT", + "dependencies": { + "bl": "^1.0.0", + "buffer-alloc": "^1.2.0", + "end-of-stream": "^1.0.0", + "fs-constants": "^1.0.0", + "readable-stream": "^2.3.0", + "to-buffer": "^1.1.1", + "xtend": "^4.0.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/term-size": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^0.7.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/term-size/node_modules/cross-spawn": { + "version": "5.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "lru-cache": "^4.0.1", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "node_modules/term-size/node_modules/execa": { + "version": "0.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^5.0.1", + "get-stream": "^3.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/term-size/node_modules/get-stream": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/term-size/node_modules/is-stream": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/term-size/node_modules/lru-cache": { + "version": "4.1.5", + "dev": true, + "license": "ISC", + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/term-size/node_modules/npm-run-path": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/term-size/node_modules/path-key": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/term-size/node_modules/shebang-command": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/term-size/node_modules/shebang-regex": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/term-size/node_modules/which": { + "version": "1.3.1", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/term-size/node_modules/yallist": { + "version": "2.1.2", + "dev": true, + "license": "ISC" + }, + "node_modules/terser": { + "version": "5.31.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.10", + "resolved": "https://registry.npm.alibaba-inc.com/terser-webpack-plugin/download/terser-webpack-plugin-5.3.10.tgz", + "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "dev": true, + "peer": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.20", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.26.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + 
"resolved": "https://registry.npm.alibaba-inc.com/jest-worker/download/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dev": true, + "peer": true, + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npm.alibaba-inc.com/supports-color/download/supports-color-8.1.1.tgz", + "integrity": "sha1-zW/BfihQDP9WwbhsCn/UpUpzAFw=", + "dev": true, + "peer": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-extensions": { + "version": "1.9.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/text-table/download/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", + "dev": true, + "peer": true + }, + "node_modules/textextensions": { + "version": "2.6.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/thread-stream": { + "version": "0.15.2", + "dev": true, + "license": "MIT", + "dependencies": { + 
"real-require": "^0.1.0" + } + }, + "node_modules/throttle-debounce": { + "version": "5.0.0", + "license": "MIT", + "engines": { + "node": ">=12.22" + } + }, + "node_modules/through": { + "version": "2.3.8", + "dev": true, + "license": "MIT" + }, + "node_modules/through2": { + "version": "4.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "readable-stream": "3" + } + }, + "node_modules/through2/node_modules/readable-stream": { + "version": "3.6.2", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/timed-out": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/timers-browserify": { + "version": "2.0.12", + "dev": true, + "license": "MIT", + "dependencies": { + "setimmediate": "^1.0.4" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/titleize": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tmp": { + "version": "0.0.33", + "dev": true, + "license": "MIT", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-arraybuffer": { + "version": "1.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/to-buffer": { + "version": "1.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toggle-selection": { + "version": 
"1.0.6", + "license": "MIT" + }, + "node_modules/transformation-matrix": { + "version": "2.16.1", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/chrvadala" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trim-newlines": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/ts-node": { + "version": "10.9.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/ts-node/node_modules/arg": { + "version": "4.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/ts-toolbelt": { + "version": "9.6.0", + "dev": true, + "license": "Apache-2.0" + }, + 
"node_modules/tslib": { + "version": "2.6.2", + "dev": true, + "license": "0BSD" + }, + "node_modules/tsutils": { + "version": "3.21.0", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^1.8.1" + }, + "engines": { + "node": ">= 6" + }, + "peerDependencies": { + "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" + } + }, + "node_modules/tsutils/node_modules/tslib": { + "version": "1.14.1", + "dev": true, + "license": "0BSD" + }, + "node_modules/tsx": { + "version": "3.12.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@esbuild-kit/cjs-loader": "^2.4.1", + "@esbuild-kit/core-utils": "^3.0.0", + "@esbuild-kit/esm-loader": "^2.5.4" + }, + "bin": { + "tsx": "dist/cli.js" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/tty-browserify": { + "version": "0.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npm.alibaba-inc.com/type-check/download/type-check-0.4.0.tgz", + "integrity": "sha1-B7ggO/pwVsBlcFDjzNLDdzC6uPE=", + "dev": true, + "peer": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.18.1", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" 
+ }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.6", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typedarray": { + "version": "0.0.6", + "dev": true, + "license": "MIT" + }, + "node_modules/types-ramda": { + "version": "0.29.10", + "dev": true, + "license": "MIT", + "dependencies": { + "ts-toolbelt": "^9.6.0" + } + }, + "node_modules/typescript": { + "version": "5.4.5", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/umi": { + "version": "4.1.10", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "7.23.6", + "@umijs/bundler-utils": "4.1.10", + "@umijs/bundler-webpack": "4.1.10", + "@umijs/core": "4.1.10", + "@umijs/lint": "4.1.10", + "@umijs/preset-umi": "4.1.10", + "@umijs/renderer-react": "4.1.10", + "@umijs/server": "4.1.10", + "@umijs/test": "4.1.10", + "@umijs/utils": "4.1.10", + "prettier-plugin-organize-imports": "^3.2.2", + "prettier-plugin-packagejson": "2.4.3" + }, + "bin": { + "umi": "bin/umi.js" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/umi/node_modules/@babel/runtime": { + 
"version": "7.23.6", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/unbox-primitive": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-bigints": "^1.0.2", + "has-symbols": "^1.0.3", + "which-boxed-primitive": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/unfetch": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "workspaces": [ + "./packages/isomorphic-unfetch" + ] + }, + "node_modules/unified": { + "version": "10.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "bail": "^2.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified/node_modules/is-plain-obj": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unique-filename": { + "version": "1.1.1", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^2.0.0" + } + }, + "node_modules/unique-slug": { + "version": "2.0.2", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + } + }, + "node_modules/unique-string": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "crypto-random-string": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unist-util-filter": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.0.0" + } + }, + "node_modules/unist-util-generated": { + "version": "2.0.1", + "dev": true, + 
"license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "5.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "4.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "4.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "5.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unquote": { + "version": "1.1.1", + "dev": true, + "license": "MIT" + }, + "node_modules/unset-value": { + "version": "0.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/untildify": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": 
">=8" + } + }, + "node_modules/unzip-response": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.13", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "2.5.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boxen": "^1.2.1", + "chalk": "^2.0.1", + "configstore": "^3.0.0", + "import-lazy": "^2.1.0", + "is-ci": "^1.0.10", + "is-installed-globally": "^0.1.0", + "is-npm": "^1.0.0", + "latest-version": "^3.0.0", + "semver-diff": "^2.0.0", + "xdg-basedir": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/update-notifier/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/update-notifier/node_modules/color-convert": { + "version": "1.9.3", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/update-notifier/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/update-notifier/node_modules/has-flag": { + "version": "3.0.0", + 
"dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/update-notifier/node_modules/supports-color": { + "version": "5.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uri-js/node_modules/punycode": { + "version": "2.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/url": { + "version": "0.11.3", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^1.4.1", + "qs": "^6.11.2" + } + }, + "node_modules/url-parse-lax": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "prepend-http": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/use-isomorphic-layout-effect": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/util": { + "version": "0.11.1", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "2.0.3" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/util.promisify": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.2", + "has-symbols": "^1.0.1", + "object.getownpropertydescriptors": "^2.1.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/inherits": { + "version": "2.0.3", + "dev": true, + "license": "ISC" + }, + "node_modules/utila": { + "version": "0.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/uuid": { + "version": "8.3.2", + "dev": true, + "license": 
"MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/uvu": { + "version": "0.5.6", + "dev": true, + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0", + "diff": "^5.0.0", + "kleur": "^4.0.3", + "sade": "^1.7.3" + }, + "bin": { + "uvu": "bin.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/uvu/node_modules/diff": { + "version": "5.2.0", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/v8-compile-cache": { + "version": "2.3.0", + "dev": true, + "license": "MIT" + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/validate-npm-package-name": { + "version": "3.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "builtins": "^1.0.3" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "5.3.7", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^3.0.0", + "vfile-message": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "vfile": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "3.1.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/vite": { + "version": "4.5.2", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.18.10", + "postcss": "^8.4.27", + "rollup": "^3.27.1" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@types/node": ">= 14", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-arm64": { + "version": "0.18.20", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/esbuild": { + "version": "0.18.20", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.18.20", + "@esbuild/android-arm64": "0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + 
"@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" + } + }, + "node_modules/vm-browserify": { + "version": "1.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/walker": { + "version": "1.0.8", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/watchpack": { + "version": "2.4.0", + "resolved": "https://registry.npm.alibaba-inc.com/watchpack/download/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "dev": true, + "peer": true, + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "dev": true, + "license": "MIT", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/webpack": { + "version": "5.89.0", + "resolved": "https://registry.npm.alibaba-inc.com/webpack/download/webpack-5.89.0.tgz", + "integrity": "sha512-qyfIC10pOr70V+jkmud8tMfajraGCZMBWJtrmuBymQKCrLTRejBI8STDp1MCyZu/QTdZSeacCQYpYNQVOzX5kw==", + "dev": true, + "peer": true, + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.0", + "@webassemblyjs/ast": "^1.11.5", + "@webassemblyjs/wasm-edit": "^1.11.5", + "@webassemblyjs/wasm-parser": "^1.11.5", + "acorn": "^8.7.1", + "acorn-import-assertions": "^1.9.0", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.15.0", + 
"es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.7", + "watchpack": "^2.4.0", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/webpack-sources/download/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "dev": true, + "peer": true, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.1.3", + "dev": true, + "license": "MIT", + "dependencies": { + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "engines": 
{ + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type/node_modules/isarray": { + "version": "2.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/which-collection": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.15", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/widest-line": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "string-width": "^2.1.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "dev": true, + "license": 
"MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "5.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + 
"engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/xdg-basedir": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/xml-lexer": { + "version": "0.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^2.0.0" + } + }, + "node_modules/xml-lexer/node_modules/eventemitter3": { + "version": "2.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/xml-reader": { + "version": "2.4.3", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^2.0.0", + "xml-lexer": "^0.2.2" + } + }, + "node_modules/xml-reader/node_modules/eventemitter3": { + "version": "2.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/xtend": { + "version": "4.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "3.2.2", + "dev": true, + "license": "ISC" + }, + "node_modules/yallist": { + "version": "3.1.1", + "dev": true, + "license": "ISC" + }, + "node_modules/yaml": { + "version": "2.3.1", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 14" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "8.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/yargs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/y18n": { + "version": "5.0.8", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs/node_modules/yargs-parser": { + "version": "21.1.1", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + }, + "dependencies": { + "@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npm.alibaba-inc.com/@aashutoshrathi/word-wrap/download/@aashutoshrathi/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "peer": true + }, + "@ampproject/remapping": { + "version": "2.3.0", + "dev": true, + "requires": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "@ant-design/colors": { + "version": "7.0.2", + "requires": { + "@ctrl/tinycolor": "^3.6.1" + } + }, + "@ant-design/cssinjs": { + "version": "1.20.0", + "requires": 
{ + "@babel/runtime": "^7.11.1", + "@emotion/hash": "^0.8.0", + "@emotion/unitless": "^0.7.5", + "classnames": "^2.3.1", + "csstype": "^3.1.3", + "rc-util": "^5.35.0", + "stylis": "^4.0.13" + } + }, + "@ant-design/icons": { + "version": "5.3.6", + "requires": { + "@ant-design/colors": "^7.0.0", + "@ant-design/icons-svg": "^4.4.0", + "@babel/runtime": "^7.11.2", + "classnames": "^2.2.6", + "rc-util": "^5.31.1" + } + }, + "@ant-design/icons-svg": { + "version": "4.4.2" + }, + "@ant-design/react-slick": { + "version": "1.1.2", + "requires": { + "@babel/runtime": "^7.10.4", + "classnames": "^2.2.5", + "json2mq": "^0.2.0", + "resize-observer-polyfill": "^1.5.1", + "throttle-debounce": "^5.0.0" + } + }, + "@antfu/install-pkg": { + "version": "0.1.1", + "dev": true, + "requires": { + "execa": "^5.1.1", + "find-up": "^5.0.0" + } + }, + "@antfu/utils": { + "version": "0.7.7", + "dev": true + }, + "@babel/code-frame": { + "version": "7.24.2", + "dev": true, + "requires": { + "@babel/highlight": "^7.24.2", + "picocolors": "^1.0.0" + } + }, + "@babel/compat-data": { + "version": "7.24.4", + "dev": true + }, + "@babel/core": { + "version": "7.24.5", + "dev": true, + "requires": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.24.5", + "@babel/helpers": "^7.24.5", + "@babel/parser": "^7.24.5", + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + }, + "semver": { + "version": "6.3.1", + "dev": true + } + } + }, + "@babel/eslint-parser": { + "version": "7.23.3", + "dev": true, + "requires": { + 
"@nicolo-ribaudo/eslint-scope-5-internals": "5.1.1-v1", + "eslint-visitor-keys": "^2.1.0", + "semver": "^6.3.1" + }, + "dependencies": { + "semver": { + "version": "6.3.1", + "dev": true + } + } + }, + "@babel/generator": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/types": "^7.24.5", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + } + }, + "@babel/helper-compilation-targets": { + "version": "7.23.6", + "dev": true, + "requires": { + "@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "dependencies": { + "lru-cache": { + "version": "5.1.1", + "dev": true, + "requires": { + "yallist": "^3.0.2" + } + }, + "semver": { + "version": "6.3.1", + "dev": true + } + } + }, + "@babel/helper-environment-visitor": { + "version": "7.22.20", + "dev": true + }, + "@babel/helper-function-name": { + "version": "7.23.0", + "dev": true, + "requires": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + } + }, + "@babel/helper-hoist-variables": { + "version": "7.22.5", + "dev": true, + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-module-imports": { + "version": "7.24.3", + "dev": true, + "requires": { + "@babel/types": "^7.24.0" + } + }, + "@babel/helper-module-transforms": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.24.3", + "@babel/helper-simple-access": "^7.24.5", + "@babel/helper-split-export-declaration": "^7.24.5", + "@babel/helper-validator-identifier": "^7.24.5" + } + }, + "@babel/helper-plugin-utils": { + "version": "7.24.5", + "dev": true + }, + "@babel/helper-simple-access": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/types": "^7.24.5" + } + }, + "@babel/helper-split-export-declaration": { + "version": "7.24.5", + "dev": true, + 
"requires": { + "@babel/types": "^7.24.5" + } + }, + "@babel/helper-string-parser": { + "version": "7.24.1", + "dev": true + }, + "@babel/helper-validator-identifier": { + "version": "7.24.5", + "dev": true + }, + "@babel/helper-validator-option": { + "version": "7.23.5", + "dev": true + }, + "@babel/helpers": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5" + } + }, + "@babel/highlight": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.24.5", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "@babel/parser": { + "version": "7.24.5", + "dev": true + }, + "@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.12.13" + } + }, + "@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-syntax-json-strings": { + 
"version": "7.8.3", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" + } + }, + "@babel/plugin-transform-modules-commonjs": { + "version": "7.23.3", + "dev": true, + "requires": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + } + }, + "@babel/plugin-transform-react-jsx-self": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.24.5" + } + }, + "@babel/plugin-transform-react-jsx-source": { + "version": "7.24.1", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.24.0" + } + }, + "@babel/runtime": { + "version": "7.24.5", + "requires": { + "regenerator-runtime": "^0.14.0" + } + }, + "@babel/template": { + "version": "7.24.0", + "dev": true, + "requires": { + "@babel/code-frame": "^7.23.5", + "@babel/parser": "^7.24.0", + "@babel/types": "^7.24.0" + 
} + }, + "@babel/traverse": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.24.5", + "@babel/parser": "^7.24.5", + "@babel/types": "^7.24.5", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "@babel/types": { + "version": "7.24.5", + "dev": true, + "requires": { + "@babel/helper-string-parser": "^7.24.1", + "@babel/helper-validator-identifier": "^7.24.5", + "to-fast-properties": "^2.0.0" + } + }, + "@bloomberg/record-tuple-polyfill": { + "version": "0.0.4", + "dev": true + }, + "@commitlint/cli": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/format": "^17.8.1", + "@commitlint/lint": "^17.8.1", + "@commitlint/load": "^17.8.1", + "@commitlint/read": "^17.8.1", + "@commitlint/types": "^17.8.1", + "execa": "^5.0.0", + "lodash.isfunction": "^3.0.9", + "resolve-from": "5.0.0", + "resolve-global": "1.0.0", + "yargs": "^17.0.0" + } + }, + "@commitlint/config-conventional": { + "version": "17.8.1", + "dev": true, + "requires": { + "conventional-changelog-conventionalcommits": "^6.1.0" + } + }, + "@commitlint/config-validator": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/types": "^17.8.1", + "ajv": "^8.11.0" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npm.alibaba-inc.com/ajv/download/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + 
"uri-js": "^4.2.2" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/json-schema-traverse/download/json-schema-traverse-1.0.0.tgz", + "integrity": "sha1-rnvLNlard6c7pcSb9lTzjmtoYOI=", + "dev": true + } + } + }, + "@commitlint/ensure": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/types": "^17.8.1", + "lodash.camelcase": "^4.3.0", + "lodash.kebabcase": "^4.1.1", + "lodash.snakecase": "^4.1.1", + "lodash.startcase": "^4.4.0", + "lodash.upperfirst": "^4.3.1" + } + }, + "@commitlint/execute-rule": { + "version": "17.8.1", + "dev": true + }, + "@commitlint/format": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/types": "^17.8.1", + "chalk": "^4.1.0" + } + }, + "@commitlint/is-ignored": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/types": "^17.8.1", + "semver": "7.5.4" + } + }, + "@commitlint/lint": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/is-ignored": "^17.8.1", + "@commitlint/parse": "^17.8.1", + "@commitlint/rules": "^17.8.1", + "@commitlint/types": "^17.8.1" + } + }, + "@commitlint/load": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/config-validator": "^17.8.1", + "@commitlint/execute-rule": "^17.8.1", + "@commitlint/resolve-extends": "^17.8.1", + "@commitlint/types": "^17.8.1", + "@types/node": "20.5.1", + "chalk": "^4.1.0", + "cosmiconfig": "^8.0.0", + "cosmiconfig-typescript-loader": "^4.0.0", + "lodash.isplainobject": "^4.0.6", + "lodash.merge": "^4.6.2", + "lodash.uniq": "^4.5.0", + "resolve-from": "^5.0.0", + "ts-node": "^10.8.1", + "typescript": "^4.6.4 || ^5.2.2" + } + }, + "@commitlint/message": { + "version": "17.8.1", + "dev": true + }, + "@commitlint/parse": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/types": "^17.8.1", + "conventional-changelog-angular": "^6.0.0", + "conventional-commits-parser": "^4.0.0" + } + }, + "@commitlint/read": { + 
"version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/top-level": "^17.8.1", + "@commitlint/types": "^17.8.1", + "fs-extra": "^11.0.0", + "git-raw-commits": "^2.0.11", + "minimist": "^1.2.6" + } + }, + "@commitlint/resolve-extends": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/config-validator": "^17.8.1", + "@commitlint/types": "^17.8.1", + "import-fresh": "^3.0.0", + "lodash.mergewith": "^4.6.2", + "resolve-from": "^5.0.0", + "resolve-global": "^1.0.0" + } + }, + "@commitlint/rules": { + "version": "17.8.1", + "dev": true, + "requires": { + "@commitlint/ensure": "^17.8.1", + "@commitlint/message": "^17.8.1", + "@commitlint/to-lines": "^17.8.1", + "@commitlint/types": "^17.8.1", + "execa": "^5.0.0" + } + }, + "@commitlint/to-lines": { + "version": "17.8.1", + "dev": true + }, + "@commitlint/top-level": { + "version": "17.8.1", + "dev": true, + "requires": { + "find-up": "^5.0.0" + } + }, + "@commitlint/types": { + "version": "17.8.1", + "dev": true, + "requires": { + "chalk": "^4.1.0" + } + }, + "@cspotcode/source-map-support": { + "version": "0.8.1", + "dev": true, + "requires": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "dependencies": { + "@jridgewell/trace-mapping": { + "version": "0.3.9", + "dev": true, + "requires": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + } + } + }, + "@csstools/postcss-color-function": { + "version": "1.1.1", + "dev": true, + "requires": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-font-format-keywords": { + "version": "1.0.1", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-hwb-function": { + "version": "1.0.2", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-ic-unit": { + "version": "1.0.1", + "dev": true, + "requires": { + 
"@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-is-pseudo-class": { + "version": "2.0.7", + "dev": true, + "requires": { + "@csstools/selector-specificity": "^2.0.0", + "postcss-selector-parser": "^6.0.10" + } + }, + "@csstools/postcss-normalize-display-values": { + "version": "1.0.1", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-oklab-function": { + "version": "1.1.1", + "dev": true, + "requires": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-progressive-custom-properties": { + "version": "1.3.0", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-stepped-value-functions": { + "version": "1.0.1", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "@csstools/postcss-unset-value": { + "version": "1.0.2", + "dev": true, + "requires": {} + }, + "@csstools/selector-specificity": { + "version": "2.2.0", + "dev": true, + "requires": {} + }, + "@ctrl/tinycolor": { + "version": "3.6.1" + }, + "@emotion/hash": { + "version": "0.8.0" + }, + "@emotion/is-prop-valid": { + "version": "1.2.1", + "requires": { + "@emotion/memoize": "^0.8.1" + } + }, + "@emotion/memoize": { + "version": "0.8.1" + }, + "@emotion/unitless": { + "version": "0.7.5" + }, + "@esbuild-kit/cjs-loader": { + "version": "2.4.4", + "dev": true, + "requires": { + "@esbuild-kit/core-utils": "^3.2.3", + "get-tsconfig": "^4.7.0" + } + }, + "@esbuild-kit/core-utils": { + "version": "3.3.2", + "dev": true, + "requires": { + "esbuild": "~0.18.20", + "source-map-support": "^0.5.21" + }, + "dependencies": { + "@esbuild/darwin-arm64": { + "version": "0.18.20", + "dev": true, + "optional": true + }, + "esbuild": { + "version": "0.18.20", + "dev": true, + "requires": { + "@esbuild/android-arm": "0.18.20", + "@esbuild/android-arm64": 
"0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + "@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" + } + } + } + }, + "@esbuild-kit/esm-loader": { + "version": "2.6.5", + "dev": true, + "requires": { + "@esbuild-kit/core-utils": "^3.3.2", + "get-tsconfig": "^4.7.0" + } + }, + "@esbuild/darwin-arm64": { + "version": "0.17.19", + "dev": true, + "optional": true + }, + "@eslint-community/eslint-utils": { + "version": "4.4.0", + "dev": true, + "requires": { + "eslint-visitor-keys": "^3.3.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "3.4.3", + "dev": true + } + } + }, + "@eslint-community/regexpp": { + "version": "4.10.0", + "dev": true + }, + "@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npm.alibaba-inc.com/@eslint/eslintrc/download/@eslint/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "peer": true, + "requires": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + 
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "requires": { + "ms": "2.1.2" + } + }, + "globals": { + "version": "13.24.0", + "resolved": "https://registry.npm.alibaba-inc.com/globals/download/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "peer": true, + "requires": { + "type-fest": "^0.20.2" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + "integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-json-comments/download/strip-json-comments-3.1.1.tgz", + "integrity": "sha1-MfEoGzgyYwQ0gxwxDAHMzajL4AY=", + "dev": true, + "peer": true + }, + "type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npm.alibaba-inc.com/type-fest/download/type-fest-0.20.2.tgz", + "integrity": "sha1-G/IH9LKPkVg2ZstfvTJ4hzAc1fQ=", + "dev": true, + "peer": true + } + } + }, + "@eslint/js": { + "version": "8.57.0", + "resolved": "https://registry.npm.alibaba-inc.com/@eslint/js/download/@eslint/js-8.57.0.tgz", + "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", + "dev": true, + "peer": true + }, + "@floating-ui/core": { + "version": "0.6.2", + "dev": true + }, + "@floating-ui/dom": { + "version": "0.4.5", + "dev": true, + "requires": { + "@floating-ui/core": "^0.6.2" + } + }, + "@floating-ui/react-dom": { + "version": "0.6.3", + "dev": true, + "requires": { + "@floating-ui/dom": "^0.4.5", + "use-isomorphic-layout-effect": "^1.1.1" + } + }, + "@floating-ui/react-dom-interactions": { + "version": "0.3.1", + "dev": true, + "requires": { + "@floating-ui/react-dom": "^0.6.3", + "aria-hidden": "^1.1.3", + 
"point-in-polygon": "^1.1.0", + "use-isomorphic-layout-effect": "^1.1.1" + } + }, + "@formatjs/ecma402-abstract": { + "version": "1.18.2", + "dev": true, + "requires": { + "@formatjs/intl-localematcher": "0.5.4", + "tslib": "^2.4.0" + } + }, + "@formatjs/fast-memoize": { + "version": "2.2.0", + "dev": true, + "requires": { + "tslib": "^2.4.0" + } + }, + "@formatjs/icu-messageformat-parser": { + "version": "2.7.6", + "dev": true, + "requires": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/icu-skeleton-parser": "1.8.0", + "tslib": "^2.4.0" + } + }, + "@formatjs/icu-skeleton-parser": { + "version": "1.8.0", + "dev": true, + "requires": { + "@formatjs/ecma402-abstract": "1.18.2", + "tslib": "^2.4.0" + } + }, + "@formatjs/intl": { + "version": "2.10.1", + "dev": true, + "requires": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/fast-memoize": "2.2.0", + "@formatjs/icu-messageformat-parser": "2.7.6", + "@formatjs/intl-displaynames": "6.6.6", + "@formatjs/intl-listformat": "7.5.5", + "intl-messageformat": "10.5.11", + "tslib": "^2.4.0" + } + }, + "@formatjs/intl-displaynames": { + "version": "6.6.6", + "dev": true, + "requires": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/intl-localematcher": "0.5.4", + "tslib": "^2.4.0" + } + }, + "@formatjs/intl-listformat": { + "version": "7.5.5", + "dev": true, + "requires": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/intl-localematcher": "0.5.4", + "tslib": "^2.4.0" + } + }, + "@formatjs/intl-localematcher": { + "version": "0.5.4", + "dev": true, + "requires": { + "tslib": "^2.4.0" + } + }, + "@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npm.alibaba-inc.com/@humanwhocodes/config-array/download/@humanwhocodes/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "dev": true, + "peer": true, + "requires": { + "@humanwhocodes/object-schema": "^2.0.2", + 
"debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + "integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + } + } + }, + "@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/@humanwhocodes/module-importer/download/@humanwhocodes/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "peer": true + }, + "@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npm.alibaba-inc.com/@humanwhocodes/object-schema/download/@humanwhocodes/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "dev": true, + "peer": true + }, + "@iconify/types": { + "version": "2.0.0", + "dev": true + }, + "@iconify/utils": { + "version": "2.1.1", + "dev": true, + "requires": { + "@antfu/install-pkg": "^0.1.1", + "@antfu/utils": "^0.7.2", + "@iconify/types": "^2.0.0", + "debug": "^4.3.4", + "kolorist": "^1.6.0", + "local-pkg": "^0.4.2" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "@isaacs/cliui": { + "version": "8.0.2", + "dev": true, + "requires": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + 
"wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + "dev": true + }, + "string-width": { + "version": "5.1.2", + "dev": true, + "requires": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + } + }, + "strip-ansi": { + "version": "7.1.0", + "dev": true, + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "dependencies": { + "argparse": { + "version": "1.0.10", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "find-up": { + "version": "4.1.0", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "js-yaml": { + "version": "3.14.1", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + } + } + }, + "@istanbuljs/schema": { + "version": "0.1.3", + "dev": true + }, + "@jest/schemas": { + "version": "29.6.3", + "dev": true, + "requires": { + "@sinclair/typebox": "^0.27.8" + } + }, + "@jest/transform": { + "version": "29.7.0", + "dev": true, + "requires": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + 
"write-file-atomic": "^4.0.2" + }, + "dependencies": { + "@jest/types": { + "version": "29.6.3", + "dev": true, + "requires": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + } + }, + "@types/yargs": { + "version": "17.0.32", + "dev": true, + "requires": { + "@types/yargs-parser": "*" + } + } + } + }, + "@jest/types": { + "version": "27.5.1", + "dev": true, + "requires": { + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^16.0.0", + "chalk": "^4.0.0" + } + }, + "@jridgewell/gen-mapping": { + "version": "0.3.5", + "dev": true, + "requires": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "@jridgewell/resolve-uri": { + "version": "3.1.2", + "dev": true + }, + "@jridgewell/set-array": { + "version": "1.2.1", + "dev": true + }, + "@jridgewell/source-map": { + "version": "0.3.6", + "dev": true, + "requires": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "dev": true + }, + "@jridgewell/trace-mapping": { + "version": "0.3.25", + "dev": true, + "requires": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "@loadable/component": { + "version": "5.15.2", + "dev": true, + "requires": { + "@babel/runtime": "^7.7.7", + "hoist-non-react-statics": "^3.3.1", + "react-is": "^16.12.0" + }, + "dependencies": { + "react-is": { + "version": "16.13.1", + "dev": true + } + } + }, + "@makotot/ghostui": { + "version": "2.0.0", + "dev": true, + "requires": {} + }, + "@nicolo-ribaudo/eslint-scope-5-internals": { + "version": "5.1.1-v1", + "dev": true, + "requires": { + "eslint-scope": "5.1.1" + } + }, + "@nodelib/fs.scandir": { + "version": "2.1.5", 
+ "dev": true, + "requires": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + } + }, + "@nodelib/fs.stat": { + "version": "2.0.5", + "dev": true + }, + "@nodelib/fs.walk": { + "version": "1.2.8", + "dev": true, + "requires": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + } + }, + "@pkgjs/parseargs": { + "version": "0.11.0", + "dev": true, + "optional": true + }, + "@pkgr/utils": { + "version": "2.4.2", + "dev": true, + "requires": { + "cross-spawn": "^7.0.3", + "fast-glob": "^3.3.0", + "is-glob": "^4.0.3", + "open": "^9.1.0", + "picocolors": "^1.0.0", + "tslib": "^2.6.0" + }, + "dependencies": { + "fast-glob": { + "version": "3.3.2", + "dev": true, + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + } + }, + "is-docker": { + "version": "2.2.1", + "dev": true + }, + "is-wsl": { + "version": "2.2.0", + "dev": true, + "requires": { + "is-docker": "^2.0.0" + } + }, + "open": { + "version": "9.1.0", + "dev": true, + "requires": { + "default-browser": "^4.0.0", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "is-wsl": "^2.2.0" + } + } + } + }, + "@rc-component/color-picker": { + "version": "1.5.3", + "requires": { + "@babel/runtime": "^7.23.6", + "@ctrl/tinycolor": "^3.6.1", + "classnames": "^2.2.6", + "rc-util": "^5.38.1" + } + }, + "@rc-component/context": { + "version": "1.4.0", + "requires": { + "@babel/runtime": "^7.10.1", + "rc-util": "^5.27.0" + } + }, + "@rc-component/mini-decimal": { + "version": "1.1.0", + "requires": { + "@babel/runtime": "^7.18.0" + } + }, + "@rc-component/mutate-observer": { + "version": "1.1.0", + "requires": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + } + }, + "@rc-component/portal": { + "version": "1.1.2", + "requires": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + } + }, + "@rc-component/tour": { + "version": 
"1.14.2", + "requires": { + "@babel/runtime": "^7.18.0", + "@rc-component/portal": "^1.0.0-9", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + } + }, + "@rc-component/trigger": { + "version": "2.1.1", + "requires": { + "@babel/runtime": "^7.23.2", + "@rc-component/portal": "^1.1.0", + "classnames": "^2.3.2", + "rc-motion": "^2.0.0", + "rc-resize-observer": "^1.3.1", + "rc-util": "^5.38.0" + } + }, + "@selderee/plugin-htmlparser2": { + "version": "0.11.0", + "dev": true, + "requires": { + "domhandler": "^5.0.3", + "selderee": "^0.11.0" + } + }, + "@sinclair/typebox": { + "version": "0.27.8", + "dev": true + }, + "@sketch-hq/sketch-file-format-ts": { + "version": "6.5.0", + "dev": true + }, + "@stackblitz/sdk": { + "version": "1.9.0", + "dev": true + }, + "@stylelint/postcss-css-in-js": { + "version": "0.38.0", + "dev": true, + "requires": { + "@babel/core": "^7.17.9" + } + }, + "@svgr/babel-plugin-add-jsx-attribute": { + "version": "6.5.1", + "dev": true, + "requires": {} + }, + "@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "dev": true, + "requires": {} + }, + "@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "dev": true, + "requires": {} + }, + "@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "6.5.1", + "dev": true, + "requires": {} + }, + "@svgr/babel-plugin-svg-dynamic-title": { + "version": "6.5.1", + "dev": true, + "requires": {} + }, + "@svgr/babel-plugin-svg-em-dimensions": { + "version": "6.5.1", + "dev": true, + "requires": {} + }, + "@svgr/babel-plugin-transform-react-native-svg": { + "version": "6.5.1", + "dev": true, + "requires": {} + }, + "@svgr/babel-plugin-transform-svg-component": { + "version": "6.5.1", + "dev": true, + "requires": {} + }, + "@svgr/babel-preset": { + "version": "6.5.1", + "dev": true, + "requires": { + "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", + "@svgr/babel-plugin-remove-jsx-attribute": "*", + 
"@svgr/babel-plugin-remove-jsx-empty-expression": "*", + "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", + "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", + "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", + "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", + "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + } + }, + "@svgr/core": { + "version": "6.5.1", + "dev": true, + "requires": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "camelcase": "^6.2.0", + "cosmiconfig": "^7.0.1" + }, + "dependencies": { + "camelcase": { + "version": "6.3.0", + "dev": true + }, + "cosmiconfig": { + "version": "7.1.0", + "dev": true, + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + } + }, + "yaml": { + "version": "1.10.2", + "dev": true + } + } + }, + "@svgr/hast-util-to-babel-ast": { + "version": "6.5.1", + "dev": true, + "requires": { + "@babel/types": "^7.20.0", + "entities": "^4.4.0" + } + }, + "@svgr/plugin-jsx": { + "version": "6.5.1", + "dev": true, + "requires": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/hast-util-to-babel-ast": "^6.5.1", + "svg-parser": "^2.0.4" + } + }, + "@svgr/plugin-svgo": { + "version": "6.5.1", + "dev": true, + "requires": { + "cosmiconfig": "^7.0.1", + "deepmerge": "^4.2.2", + "svgo": "^2.8.0" + }, + "dependencies": { + "cosmiconfig": { + "version": "7.1.0", + "dev": true, + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + } + }, + "yaml": { + "version": "1.10.2", + "dev": true + } + } + }, + "@swc/core": { + "version": "1.4.2", + "dev": true, + "requires": { + "@swc/core-darwin-arm64": "1.4.2", + "@swc/core-darwin-x64": "1.4.2", + "@swc/core-linux-arm-gnueabihf": "1.4.2", + "@swc/core-linux-arm64-gnu": "1.4.2", + "@swc/core-linux-arm64-musl": 
"1.4.2", + "@swc/core-linux-x64-gnu": "1.4.2", + "@swc/core-linux-x64-musl": "1.4.2", + "@swc/core-win32-arm64-msvc": "1.4.2", + "@swc/core-win32-ia32-msvc": "1.4.2", + "@swc/core-win32-x64-msvc": "1.4.2", + "@swc/counter": "^0.1.2", + "@swc/types": "^0.1.5" + } + }, + "@swc/core-darwin-arm64": { + "version": "1.4.2", + "dev": true, + "optional": true + }, + "@swc/counter": { + "version": "0.1.3", + "dev": true + }, + "@swc/types": { + "version": "0.1.6", + "dev": true, + "requires": { + "@swc/counter": "^0.1.3" + } + }, + "@trysound/sax": { + "version": "0.2.0", + "dev": true + }, + "@tsconfig/node10": { + "version": "1.0.11", + "dev": true + }, + "@tsconfig/node12": { + "version": "1.0.11", + "dev": true + }, + "@tsconfig/node14": { + "version": "1.0.3", + "dev": true + }, + "@tsconfig/node16": { + "version": "1.0.4", + "dev": true + }, + "@types/babel__core": { + "version": "7.20.5", + "dev": true, + "requires": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "@types/babel__generator": { + "version": "7.6.8", + "dev": true, + "requires": { + "@babel/types": "^7.0.0" + } + }, + "@types/babel__template": { + "version": "7.4.4", + "dev": true, + "requires": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "@types/babel__traverse": { + "version": "7.20.5", + "dev": true, + "requires": { + "@babel/types": "^7.20.7" + } + }, + "@types/debug": { + "version": "4.1.12", + "dev": true, + "requires": { + "@types/ms": "*" + } + }, + "@types/eslint": { + "version": "8.56.10", + "resolved": "https://registry.npm.alibaba-inc.com/@types/eslint/download/@types/eslint-8.56.10.tgz", + "integrity": "sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", + "dev": true, + "peer": true, + "requires": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "@types/eslint-scope": { + "version": 
"3.7.5", + "resolved": "https://registry.npm.alibaba-inc.com/@types/eslint-scope/download/@types/eslint-scope-3.7.5.tgz", + "integrity": "sha512-JNvhIEyxVW6EoMIFIvj93ZOywYFatlpu9deeH6eSx6PE3WHYvHaQtmHmQeNw7aA81bYGBPPQqdtBm6b1SsQMmA==", + "dev": true, + "peer": true, + "requires": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "@types/estree": { + "version": "1.0.5", + "dev": true + }, + "@types/estree-jsx": { + "version": "1.0.5", + "dev": true, + "requires": { + "@types/estree": "*" + } + }, + "@types/fs-extra": { + "version": "11.0.1", + "dev": true, + "requires": { + "@types/jsonfile": "*", + "@types/node": "*" + } + }, + "@types/graceful-fs": { + "version": "4.1.9", + "dev": true, + "requires": { + "@types/node": "*" + } + }, + "@types/hapi__joi": { + "version": "17.1.9", + "dev": true + }, + "@types/hast": { + "version": "2.3.10", + "dev": true, + "requires": { + "@types/unist": "^2" + } + }, + "@types/hoist-non-react-statics": { + "version": "3.3.5", + "dev": true, + "requires": { + "@types/react": "*", + "hoist-non-react-statics": "^3.3.0" + } + }, + "@types/html-minifier-terser": { + "version": "6.1.0", + "dev": true + }, + "@types/istanbul-lib-coverage": { + "version": "2.0.6", + "dev": true + }, + "@types/istanbul-lib-report": { + "version": "3.0.3", + "dev": true, + "requires": { + "@types/istanbul-lib-coverage": "*" + } + }, + "@types/istanbul-reports": { + "version": "3.0.4", + "dev": true, + "requires": { + "@types/istanbul-lib-report": "*" + } + }, + "@types/json-schema": { + "version": "7.0.15", + "dev": true + }, + "@types/jsonfile": { + "version": "6.1.4", + "dev": true, + "requires": { + "@types/node": "*" + } + }, + "@types/lodash": { + "version": "4.17.0", + "dev": true + }, + "@types/mdast": { + "version": "3.0.15", + "dev": true, + "requires": { + "@types/unist": "^2" + } + }, + "@types/minimist": { + "version": "1.2.5", + "dev": true + }, + "@types/ms": { + "version": "0.7.34", + "dev": true + }, + "@types/node": { + "version": 
"20.5.1", + "dev": true + }, + "@types/normalize-package-data": { + "version": "2.4.4", + "dev": true + }, + "@types/parse-json": { + "version": "4.0.2", + "dev": true + }, + "@types/parse5": { + "version": "6.0.3", + "dev": true + }, + "@types/prop-types": { + "version": "15.7.12", + "dev": true + }, + "@types/q": { + "version": "1.5.8", + "dev": true + }, + "@types/ramda": { + "version": "0.29.3", + "dev": true, + "requires": { + "types-ramda": "^0.29.4" + } + }, + "@types/react": { + "version": "18.3.1", + "dev": true, + "requires": { + "@types/prop-types": "*", + "csstype": "^3.0.2" + } + }, + "@types/sax": { + "version": "1.2.7", + "dev": true, + "requires": { + "@types/node": "*" + } + }, + "@types/semver": { + "version": "7.5.8", + "dev": true + }, + "@types/stylis": { + "version": "4.2.0" + }, + "@types/unist": { + "version": "2.0.10", + "dev": true + }, + "@types/yargs": { + "version": "16.0.9", + "dev": true, + "requires": { + "@types/yargs-parser": "*" + } + }, + "@types/yargs-parser": { + "version": "21.0.3", + "dev": true + }, + "@typescript-eslint/eslint-plugin": { + "version": "5.62.0", + "dev": true, + "requires": { + "@eslint-community/regexpp": "^4.4.0", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/type-utils": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "natural-compare-lite": "^1.4.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "@typescript-eslint/parser": { + "version": "5.62.0", + "dev": true, + "requires": { + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "debug": "^4.3.4" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": 
"2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "@typescript-eslint/scope-manager": { + "version": "5.62.0", + "dev": true, + "requires": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" + } + }, + "@typescript-eslint/type-utils": { + "version": "5.62.0", + "dev": true, + "requires": { + "@typescript-eslint/typescript-estree": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "@typescript-eslint/types": { + "version": "5.62.0", + "dev": true + }, + "@typescript-eslint/typescript-estree": { + "version": "5.62.0", + "dev": true, + "requires": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "@typescript-eslint/utils": { + "version": "5.62.0", + "dev": true, + "requires": { + "@eslint-community/eslint-utils": "^4.2.0", + "@types/json-schema": "^7.0.9", + "@types/semver": "^7.3.12", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "eslint-scope": "^5.1.1", + "semver": "^7.3.7" + } + }, + "@typescript-eslint/visitor-keys": { + "version": "5.62.0", + "dev": true, + "requires": { + "@typescript-eslint/types": "5.62.0", + "eslint-visitor-keys": "^3.3.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "3.4.3", + "dev": true + } + } + }, + "@umijs/ast": { + "version": "4.1.10", + "dev": true, + "requires": { + "@umijs/bundler-utils": "4.1.10" + } + }, + 
"@umijs/babel-preset-umi": { + "version": "4.1.10", + "dev": true, + "requires": { + "@babel/runtime": "7.23.6", + "@bloomberg/record-tuple-polyfill": "0.0.4", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "core-js": "3.34.0" + }, + "dependencies": { + "@babel/runtime": { + "version": "7.23.6", + "dev": true, + "requires": { + "regenerator-runtime": "^0.14.0" + } + } + } + }, + "@umijs/bundler-esbuild": { + "version": "4.1.10", + "dev": true, + "requires": { + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "enhanced-resolve": "5.9.3", + "postcss": "^8.4.21", + "postcss-flexbugs-fixes": "5.0.2", + "postcss-preset-env": "7.5.0" + }, + "dependencies": { + "enhanced-resolve": { + "version": "5.9.3", + "dev": true, + "requires": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + } + } + } + }, + "@umijs/bundler-utils": { + "version": "4.1.10", + "dev": true, + "requires": { + "@umijs/utils": "4.1.10", + "esbuild": "0.17.19", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "10.1.1", + "spdy": "^4.0.2" + } + }, + "@umijs/bundler-vite": { + "version": "4.1.10", + "dev": true, + "requires": { + "@svgr/core": "6.5.1", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "@vitejs/plugin-react": "4.0.0", + "core-js": "3.34.0", + "less": "4.1.3", + "postcss-preset-env": "7.5.0", + "rollup-plugin-visualizer": "5.9.0", + "systemjs": "^6.14.1", + "vite": "4.5.2" + } + }, + "@umijs/bundler-webpack": { + "version": "4.1.10", + "dev": true, + "requires": { + "@svgr/core": "6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "@svgr/plugin-svgo": "^6.5.1", + "@types/hapi__joi": "17.1.9", + "@umijs/babel-preset-umi": "4.1.10", + "@umijs/bundler-utils": "4.1.10", + "@umijs/case-sensitive-paths-webpack-plugin": "^1.0.1", + "@umijs/mfsu": "4.1.10", + "@umijs/react-refresh-webpack-plugin": "0.5.11", + "@umijs/utils": "4.1.10", + "cors": "^2.8.5", + "css-loader": "6.7.1", + "es5-imcompatible-versions": "^0.1.78", + 
"fork-ts-checker-webpack-plugin": "8.0.0", + "jest-worker": "29.4.3", + "lightningcss": "1.22.1", + "node-libs-browser": "2.2.1", + "postcss": "^8.4.21", + "postcss-preset-env": "7.5.0", + "react-error-overlay": "6.0.9", + "react-refresh": "0.14.0" + } + }, + "@umijs/case-sensitive-paths-webpack-plugin": { + "version": "1.0.1", + "dev": true + }, + "@umijs/core": { + "version": "4.1.10", + "dev": true, + "requires": { + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10" + } + }, + "@umijs/did-you-know": { + "version": "1.0.3", + "dev": true + }, + "@umijs/es-module-parser": { + "version": "0.0.7", + "dev": true, + "requires": { + "@umijs/es-module-parser-darwin-arm64": "0.0.7", + "@umijs/es-module-parser-darwin-x64": "0.0.7", + "@umijs/es-module-parser-linux-arm-gnueabihf": "0.0.7", + "@umijs/es-module-parser-linux-arm64-gnu": "0.0.7", + "@umijs/es-module-parser-linux-arm64-musl": "0.0.7", + "@umijs/es-module-parser-linux-x64-gnu": "0.0.7", + "@umijs/es-module-parser-linux-x64-musl": "0.0.7", + "@umijs/es-module-parser-win32-arm64-msvc": "0.0.7", + "@umijs/es-module-parser-win32-x64-msvc": "0.0.7" + } + }, + "@umijs/es-module-parser-darwin-arm64": { + "version": "0.0.7", + "dev": true, + "optional": true + }, + "@umijs/history": { + "version": "5.3.1", + "dev": true, + "requires": { + "@babel/runtime": "^7.7.6", + "query-string": "^6.13.6" + } + }, + "@umijs/lint": { + "version": "4.1.10", + "dev": true, + "requires": { + "@babel/core": "7.23.6", + "@babel/eslint-parser": "7.23.3", + "@stylelint/postcss-css-in-js": "^0.38.0", + "@typescript-eslint/eslint-plugin": "^5.62.0", + "@typescript-eslint/parser": "^5.62.0", + "@umijs/babel-preset-umi": "4.1.10", + "eslint-plugin-jest": "27.2.3", + "eslint-plugin-react": "7.33.2", + "eslint-plugin-react-hooks": "4.6.0", + "postcss": "^8.4.21", + "postcss-syntax": "0.36.2", + "stylelint-config-standard": "25.0.0" + }, + "dependencies": { + "@babel/core": { + "version": "7.23.6", + "dev": true, + "requires": { + 
"@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helpers": "^7.23.6", + "@babel/parser": "^7.23.6", + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.6", + "@babel/types": "^7.23.6", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + } + }, + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + }, + "semver": { + "version": "6.3.1", + "dev": true + } + } + }, + "@umijs/mfsu": { + "version": "4.1.10", + "dev": true, + "requires": { + "@umijs/bundler-esbuild": "4.1.10", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "enhanced-resolve": "5.9.3", + "is-equal": "^1.6.4" + }, + "dependencies": { + "enhanced-resolve": { + "version": "5.9.3", + "dev": true, + "requires": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + } + } + } + }, + "@umijs/plugin-run": { + "version": "4.1.10", + "dev": true, + "requires": { + "tsx": "3.12.2" + } + }, + "@umijs/preset-umi": { + "version": "4.1.10", + "dev": true, + "requires": { + "@iconify/utils": "2.1.1", + "@svgr/core": "6.5.1", + "@umijs/ast": "4.1.10", + "@umijs/babel-preset-umi": "4.1.10", + "@umijs/bundler-esbuild": "4.1.10", + "@umijs/bundler-utils": "4.1.10", + "@umijs/bundler-vite": "4.1.10", + "@umijs/bundler-webpack": "4.1.10", + "@umijs/core": "4.1.10", + "@umijs/did-you-know": "1.0.3", + "@umijs/es-module-parser": "0.0.7", + "@umijs/history": "5.3.1", + "@umijs/mfsu": "4.1.10", + "@umijs/plugin-run": "4.1.10", + "@umijs/renderer-react": "4.1.10", + "@umijs/server": "4.1.10", + "@umijs/ui": "3.0.1", + "@umijs/utils": "4.1.10", + "@umijs/zod2ts": "4.1.10", + "babel-plugin-dynamic-import-node": "2.3.3", + "click-to-react-component": "^1.0.8", + "core-js": "3.34.0", + 
"current-script-polyfill": "1.0.0", + "enhanced-resolve": "5.9.3", + "fast-glob": "3.2.12", + "html-webpack-plugin": "5.5.0", + "less-plugin-resolve": "1.0.2", + "path-to-regexp": "1.7.0", + "postcss": "^8.4.21", + "postcss-prefix-selector": "1.16.0", + "react": "18.1.0", + "react-dom": "18.1.0", + "react-router": "6.3.0", + "react-router-dom": "6.3.0", + "regenerator-runtime": "0.13.11" + }, + "dependencies": { + "enhanced-resolve": { + "version": "5.9.3", + "dev": true, + "requires": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + } + }, + "regenerator-runtime": { + "version": "0.13.11", + "dev": true + } + } + }, + "@umijs/react-refresh-webpack-plugin": { + "version": "0.5.11", + "dev": true, + "requires": { + "ansi-html-community": "^0.0.8", + "common-path-prefix": "^3.0.0", + "core-js-pure": "^3.23.3", + "error-stack-parser": "^2.0.6", + "find-up": "^5.0.0", + "html-entities": "^2.1.0", + "loader-utils": "^2.0.4", + "schema-utils": "^3.0.0", + "source-map": "^0.7.3" + } + }, + "@umijs/renderer-react": { + "version": "4.1.10", + "dev": true, + "requires": { + "@babel/runtime": "7.23.6", + "@loadable/component": "5.15.2", + "history": "5.3.0", + "react-helmet-async": "1.3.0", + "react-router-dom": "6.3.0" + }, + "dependencies": { + "@babel/runtime": { + "version": "7.23.6", + "dev": true, + "requires": { + "regenerator-runtime": "^0.14.0" + } + } + } + }, + "@umijs/server": { + "version": "4.1.10", + "dev": true, + "requires": { + "@umijs/bundler-utils": "4.1.10", + "history": "5.3.0", + "react": "18.1.0", + "react-dom": "18.1.0", + "react-router-dom": "6.3.0" + } + }, + "@umijs/test": { + "version": "4.1.10", + "dev": true, + "requires": { + "@babel/plugin-transform-modules-commonjs": "7.23.3", + "@jest/types": "27.5.1", + "@umijs/bundler-utils": "4.1.10", + "@umijs/utils": "4.1.10", + "babel-jest": "^29.7.0", + "esbuild": "0.17.19", + "identity-obj-proxy": "3.0.0", + "isomorphic-unfetch": "4.0.2" + } + }, + "@umijs/ui": { + "version": "3.0.1", + "dev": 
true + }, + "@umijs/utils": { + "version": "4.1.10", + "dev": true, + "requires": { + "chokidar": "3.5.3", + "pino": "7.11.0" + } + }, + "@umijs/zod2ts": { + "version": "4.1.10", + "dev": true + }, + "@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/@ungap/structured-clone/download/@ungap/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true, + "peer": true + }, + "@vitejs/plugin-react": { + "version": "4.0.0", + "dev": true, + "requires": { + "@babel/core": "^7.21.4", + "@babel/plugin-transform-react-jsx-self": "^7.21.0", + "@babel/plugin-transform-react-jsx-source": "^7.19.6", + "react-refresh": "^0.14.0" + } + }, + "@webassemblyjs/ast": { + "version": "1.12.1", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.12.1.tgz", + "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/floating-point-hex-parser/download/@webassemblyjs/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==", + "dev": true, + "peer": true + }, + "@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-api-error/download/@webassemblyjs/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==", + "dev": true, + "peer": true + }, + "@webassemblyjs/helper-buffer": { + "version": "1.11.6", + 
"resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-buffer/download/@webassemblyjs/helper-buffer-1.11.6.tgz", + "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==", + "dev": true, + "peer": true + }, + "@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-numbers/download/@webassemblyjs/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-wasm-bytecode/download/@webassemblyjs/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==", + "dev": true, + "peer": true + }, + "@webassemblyjs/helper-wasm-section": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/helper-wasm-section/download/@webassemblyjs/helper-wasm-section-1.11.6.tgz", + "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6" + }, + "dependencies": { + "@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, 
+ "requires": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + } + } + }, + "@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ieee754/download/@webassemblyjs/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dev": true, + "peer": true, + "requires": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/leb128/download/@webassemblyjs/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dev": true, + "peer": true, + "requires": { + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/utf8/download/@webassemblyjs/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==", + "dev": true, + "peer": true + }, + "@webassemblyjs/wasm-edit": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-edit/download/@webassemblyjs/wasm-edit-1.11.6.tgz", + "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-opt": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6", + "@webassemblyjs/wast-printer": "1.11.6" + }, + "dependencies": { + "@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": 
"https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-parser/download/@webassemblyjs/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + } + } + }, + "@webassemblyjs/wasm-gen": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-gen/download/@webassemblyjs/wasm-gen-1.11.6.tgz", + "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + }, + "dependencies": { + "@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + } + } + }, + "@webassemblyjs/wasm-opt": { + 
"version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-opt/download/@webassemblyjs/wasm-opt-1.11.6.tgz", + "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6" + }, + "dependencies": { + "@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-parser/download/@webassemblyjs/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + } + } + }, + "@webassemblyjs/wasm-parser": { + "version": "1.12.1", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wasm-parser/download/@webassemblyjs/wasm-parser-1.12.1.tgz", + "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + 
"@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "@webassemblyjs/wast-printer": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/wast-printer/download/@webassemblyjs/wast-printer-1.11.6.tgz", + "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@xtuc/long": "4.2.2" + }, + "dependencies": { + "@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npm.alibaba-inc.com/@webassemblyjs/ast/download/@webassemblyjs/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dev": true, + "peer": true, + "requires": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + } + } + }, + "@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/@xtuc/ieee754/download/@xtuc/ieee754-1.2.0.tgz", + "integrity": "sha1-7vAUoxRa5Hehy8AM0eVSM23Ot5A=", + "dev": true, + "peer": true + }, + "@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npm.alibaba-inc.com/@xtuc/long/download/@xtuc/long-4.2.2.tgz", + "integrity": "sha1-0pHGpOl5ibXGHZrPOWrk/hM6cY0=", + "dev": true, + "peer": true + }, + "acorn": { + "version": "8.11.3", + "dev": true + }, + "acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npm.alibaba-inc.com/acorn-import-assertions/download/acorn-import-assertions-1.9.0.tgz", + "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "dev": true, + "peer": true, + "requires": {} + }, + "acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npm.alibaba-inc.com/acorn-jsx/download/acorn-jsx-5.3.2.tgz", + "integrity": 
"sha1-ftW7VZCLOy8bxVxq8WU7rafweTc=", + "dev": true, + "peer": true, + "requires": {} + }, + "acorn-walk": { + "version": "8.3.2", + "dev": true + }, + "agent-base": { + "version": "4.3.0", + "dev": true, + "requires": { + "es6-promisify": "^5.0.0" + } + }, + "agentkeepalive": { + "version": "3.5.2", + "dev": true, + "requires": { + "humanize-ms": "^1.2.1" + } + }, + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npm.alibaba-inc.com/ajv/download/ajv-6.12.6.tgz", + "integrity": "sha1-uvWmLoArB9l3A0WG+MO69a3ybfQ=", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ajv-keywords": { + "version": "3.5.2", + "dev": true, + "requires": {} + }, + "animated-scroll-to": { + "version": "2.3.0", + "dev": true + }, + "ansi-align": { + "version": "2.0.0", + "dev": true, + "requires": { + "string-width": "^2.0.0" + } + }, + "ansi-escapes": { + "version": "3.2.0", + "dev": true + }, + "ansi-html-community": { + "version": "0.0.8", + "dev": true + }, + "ansi-regex": { + "version": "4.1.1", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "antd": { + "version": "5.16.5", + "requires": { + "@ant-design/colors": "^7.0.2", + "@ant-design/cssinjs": "^1.18.5", + "@ant-design/icons": "^5.3.6", + "@ant-design/react-slick": "~1.1.2", + "@babel/runtime": "^7.24.4", + "@ctrl/tinycolor": "^3.6.1", + "@rc-component/color-picker": "~1.5.3", + "@rc-component/mutate-observer": "^1.1.0", + "@rc-component/tour": "~1.14.2", + "@rc-component/trigger": "^2.1.1", + "classnames": "^2.5.1", + "copy-to-clipboard": "^3.3.3", + "dayjs": "^1.11.10", + "qrcode.react": "^3.1.0", + "rc-cascader": "~3.24.1", + "rc-checkbox": "~3.2.0", + "rc-collapse": "~3.7.3", + "rc-dialog": "~9.4.0", + "rc-drawer": "~7.1.0", + "rc-dropdown": "~4.2.0", + "rc-field-form": "~1.44.0", + "rc-image": "~7.6.0", + 
"rc-input": "~1.4.5", + "rc-input-number": "~9.0.0", + "rc-mentions": "~2.11.1", + "rc-menu": "~9.13.0", + "rc-motion": "^2.9.0", + "rc-notification": "~5.4.0", + "rc-pagination": "~4.0.4", + "rc-picker": "~4.4.2", + "rc-progress": "~4.0.0", + "rc-rate": "~2.12.0", + "rc-resize-observer": "^1.4.0", + "rc-segmented": "~2.3.0", + "rc-select": "~14.13.1", + "rc-slider": "~10.6.2", + "rc-steps": "~6.0.1", + "rc-switch": "~4.1.0", + "rc-table": "~7.45.4", + "rc-tabs": "~14.1.1", + "rc-textarea": "~1.6.3", + "rc-tooltip": "~6.2.0", + "rc-tree": "~5.8.5", + "rc-tree-select": "~5.19.0", + "rc-upload": "~4.5.2", + "rc-util": "^5.39.1", + "scroll-into-view-if-needed": "^3.1.0", + "throttle-debounce": "^5.0.0" + } + }, + "any-promise": { + "version": "1.3.0", + "dev": true + }, + "anymatch": { + "version": "3.1.3", + "dev": true, + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "aproba": { + "version": "1.2.0", + "dev": true + }, + "arg": { + "version": "5.0.2", + "dev": true + }, + "argparse": { + "version": "2.0.1", + "dev": true + }, + "aria-hidden": { + "version": "1.2.4", + "dev": true, + "requires": { + "tslib": "^2.0.0" + } + }, + "array-buffer-byte-length": { + "version": "1.0.1", + "dev": true, + "requires": { + "call-bind": "^1.0.5", + "is-array-buffer": "^3.0.4" + } + }, + "array-ify": { + "version": "1.0.0", + "dev": true + }, + "array-includes": { + "version": "3.1.8", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "is-string": "^1.0.7" + } + }, + "array-tree-filter": { + "version": "2.1.0" + }, + "array-union": { + "version": "2.1.0", + "dev": true + }, + "array.prototype.flat": { + "version": "1.3.2", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + } + }, + "array.prototype.flatmap": { + 
"version": "1.3.2", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + } + }, + "array.prototype.reduce": { + "version": "1.0.7", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-array-method-boxes-properly": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "is-string": "^1.0.7" + } + }, + "array.prototype.tosorted": { + "version": "1.1.3", + "dev": true, + "requires": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.1.0", + "es-shim-unscopables": "^1.0.2" + } + }, + "arraybuffer.prototype.slice": { + "version": "1.0.3", + "dev": true, + "requires": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.2.1", + "get-intrinsic": "^1.2.3", + "is-array-buffer": "^3.0.4", + "is-shared-array-buffer": "^1.0.2" + } + }, + "arrify": { + "version": "1.0.1", + "dev": true + }, + "asn1.js": { + "version": "4.10.1", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + }, + "dependencies": { + "bn.js": { + "version": "4.12.0", + "dev": true + } + } + }, + "assert": { + "version": "1.5.1", + "dev": true, + "requires": { + "object.assign": "^4.1.4", + "util": "^0.10.4" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "dev": true + }, + "util": { + "version": "0.10.4", + "dev": true, + "requires": { + "inherits": "2.0.3" + } + } + } + }, + "astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/astral-regex/download/astral-regex-2.0.0.tgz", + "integrity": "sha1-SDFDxWeu7UeFdZwIZXhtx319LjE=", + "dev": true, + "peer": true + }, + "astring": { + "version": "1.8.6", + "dev": true + }, + "async-validator": { + "version": "4.2.5" + }, + 
"atob": { + "version": "2.1.2", + "dev": true + }, + "atomic-sleep": { + "version": "1.0.0", + "dev": true + }, + "autoprefixer": { + "version": "10.4.19", + "dev": true, + "requires": { + "browserslist": "^4.23.0", + "caniuse-lite": "^1.0.30001599", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + } + }, + "available-typed-arrays": { + "version": "1.0.7", + "dev": true, + "requires": { + "possible-typed-array-names": "^1.0.0" + } + }, + "axios": { + "version": "0.18.1", + "dev": true, + "requires": { + "follow-redirects": "1.5.10", + "is-buffer": "^2.0.2" + } + }, + "babel-jest": { + "version": "29.7.0", + "dev": true, + "requires": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + } + }, + "babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "dev": true, + "requires": { + "object.assign": "^4.1.0" + } + }, + "babel-plugin-istanbul": { + "version": "6.1.1", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + } + }, + "babel-plugin-jest-hoist": { + "version": "29.6.3", + "dev": true, + "requires": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + } + }, + "babel-preset-current-node-syntax": { + "version": "1.0.1", + "dev": true, + "requires": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + 
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + } + }, + "babel-preset-jest": { + "version": "29.6.3", + "dev": true, + "requires": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + } + }, + "bail": { + "version": "2.0.2", + "dev": true + }, + "balanced-match": { + "version": "1.0.2", + "dev": true + }, + "base64-js": { + "version": "1.5.1", + "dev": true + }, + "big-integer": { + "version": "1.6.52", + "dev": true + }, + "big.js": { + "version": "5.2.2", + "dev": true + }, + "binary-extensions": { + "version": "2.3.0", + "dev": true + }, + "binaryextensions": { + "version": "2.3.0", + "dev": true + }, + "bl": { + "version": "1.2.3", + "dev": true, + "requires": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "bluebird": { + "version": "3.7.2", + "dev": true + }, + "bn.js": { + "version": "5.2.1", + "dev": true + }, + "boolbase": { + "version": "1.0.0", + "dev": true + }, + "boxen": { + "version": "1.3.0", + "dev": true, + "requires": { + "ansi-align": "^2.0.0", + "camelcase": "^4.0.0", + "chalk": "^2.0.1", + "cli-boxes": "^1.0.0", + "string-width": "^2.0.0", + "term-size": "^1.2.0", + "widest-line": "^2.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "camelcase": { + "version": "4.1.0", + "dev": true + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true 
+ }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "bplist-parser": { + "version": "0.2.0", + "dev": true, + "requires": { + "big-integer": "^1.6.44" + } + }, + "brace-expansion": { + "version": "1.1.11", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "3.0.2", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "brorand": { + "version": "1.1.0", + "dev": true + }, + "browserify-aes": { + "version": "1.2.0", + "dev": true, + "requires": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "browserify-cipher": { + "version": "1.0.1", + "dev": true, + "requires": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "browserify-des": { + "version": "1.0.2", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "browserify-rsa": { + "version": "4.1.0", + "dev": true, + "requires": { + "bn.js": "^5.0.0", + "randombytes": "^2.0.1" + } + }, + "browserify-sign": { + "version": "4.2.3", + "dev": true, + "requires": { + "bn.js": "^5.2.1", + "browserify-rsa": "^4.1.0", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "elliptic": "^6.5.5", + "hash-base": "~3.0", + "inherits": "^2.0.4", + "parse-asn1": "^5.1.7", + "readable-stream": "^2.3.8", + "safe-buffer": "^5.2.1" + } + }, + "browserify-zlib": { + "version": "0.2.0", + "dev": true, + "requires": { + "pako": "~1.0.5" + } + }, + "browserslist": { + "version": "4.23.0", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30001587", + "electron-to-chromium": "^1.4.668", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" + } + }, + "bser": 
{ + "version": "2.1.1", + "dev": true, + "requires": { + "node-int64": "^0.4.0" + } + }, + "buffer": { + "version": "4.9.2", + "dev": true, + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "buffer-alloc": { + "version": "1.2.0", + "dev": true, + "requires": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "buffer-alloc-unsafe": { + "version": "1.1.0", + "dev": true + }, + "buffer-fill": { + "version": "1.0.0", + "dev": true + }, + "buffer-from": { + "version": "1.1.2", + "dev": true + }, + "buffer-xor": { + "version": "1.0.3", + "dev": true + }, + "builtin-status-codes": { + "version": "3.0.0", + "dev": true + }, + "builtins": { + "version": "1.0.3", + "dev": true + }, + "bundle-name": { + "version": "3.0.0", + "dev": true, + "requires": { + "run-applescript": "^5.0.0" + } + }, + "cacache": { + "version": "9.3.0", + "dev": true, + "requires": { + "bluebird": "^3.5.0", + "chownr": "^1.0.1", + "glob": "^7.1.2", + "graceful-fs": "^4.1.11", + "lru-cache": "^4.1.1", + "mississippi": "^1.3.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.1", + "ssri": "^4.1.6", + "unique-filename": "^1.1.0", + "y18n": "^3.2.1" + }, + "dependencies": { + "lru-cache": { + "version": "4.1.5", + "dev": true, + "requires": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "yallist": { + "version": "2.1.2", + "dev": true + } + } + }, + "call-bind": { + "version": "1.0.7", + "dev": true, + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + } + }, + "callsites": { + "version": "3.1.0", + "dev": true + }, + "camel-case": { + "version": "4.1.2", + "dev": true, + "requires": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "camelcase": { + "version": "5.3.1", + "dev": true + }, + "camelcase-keys": { + "version": "6.2.2", + "dev": 
true, + "requires": { + "camelcase": "^5.3.1", + "map-obj": "^4.0.0", + "quick-lru": "^4.0.1" + } + }, + "camelize": { + "version": "1.0.1" + }, + "caniuse-lite": { + "version": "1.0.30001614", + "dev": true + }, + "capture-stack-trace": { + "version": "1.0.2", + "dev": true + }, + "ccount": { + "version": "2.0.1", + "dev": true + }, + "chalk": { + "version": "4.1.2", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "character-entities": { + "version": "2.0.2", + "dev": true + }, + "character-entities-html4": { + "version": "2.1.0", + "dev": true + }, + "character-entities-legacy": { + "version": "3.0.0", + "dev": true + }, + "character-reference-invalid": { + "version": "2.0.1", + "dev": true + }, + "chardet": { + "version": "0.7.0", + "dev": true + }, + "chokidar": { + "version": "3.5.3", + "dev": true, + "requires": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + } + }, + "chownr": { + "version": "1.1.4", + "dev": true + }, + "chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npm.alibaba-inc.com/chrome-trace-event/download/chrome-trace-event-1.0.3.tgz", + "integrity": "sha1-EBXs7UdB4V0GZkqVfbv1DQQeJqw=", + "dev": true, + "peer": true + }, + "ci-info": { + "version": "3.9.0", + "dev": true + }, + "cipher-base": { + "version": "1.0.4", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "classnames": { + "version": "2.5.1" + }, + "clean-css": { + "version": "5.3.3", + "dev": true, + "requires": { + "source-map": "~0.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "dev": true + } + } + }, + "cli-boxes": { + "version": "1.0.0", + "dev": true + }, + "cli-cursor": { + "version": "2.1.0", + "dev": true, + "requires": { + "restore-cursor": "^2.0.0" + } + }, + "cli-spinners": { 
+ "version": "1.3.1", + "dev": true + }, + "cli-truncate": { + "version": "3.1.0", + "dev": true, + "requires": { + "slice-ansi": "^5.0.0", + "string-width": "^5.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + "dev": true + }, + "string-width": { + "version": "5.1.2", + "dev": true, + "requires": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + } + }, + "strip-ansi": { + "version": "7.1.0", + "dev": true, + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "cli-width": { + "version": "2.2.1", + "dev": true + }, + "click-to-react-component": { + "version": "1.1.0", + "dev": true, + "requires": { + "@floating-ui/react-dom-interactions": "^0.3.1", + "htm": "^3.1.0", + "react-merge-refs": "^1.1.0" + } + }, + "cliui": { + "version": "8.0.1", + "dev": true, + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true + }, + "string-width": { + "version": "4.2.3", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "strip-ansi": { + "version": "6.0.1", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + }, + "wrap-ansi": { + "version": "7.0.0", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + } + } + }, + "coa": { + "version": "2.0.2", + "dev": true, + "requires": { + "@types/q": "^1.5.1", + "chalk": "^2.4.1", + "q": "^1.1.2" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": 
"^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "codesandbox": { + "version": "2.2.3", + "dev": true, + "requires": { + "axios": "^0.18.1", + "chalk": "^2.4.1", + "codesandbox-import-util-types": "^2.2.3", + "codesandbox-import-utils": "^2.2.3", + "commander": "^2.9.0", + "datauri": "^3.0.0", + "filesize": "^3.6.1", + "fs-extra": "^3.0.1", + "git-branch": "^1.0.0", + "git-repo-name": "^0.6.0", + "git-username": "^0.5.0", + "humps": "^2.0.1", + "inquirer": "^6.2.2", + "lodash": "^4.17.5", + "lz-string": "^1.4.4", + "ms": "^2.0.0", + "open": "^6.3.0", + "ora": "^1.3.0", + "pacote": "^2.7.36", + "shortid": "^2.2.8", + "update-notifier": "^2.2.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "fs-extra": { + "version": "3.0.1", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "jsonfile": "^3.0.0", + "universalify": "^0.1.0" + } + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "jsonfile": { + "version": "3.0.1", + "dev": true, + "requires": { + "graceful-fs": "^4.1.6" + } + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "universalify": { + "version": "0.1.2", + "dev": true + } + } + }, + "codesandbox-import-util-types": { + "version": "2.2.3", + "dev": true + }, 
+ "codesandbox-import-utils": { + "version": "2.2.3", + "dev": true, + "requires": { + "codesandbox-import-util-types": "^2.2.3", + "istextorbinary": "^2.2.1", + "lz-string": "^1.4.4" + } + }, + "color": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.3", + "color-string": "^1.6.0" + }, + "dependencies": { + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + } + } + }, + "color-convert": { + "version": "2.0.1", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "dev": true + }, + "color-string": { + "version": "1.9.1", + "dev": true, + "requires": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "colord": { + "version": "2.9.3", + "resolved": "https://registry.npm.alibaba-inc.com/colord/download/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", + "dev": true, + "peer": true + }, + "colorette": { + "version": "2.0.20", + "dev": true + }, + "comlink": { + "version": "4.4.1", + "dev": true + }, + "comma-separated-tokens": { + "version": "2.0.3", + "dev": true + }, + "commander": { + "version": "2.20.3", + "dev": true + }, + "common-path-prefix": { + "version": "3.0.0", + "dev": true + }, + "compare-func": { + "version": "2.0.0", + "dev": true, + "requires": { + "array-ify": "^1.0.0", + "dot-prop": "^5.1.0" + } + }, + "compute-scroll-into-view": { + "version": "3.1.0" + }, + "concat-map": { + "version": "0.0.1", + "dev": true + }, + "concat-stream": { + "version": "1.6.2", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "configstore": { + "version": "3.1.5", + "dev": true, + "requires": { + "dot-prop": "^4.2.1", + "graceful-fs": "^4.1.2", + "make-dir": "^1.0.0", + 
"unique-string": "^1.0.0", + "write-file-atomic": "^2.0.0", + "xdg-basedir": "^3.0.0" + }, + "dependencies": { + "dot-prop": { + "version": "4.2.1", + "dev": true, + "requires": { + "is-obj": "^1.0.0" + } + }, + "is-obj": { + "version": "1.0.1", + "dev": true + }, + "make-dir": { + "version": "1.3.0", + "dev": true, + "requires": { + "pify": "^3.0.0" + } + }, + "pify": { + "version": "3.0.0", + "dev": true + }, + "write-file-atomic": { + "version": "2.4.3", + "dev": true, + "requires": { + "graceful-fs": "^4.1.11", + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.2" + } + } + } + }, + "console-browserify": { + "version": "1.2.0", + "dev": true + }, + "constants-browserify": { + "version": "1.0.0", + "dev": true + }, + "conventional-changelog-angular": { + "version": "6.0.0", + "dev": true, + "requires": { + "compare-func": "^2.0.0" + } + }, + "conventional-changelog-conventionalcommits": { + "version": "6.1.0", + "dev": true, + "requires": { + "compare-func": "^2.0.0" + } + }, + "conventional-commits-parser": { + "version": "4.0.0", + "dev": true, + "requires": { + "is-text-path": "^1.0.1", + "JSONStream": "^1.3.5", + "meow": "^8.1.2", + "split2": "^3.2.2" + } + }, + "convert-source-map": { + "version": "2.0.0", + "dev": true + }, + "copy-anything": { + "version": "2.0.6", + "dev": true, + "requires": { + "is-what": "^3.14.1" + } + }, + "copy-concurrently": { + "version": "1.0.5", + "dev": true, + "requires": { + "aproba": "^1.1.1", + "fs-write-stream-atomic": "^1.0.8", + "iferr": "^0.1.5", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.0" + } + }, + "copy-to-clipboard": { + "version": "3.3.3", + "requires": { + "toggle-selection": "^1.0.6" + } + }, + "core-js": { + "version": "3.34.0", + "dev": true + }, + "core-js-pure": { + "version": "3.37.0", + "dev": true + }, + "core-util-is": { + "version": "1.0.3", + "dev": true + }, + "cors": { + "version": "2.8.5", + "dev": true, + "requires": { + "object-assign": "^4", + "vary": "^1" + } + }, + 
"cosmiconfig": { + "version": "8.3.6", + "dev": true, + "requires": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + } + }, + "cosmiconfig-typescript-loader": { + "version": "4.4.0", + "dev": true, + "requires": {} + }, + "create-ecdh": { + "version": "4.0.4", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "elliptic": "^6.5.3" + }, + "dependencies": { + "bn.js": { + "version": "4.12.0", + "dev": true + } + } + }, + "create-error-class": { + "version": "3.0.2", + "dev": true, + "requires": { + "capture-stack-trace": "^1.0.0" + } + }, + "create-hash": { + "version": "1.2.0", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "create-hmac": { + "version": "1.1.7", + "dev": true, + "requires": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "create-require": { + "version": "1.1.1", + "dev": true + }, + "cross-spawn": { + "version": "7.0.3", + "dev": true, + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "crypto-browserify": { + "version": "3.12.0", + "dev": true, + "requires": { + "browserify-cipher": "^1.0.0", + "browserify-sign": "^4.0.0", + "create-ecdh": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.0", + "diffie-hellman": "^5.0.0", + "inherits": "^2.0.1", + "pbkdf2": "^3.0.3", + "public-encrypt": "^4.0.0", + "randombytes": "^2.0.0", + "randomfill": "^1.0.3" + } + }, + "crypto-random-string": { + "version": "1.0.0", + "dev": true + }, + "css": { + "version": "3.0.0", + "dev": true, + "requires": { + "inherits": "^2.0.4", + "source-map": "^0.6.1", + "source-map-resolve": "^0.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "dev": true + } + } + }, + "css-blank-pseudo": { + "version": "3.0.3", + 
"dev": true, + "requires": { + "postcss-selector-parser": "^6.0.9" + } + }, + "css-color-keywords": { + "version": "1.0.0" + }, + "css-functions-list": { + "version": "3.2.1", + "resolved": "https://registry.npm.alibaba-inc.com/css-functions-list/download/css-functions-list-3.2.1.tgz", + "integrity": "sha512-Nj5YcaGgBtuUmn1D7oHqPW0c9iui7xsTsj5lIX8ZgevdfhmjFfKB3r8moHJtNJnctnYXJyYX5I1pp90HM4TPgQ==", + "dev": true, + "peer": true + }, + "css-has-pseudo": { + "version": "3.0.4", + "dev": true, + "requires": { + "postcss-selector-parser": "^6.0.9" + } + }, + "css-loader": { + "version": "6.7.1", + "dev": true, + "requires": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.7", + "postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.0", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.3.5" + } + }, + "css-prefers-color-scheme": { + "version": "6.0.3", + "dev": true, + "requires": {} + }, + "css-select": { + "version": "2.1.0", + "dev": true, + "requires": { + "boolbase": "^1.0.0", + "css-what": "^3.2.1", + "domutils": "^1.7.0", + "nth-check": "^1.0.2" + }, + "dependencies": { + "dom-serializer": { + "version": "0.2.2", + "dev": true, + "requires": { + "domelementtype": "^2.0.1", + "entities": "^2.0.0" + } + }, + "domutils": { + "version": "1.7.0", + "dev": true, + "requires": { + "dom-serializer": "0", + "domelementtype": "1" + }, + "dependencies": { + "domelementtype": { + "version": "1.3.1", + "dev": true + } + } + }, + "entities": { + "version": "2.2.0", + "dev": true + } + } + }, + "css-select-base-adapter": { + "version": "0.1.1", + "dev": true + }, + "css-to-react-native": { + "version": "3.2.0", + "requires": { + "camelize": "^1.0.0", + "css-color-keywords": "^1.0.0", + "postcss-value-parser": "^4.0.2" + } + }, + "css-tree": { + "version": "1.0.0-alpha.37", + "dev": true, + "requires": { + "mdn-data": "2.0.4", + "source-map": "^0.6.1" + }, + 
"dependencies": { + "source-map": { + "version": "0.6.1", + "dev": true + } + } + }, + "css-what": { + "version": "3.4.2", + "dev": true + }, + "cssdb": { + "version": "6.6.3", + "dev": true + }, + "cssesc": { + "version": "3.0.0", + "dev": true + }, + "csso": { + "version": "4.2.0", + "dev": true, + "requires": { + "css-tree": "^1.1.2" + }, + "dependencies": { + "css-tree": { + "version": "1.1.3", + "dev": true, + "requires": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + } + }, + "mdn-data": { + "version": "2.0.14", + "dev": true + }, + "source-map": { + "version": "0.6.1", + "dev": true + } + } + }, + "csstype": { + "version": "3.1.3" + }, + "current-script-polyfill": { + "version": "1.0.0", + "dev": true + }, + "cwd": { + "version": "0.9.1", + "dev": true, + "requires": { + "find-pkg": "^0.1.0" + } + }, + "cyclist": { + "version": "1.0.2", + "dev": true + }, + "dargs": { + "version": "7.0.0", + "dev": true + }, + "data-uri-to-buffer": { + "version": "4.0.1", + "dev": true + }, + "data-view-buffer": { + "version": "1.0.1", + "dev": true, + "requires": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + } + }, + "data-view-byte-length": { + "version": "1.0.1", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + } + }, + "data-view-byte-offset": { + "version": "1.0.0", + "dev": true, + "requires": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + } + }, + "datauri": { + "version": "3.0.0", + "dev": true, + "requires": { + "image-size": "0.8.3", + "mimer": "1.1.0" + } + }, + "dayjs": { + "version": "1.11.11" + }, + "debug": { + "version": "3.1.0", + "dev": true, + "requires": { + "ms": "2.0.0" + }, + "dependencies": { + "ms": { + "version": "2.0.0", + "dev": true + } + } + }, + "decamelize": { + "version": "1.2.0", + "dev": true + }, + "decamelize-keys": { + "version": "1.1.1", + "dev": true, + "requires": { + "decamelize": "^1.1.0", + 
"map-obj": "^1.0.0" + }, + "dependencies": { + "map-obj": { + "version": "1.0.1", + "dev": true + } + } + }, + "decode-named-character-reference": { + "version": "1.0.2", + "dev": true, + "requires": { + "character-entities": "^2.0.0" + } + }, + "decode-uri-component": { + "version": "0.2.2", + "dev": true + }, + "deep-extend": { + "version": "0.6.0", + "dev": true + }, + "deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npm.alibaba-inc.com/deep-is/download/deep-is-0.1.4.tgz", + "integrity": "sha1-pvLc5hL63S7x9Rm3NVHxfoUZmDE=", + "dev": true, + "peer": true + }, + "deep-rename-keys": { + "version": "0.2.1", + "dev": true, + "requires": { + "kind-of": "^3.0.2", + "rename-keys": "^1.1.2" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "deepmerge": { + "version": "4.3.1", + "dev": true + }, + "default-browser": { + "version": "4.0.0", + "dev": true, + "requires": { + "bundle-name": "^3.0.0", + "default-browser-id": "^3.0.0", + "execa": "^7.1.1", + "titleize": "^3.0.0" + }, + "dependencies": { + "execa": { + "version": "7.2.0", + "dev": true, + "requires": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.1", + "human-signals": "^4.3.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^3.0.7", + "strip-final-newline": "^3.0.0" + } + }, + "human-signals": { + "version": "4.3.1", + "dev": true + }, + "is-stream": { + "version": "3.0.0", + "dev": true + }, + "mimic-fn": { + "version": "4.0.0", + "dev": true + }, + "npm-run-path": { + "version": "5.3.0", + "dev": true, + "requires": { + "path-key": "^4.0.0" + } + }, + "onetime": { + "version": "6.0.0", + "dev": true, + "requires": { + "mimic-fn": "^4.0.0" + } + }, + "path-key": { + "version": "4.0.0", + "dev": true + }, + "strip-final-newline": { + "version": "3.0.0", + "dev": true + } + } 
+ }, + "default-browser-id": { + "version": "3.0.0", + "dev": true, + "requires": { + "bplist-parser": "^0.2.0", + "untildify": "^4.0.0" + } + }, + "define-data-property": { + "version": "1.1.4", + "dev": true, + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, + "define-lazy-prop": { + "version": "3.0.0", + "dev": true + }, + "define-properties": { + "version": "1.2.1", + "dev": true, + "requires": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + } + }, + "dequal": { + "version": "2.0.3", + "dev": true + }, + "des.js": { + "version": "1.1.0", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "detect-indent": { + "version": "7.0.1", + "dev": true + }, + "detect-libc": { + "version": "1.0.3", + "dev": true + }, + "detect-newline": { + "version": "4.0.1", + "dev": true + }, + "detect-node": { + "version": "2.1.0", + "dev": true + }, + "diff": { + "version": "4.0.2", + "dev": true + }, + "diffie-hellman": { + "version": "5.0.3", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + }, + "dependencies": { + "bn.js": { + "version": "4.12.0", + "dev": true + } + } + }, + "dir-glob": { + "version": "3.0.1", + "dev": true, + "requires": { + "path-type": "^4.0.0" + } + }, + "doctrine": { + "version": "2.1.0", + "dev": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "dom-converter": { + "version": "0.2.0", + "dev": true, + "requires": { + "utila": "~0.4" + } + }, + "dom-serializer": { + "version": "2.0.0", + "dev": true, + "requires": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + } + }, + "domain-browser": { + "version": "1.2.0", + "dev": true + }, + "domelementtype": { + "version": "2.3.0", + "dev": true + }, + "domhandler": { + "version": "5.0.3", + "dev": true, + "requires": { + "domelementtype": "^2.3.0" + } + }, + 
"domutils": { + "version": "3.1.0", + "dev": true, + "requires": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + } + }, + "dot-case": { + "version": "3.0.4", + "dev": true, + "requires": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "dot-prop": { + "version": "5.3.0", + "dev": true, + "requires": { + "is-obj": "^2.0.0" + } + }, + "dumi": { + "version": "2.3.2", + "dev": true, + "requires": { + "@ant-design/icons-svg": "^4.2.1", + "@makotot/ghostui": "^2.0.0", + "@stackblitz/sdk": "^1.9.0", + "@swc/core": "1.4.2", + "@types/hast": "^2.3.5", + "@types/mdast": "^3.0.12", + "@umijs/bundler-utils": "^4.0.84", + "@umijs/core": "^4.0.84", + "@umijs/utils": "^4.0.84", + "animated-scroll-to": "^2.3.0", + "classnames": "2.3.2", + "codesandbox": "^2.2.3", + "comlink": "^4.4.1", + "copy-to-clipboard": "^3.3.3", + "deepmerge": "^4.3.1", + "dumi-afx-deps": "^1.0.0-alpha.19", + "dumi-assets-types": "2.3.0", + "enhanced-resolve": "^5.15.0", + "estree-util-to-js": "^1.2.0", + "estree-util-visit": "^1.2.1", + "file-system-cache": "^2.4.3", + "github-slugger": "^1.5.0", + "hast-util-is-element": "^2.1.3", + "hast-util-raw": "^8.0.0", + "hast-util-to-estree": "^2.3.3", + "hast-util-to-string": "^2.0.0", + "heti": "^0.9.4", + "hosted-git-info": "^6.1.1", + "html-to-text": "^9.0.5", + "html2sketch": "^1.0.2", + "js-yaml": "^4.1.0", + "lodash.throttle": "^4.1.1", + "mdast-util-find-and-replace": "^2.2.2", + "mdast-util-to-string": "^3.2.0", + "nprogress": "^0.2.0", + "pluralize": "^8.0.0", + "prism-react-renderer": "^1.3.5", + "prism-themes": "^1.9.0", + "prismjs": "^1.29.0", + "raw-loader": "^4.0.2", + "rc-motion": "^2.7.3", + "rc-tabs": "^12.10.0", + "rc-tooltip": "^6.1.3", + "rc-tree": "^5.7.9", + "rc-util": "^5.38.0", + "react-copy-to-clipboard": "^5.1.0", + "react-error-boundary": "^4.0.10", + "react-intl": "^6.4.4", + "react-loading-skeleton": "^3.1.1", + "react-simple-code-editor": "^0.13.1", + "rehype-autolink-headings": 
"^6.1.1", + "rehype-remove-comments": "^5.0.0", + "rehype-stringify": "^9.0.3", + "remark-directive": "^2.0.1", + "remark-frontmatter": "^4.0.1", + "remark-gfm": "^3.0.1", + "remark-parse": "^10.0.2", + "remark-rehype": "^10.1.0", + "sass": "^1.64.1", + "sitemap": "^7.1.1", + "sucrase": "^3.34.0", + "umi": "^4.0.84", + "unified": "^10.1.2", + "unist-util-visit": "^4.1.2", + "unist-util-visit-parents": "^5.1.3", + "url": "^0.11.1", + "v8-compile-cache": "2.3.0", + "vfile": "^5.3.7" + }, + "dependencies": { + "@rc-component/trigger": { + "version": "1.18.3", + "dev": true, + "requires": { + "@babel/runtime": "^7.23.2", + "@rc-component/portal": "^1.1.0", + "classnames": "^2.3.2", + "rc-motion": "^2.0.0", + "rc-resize-observer": "^1.3.1", + "rc-util": "^5.38.0" + } + }, + "classnames": { + "version": "2.3.2", + "dev": true + }, + "rc-dropdown": { + "version": "4.1.0", + "dev": true, + "requires": { + "@babel/runtime": "^7.18.3", + "@rc-component/trigger": "^1.7.0", + "classnames": "^2.2.6", + "rc-util": "^5.17.0" + } + }, + "rc-menu": { + "version": "9.12.4", + "dev": true, + "requires": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^1.17.0", + "classnames": "2.x", + "rc-motion": "^2.4.3", + "rc-overflow": "^1.3.1", + "rc-util": "^5.27.0" + } + }, + "rc-tabs": { + "version": "12.15.0", + "dev": true, + "requires": { + "@babel/runtime": "^7.11.2", + "classnames": "2.x", + "rc-dropdown": "~4.1.0", + "rc-menu": "~9.12.0", + "rc-motion": "^2.6.2", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.34.1" + } + } + } + }, + "dumi-afx-deps": { + "version": "1.0.0-alpha.20", + "dev": true + }, + "dumi-assets-types": { + "version": "2.3.0", + "dev": true + }, + "duplexer3": { + "version": "0.1.5", + "dev": true + }, + "duplexify": { + "version": "3.7.1", + "dev": true, + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "eastasianwidth": { + "version": "0.2.0", + "dev": true + 
}, + "editions": { + "version": "2.3.1", + "dev": true, + "requires": { + "errlop": "^2.0.0", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.1", + "dev": true + } + } + }, + "electron-to-chromium": { + "version": "1.4.751", + "dev": true + }, + "elliptic": { + "version": "6.5.5", + "dev": true, + "requires": { + "bn.js": "^4.11.9", + "brorand": "^1.1.0", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.1", + "inherits": "^2.0.4", + "minimalistic-assert": "^1.0.1", + "minimalistic-crypto-utils": "^1.0.1" + }, + "dependencies": { + "bn.js": { + "version": "4.12.0", + "dev": true + } + } + }, + "emoji-regex": { + "version": "9.2.2", + "dev": true + }, + "emojis-list": { + "version": "3.0.0", + "dev": true + }, + "encoding": { + "version": "0.1.13", + "dev": true, + "requires": { + "iconv-lite": "^0.6.2" + }, + "dependencies": { + "iconv-lite": { + "version": "0.6.3", + "dev": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + } + } + } + }, + "end-of-stream": { + "version": "1.4.4", + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, + "enhanced-resolve": { + "version": "5.16.0", + "dev": true, + "requires": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + } + }, + "enquire.js": { + "version": "2.1.6", + "resolved": "https://registry.npm.alibaba-inc.com/enquire.js/download/enquire.js-2.1.6.tgz", + "integrity": "sha1-PoeAybi4NQhMP2DhZtvDwqPImBQ=" + }, + "entities": { + "version": "4.5.0", + "dev": true + }, + "err-code": { + "version": "1.1.2", + "dev": true + }, + "errlop": { + "version": "2.2.0", + "dev": true + }, + "errno": { + "version": "0.1.8", + "dev": true, + "optional": true, + "requires": { + "prr": "~1.0.1" + } + }, + "error-ex": { + "version": "1.3.2", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "error-stack-parser": { + "version": "2.1.4", + "dev": true, + "requires": { + "stackframe": "^1.3.4" + } + }, + "es-abstract": { + "version": "1.23.3", + "dev": true, + "requires": { + 
"array-buffer-byte-length": "^1.0.1", + "arraybuffer.prototype.slice": "^1.0.3", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "data-view-buffer": "^1.0.1", + "data-view-byte-length": "^1.0.1", + "data-view-byte-offset": "^1.0.0", + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-set-tostringtag": "^2.0.3", + "es-to-primitive": "^1.2.1", + "function.prototype.name": "^1.1.6", + "get-intrinsic": "^1.2.4", + "get-symbol-description": "^1.0.2", + "globalthis": "^1.0.3", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "hasown": "^2.0.2", + "internal-slot": "^1.0.7", + "is-array-buffer": "^3.0.4", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.1", + "is-negative-zero": "^2.0.3", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.3", + "is-string": "^1.0.7", + "is-typed-array": "^1.1.13", + "is-weakref": "^1.0.2", + "object-inspect": "^1.13.1", + "object-keys": "^1.1.1", + "object.assign": "^4.1.5", + "regexp.prototype.flags": "^1.5.2", + "safe-array-concat": "^1.1.2", + "safe-regex-test": "^1.0.3", + "string.prototype.trim": "^1.2.9", + "string.prototype.trimend": "^1.0.8", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.2", + "typed-array-byte-length": "^1.0.1", + "typed-array-byte-offset": "^1.0.2", + "typed-array-length": "^1.0.6", + "unbox-primitive": "^1.0.2", + "which-typed-array": "^1.1.15" + } + }, + "es-array-method-boxes-properly": { + "version": "1.0.0", + "dev": true + }, + "es-define-property": { + "version": "1.0.0", + "dev": true, + "requires": { + "get-intrinsic": "^1.2.4" + } + }, + "es-errors": { + "version": "1.3.0", + "dev": true + }, + "es-get-iterator": { + "version": "1.1.3", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.3", + "has-symbols": "^1.0.3", + "is-arguments": "^1.1.1", + "is-map": "^2.0.2", + "is-set": "^2.0.2", + "is-string": "^1.0.7", + 
"isarray": "^2.0.5", + "stop-iteration-iterator": "^1.0.0" + }, + "dependencies": { + "isarray": { + "version": "2.0.5", + "dev": true + } + } + }, + "es-iterator-helpers": { + "version": "1.0.19", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "iterator.prototype": "^1.1.2", + "safe-array-concat": "^1.1.2" + } + }, + "es-module-lexer": { + "version": "1.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/es-module-lexer/download/es-module-lexer-1.4.1.tgz", + "integrity": "sha512-cXLGjP0c4T3flZJKQSuziYoq7MlT+rnvfZjfp7h+I7K9BNX54kP9nyWvdbwjQ4u1iWbOL4u96fgeZLToQlZC7w==", + "dev": true, + "peer": true + }, + "es-object-atoms": { + "version": "1.0.0", + "dev": true, + "requires": { + "es-errors": "^1.3.0" + } + }, + "es-set-tostringtag": { + "version": "2.0.3", + "dev": true, + "requires": { + "get-intrinsic": "^1.2.4", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.1" + } + }, + "es-shim-unscopables": { + "version": "1.0.2", + "dev": true, + "requires": { + "hasown": "^2.0.0" + } + }, + "es-to-primitive": { + "version": "1.2.1", + "dev": true, + "requires": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + } + }, + "es5-imcompatible-versions": { + "version": "0.1.89", + "dev": true + }, + "es6-promise": { + "version": "4.2.8", + "dev": true + }, + "es6-promisify": { + "version": "5.0.0", + "dev": true, + "requires": { + "es6-promise": "^4.0.3" + } + }, + "esbuild": { + "version": "0.17.19", + "dev": true, + "requires": { + "@esbuild/android-arm": "0.17.19", + "@esbuild/android-arm64": "0.17.19", + "@esbuild/android-x64": "0.17.19", + "@esbuild/darwin-arm64": "0.17.19", + "@esbuild/darwin-x64": 
"0.17.19", + "@esbuild/freebsd-arm64": "0.17.19", + "@esbuild/freebsd-x64": "0.17.19", + "@esbuild/linux-arm": "0.17.19", + "@esbuild/linux-arm64": "0.17.19", + "@esbuild/linux-ia32": "0.17.19", + "@esbuild/linux-loong64": "0.17.19", + "@esbuild/linux-mips64el": "0.17.19", + "@esbuild/linux-ppc64": "0.17.19", + "@esbuild/linux-riscv64": "0.17.19", + "@esbuild/linux-s390x": "0.17.19", + "@esbuild/linux-x64": "0.17.19", + "@esbuild/netbsd-x64": "0.17.19", + "@esbuild/openbsd-x64": "0.17.19", + "@esbuild/sunos-x64": "0.17.19", + "@esbuild/win32-arm64": "0.17.19", + "@esbuild/win32-ia32": "0.17.19", + "@esbuild/win32-x64": "0.17.19" + } + }, + "escalade": { + "version": "3.1.2", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "dev": true + }, + "eslint": { + "version": "8.57.0", + "resolved": "https://registry.npm.alibaba-inc.com/eslint/download/eslint-8.57.0.tgz", + "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", + "dev": true, + "peer": true, + "requires": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": 
"^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/ansi-regex/download/ansi-regex-5.0.1.tgz", + "integrity": "sha1-CCyyyJyf6GWaMRpTvWpNxTAdswQ=", + "dev": true, + "peer": true + }, + "debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "requires": { + "ms": "2.1.2" + } + }, + "doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/doctrine/download/doctrine-3.0.0.tgz", + "integrity": "sha1-rd6+rXKmV023g2OdyHoSF3OXOWE=", + "dev": true, + "peer": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/escape-string-regexp/download/escape-string-regexp-4.0.0.tgz", + "integrity": "sha1-FLqDpdNz49MR5a/KKc9b+tllvzQ=", + "dev": true, + "peer": true + }, + "eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npm.alibaba-inc.com/eslint-scope/download/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "peer": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + } + }, + "eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npm.alibaba-inc.com/eslint-visitor-keys/download/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "peer": true + }, + "glob-parent": { + "version": "6.0.2", + "resolved": 
"https://registry.npm.alibaba-inc.com/glob-parent/download/glob-parent-6.0.2.tgz", + "integrity": "sha1-bSN9mQg5UMeSkPJMdkKj3poo+eM=", + "dev": true, + "peer": true, + "requires": { + "is-glob": "^4.0.3" + } + }, + "globals": { + "version": "13.24.0", + "resolved": "https://registry.npm.alibaba-inc.com/globals/download/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "peer": true, + "requires": { + "type-fest": "^0.20.2" + } + }, + "is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npm.alibaba-inc.com/is-path-inside/download/is-path-inside-3.0.3.tgz", + "integrity": "sha1-0jE2LlOgf/Kw4Op/7QSRYf/RYoM=", + "dev": true, + "peer": true + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + "integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-ansi/download/strip-ansi-6.0.1.tgz", + "integrity": "sha1-nibGPTD1NEPpSJSVshBdN7Z6hdk=", + "dev": true, + "peer": true, + "requires": { + "ansi-regex": "^5.0.1" + } + }, + "type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npm.alibaba-inc.com/type-fest/download/type-fest-0.20.2.tgz", + "integrity": "sha1-G/IH9LKPkVg2ZstfvTJ4hzAc1fQ=", + "dev": true, + "peer": true + } + } + }, + "eslint-plugin-jest": { + "version": "27.2.3", + "dev": true, + "requires": { + "@typescript-eslint/utils": "^5.10.0" + } + }, + "eslint-plugin-react": { + "version": "7.33.2", + "dev": true, + "requires": { + "array-includes": "^3.1.6", + "array.prototype.flatmap": "^1.3.1", + "array.prototype.tosorted": "^1.1.1", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.12", + "estraverse": "^5.3.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.6", + "object.fromentries": "^2.0.6", 
+ "object.hasown": "^1.1.2", + "object.values": "^1.1.6", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.4", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.8" + }, + "dependencies": { + "semver": { + "version": "6.3.1", + "dev": true + } + } + }, + "eslint-plugin-react-hooks": { + "version": "4.6.0", + "dev": true, + "requires": {} + }, + "eslint-scope": { + "version": "5.1.1", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "dependencies": { + "estraverse": { + "version": "4.3.0", + "dev": true + } + } + }, + "eslint-visitor-keys": { + "version": "2.1.0", + "dev": true + }, + "espree": { + "version": "9.6.1", + "resolved": "https://registry.npm.alibaba-inc.com/espree/download/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "peer": true, + "requires": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npm.alibaba-inc.com/eslint-visitor-keys/download/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "peer": true + } + } + }, + "esprima": { + "version": "4.0.1", + "dev": true + }, + "esquery": { + "version": "1.5.0", + "resolved": "https://registry.npm.alibaba-inc.com/esquery/download/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "peer": true, + "requires": { + "estraverse": "^5.1.0" + } + }, + "esrecurse": { + "version": "4.3.0", + "dev": true, + "requires": { + "estraverse": "^5.2.0" + } + }, + "estraverse": { + "version": "5.3.0", + "dev": true + }, + "estree-util-attach-comments": { + "version": "2.1.1", + "dev": true, + "requires": { + "@types/estree": 
"^1.0.0" + } + }, + "estree-util-is-identifier-name": { + "version": "2.1.0", + "dev": true + }, + "estree-util-to-js": { + "version": "1.2.0", + "dev": true, + "requires": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + } + }, + "estree-util-visit": { + "version": "1.2.1", + "dev": true, + "requires": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^2.0.0" + } + }, + "esutils": { + "version": "2.0.3", + "dev": true + }, + "eventemitter3": { + "version": "5.0.1", + "dev": true + }, + "events": { + "version": "3.3.0", + "dev": true + }, + "evp_bytestokey": { + "version": "1.0.3", + "dev": true, + "requires": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "execa": { + "version": "5.1.1", + "dev": true, + "requires": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + } + }, + "expand-tilde": { + "version": "1.2.2", + "dev": true, + "requires": { + "os-homedir": "^1.0.1" + } + }, + "extend": { + "version": "3.0.2", + "dev": true + }, + "extend-shallow": { + "version": "2.0.1", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "external-editor": { + "version": "3.1.0", + "dev": true, + "requires": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + } + }, + "fast-deep-equal": { + "version": "3.1.3", + "dev": true + }, + "fast-glob": { + "version": "3.2.12", + "dev": true, + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + } + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npm.alibaba-inc.com/fast-levenshtein/download/fast-levenshtein-2.0.6.tgz", + "integrity": 
"sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true, + "peer": true + }, + "fast-redact": { + "version": "3.5.0", + "dev": true + }, + "fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://registry.npm.alibaba-inc.com/fastest-levenshtein/download/fastest-levenshtein-1.0.16.tgz", + "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "dev": true, + "peer": true + }, + "fastq": { + "version": "1.17.1", + "dev": true, + "requires": { + "reusify": "^1.0.4" + } + }, + "fault": { + "version": "2.0.1", + "dev": true, + "requires": { + "format": "^0.2.0" + } + }, + "fb-watchman": { + "version": "2.0.2", + "dev": true, + "requires": { + "bser": "2.1.1" + } + }, + "fetch-blob": { + "version": "3.2.0", + "dev": true, + "requires": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + } + }, + "figures": { + "version": "2.0.0", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + } + }, + "file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/file-entry-cache/download/file-entry-cache-6.0.1.tgz", + "integrity": "sha1-IRst2WWcsDlLBz5zI6w8kz1SICc=", + "dev": true, + "peer": true, + "requires": { + "flat-cache": "^3.0.4" + } + }, + "file-name": { + "version": "0.1.0", + "dev": true + }, + "file-system-cache": { + "version": "2.4.4", + "dev": true, + "requires": { + "@types/fs-extra": "11.0.1", + "@types/ramda": "0.29.3", + "fs-extra": "11.1.1", + "ramda": "0.29.0" + }, + "dependencies": { + "fs-extra": { + "version": "11.1.1", + "dev": true, + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + } + } + }, + "filesize": { + "version": "3.6.1", + "dev": true + }, + "fill-range": { + "version": "7.0.1", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "filter-obj": { + "version": "1.1.0", + "dev": true + }, + "find-file-up": { + "version": "0.1.3", + "dev": true, + 
"requires": { + "fs-exists-sync": "^0.1.0", + "resolve-dir": "^0.1.0" + } + }, + "find-pkg": { + "version": "0.1.2", + "dev": true, + "requires": { + "find-file-up": "^0.1.2" + } + }, + "find-up": { + "version": "5.0.0", + "dev": true, + "requires": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + } + }, + "flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/flat-cache/download/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "peer": true, + "requires": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "dependencies": { + "rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npm.alibaba-inc.com/rimraf/download/rimraf-3.0.2.tgz", + "integrity": "sha1-8aVAK6YiCtUswSgrrBrjqkn9Bho=", + "dev": true, + "peer": true, + "requires": { + "glob": "^7.1.3" + } + } + } + }, + "flatted": { + "version": "3.2.9", + "resolved": "https://registry.npm.alibaba-inc.com/flatted/download/flatted-3.2.9.tgz", + "integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==", + "dev": true, + "peer": true + }, + "flush-write-stream": { + "version": "1.1.1", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" + } + }, + "follow-redirects": { + "version": "1.5.10", + "dev": true, + "requires": { + "debug": "=3.1.0" + } + }, + "for-each": { + "version": "0.3.3", + "dev": true, + "requires": { + "is-callable": "^1.1.3" + } + }, + "foreground-child": { + "version": "3.1.1", + "dev": true, + "requires": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "dependencies": { + "signal-exit": { + "version": "4.1.0", + "dev": true + } + } + }, + "fork-ts-checker-webpack-plugin": { + "version": "8.0.0", + "dev": true, + "requires": { + "@babel/code-frame": "^7.16.7", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "cosmiconfig": 
"^7.0.1", + "deepmerge": "^4.2.2", + "fs-extra": "^10.0.0", + "memfs": "^3.4.1", + "minimatch": "^3.0.4", + "node-abort-controller": "^3.0.1", + "schema-utils": "^3.1.1", + "semver": "^7.3.5", + "tapable": "^2.2.1" + }, + "dependencies": { + "cosmiconfig": { + "version": "7.1.0", + "dev": true, + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + } + }, + "fs-extra": { + "version": "10.1.0", + "dev": true, + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "yaml": { + "version": "1.10.2", + "dev": true + } + } + }, + "format": { + "version": "0.2.2", + "dev": true + }, + "formdata-polyfill": { + "version": "4.0.10", + "dev": true, + "requires": { + "fetch-blob": "^3.1.2" + } + }, + "fraction.js": { + "version": "4.3.7", + "dev": true + }, + "from2": { + "version": "2.3.0", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "fs-constants": { + "version": "1.0.0", + "dev": true + }, + "fs-exists-sync": { + "version": "0.1.0", + "dev": true + }, + "fs-extra": { + "version": "11.2.0", + "dev": true, + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "fs-monkey": { + "version": "1.0.5", + "dev": true + }, + "fs-write-stream-atomic": { + "version": "1.0.10", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + } + }, + "fs.realpath": { + "version": "1.0.0", + "dev": true + }, + "fsevents": { + "version": "2.3.3", + "dev": true, + "optional": true + }, + "function-bind": { + "version": "1.1.2", + "dev": true + }, + "function.prototype.name": { + "version": "1.1.6", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" + } + }, + 
"functions-have-names": { + "version": "1.2.3", + "dev": true + }, + "genfun": { + "version": "4.0.1", + "dev": true + }, + "gensync": { + "version": "1.0.0-beta.2", + "dev": true + }, + "get-caller-file": { + "version": "2.0.5", + "dev": true + }, + "get-intrinsic": { + "version": "1.2.4", + "dev": true, + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + }, + "get-package-type": { + "version": "0.1.0", + "dev": true + }, + "get-stream": { + "version": "6.0.1", + "dev": true + }, + "get-symbol-description": { + "version": "1.0.2", + "dev": true, + "requires": { + "call-bind": "^1.0.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4" + } + }, + "get-tsconfig": { + "version": "4.7.3", + "dev": true, + "requires": { + "resolve-pkg-maps": "^1.0.0" + } + }, + "get-value": { + "version": "2.0.6", + "dev": true + }, + "git-branch": { + "version": "1.0.0", + "dev": true + }, + "git-config-path": { + "version": "1.0.1", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "fs-exists-sync": "^0.1.0", + "homedir-polyfill": "^1.0.0" + } + }, + "git-hooks-list": { + "version": "3.1.0", + "dev": true + }, + "git-raw-commits": { + "version": "2.0.11", + "dev": true, + "requires": { + "dargs": "^7.0.0", + "lodash": "^4.17.15", + "meow": "^8.0.0", + "split2": "^3.0.0", + "through2": "^4.0.0" + } + }, + "git-repo-name": { + "version": "0.6.0", + "dev": true, + "requires": { + "cwd": "^0.9.1", + "file-name": "^0.1.0", + "lazy-cache": "^1.0.4", + "remote-origin-url": "^0.5.1" + } + }, + "git-username": { + "version": "0.5.1", + "dev": true, + "requires": { + "remote-origin-url": "^0.4.0" + }, + "dependencies": { + "parse-git-config": { + "version": "0.2.0", + "dev": true, + "requires": { + "ini": "^1.3.3" + } + }, + "remote-origin-url": { + "version": "0.4.0", + "dev": true, + "requires": { + "parse-git-config": "^0.2.0" + } + } + } + }, + "github-slugger": { + "version": 
"1.5.0", + "dev": true + }, + "glob": { + "version": "7.2.3", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "5.1.2", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/glob-to-regexp/download/glob-to-regexp-0.4.1.tgz", + "integrity": "sha1-x1KXCHyFG5pXi9IX3VmpL1n+VG4=", + "dev": true, + "peer": true + }, + "global-dirs": { + "version": "0.1.1", + "dev": true, + "requires": { + "ini": "^1.3.4" + } + }, + "global-modules": { + "version": "0.2.3", + "dev": true, + "requires": { + "global-prefix": "^0.1.4", + "is-windows": "^0.2.0" + } + }, + "global-prefix": { + "version": "0.1.5", + "dev": true, + "requires": { + "homedir-polyfill": "^1.0.0", + "ini": "^1.3.4", + "is-windows": "^0.2.0", + "which": "^1.2.12" + }, + "dependencies": { + "which": { + "version": "1.3.1", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "globals": { + "version": "11.12.0", + "dev": true + }, + "globalthis": { + "version": "1.0.3", + "dev": true, + "requires": { + "define-properties": "^1.1.3" + } + }, + "globby": { + "version": "11.1.0", + "dev": true, + "requires": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + } + }, + "globjoin": { + "version": "0.1.4", + "resolved": "https://registry.npm.alibaba-inc.com/globjoin/download/globjoin-0.1.4.tgz", + "integrity": "sha1-L0SUrIkZ43Z8XLtpHp9GMyQoXUM=", + "dev": true, + "peer": true + }, + "gopd": { + "version": "1.0.1", + "dev": true, + "requires": { + "get-intrinsic": "^1.1.3" + } + }, + "got": { + "version": "6.7.1", + "dev": true, + "requires": { + "create-error-class": "^3.0.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "is-redirect": 
"^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "safe-buffer": "^5.0.1", + "timed-out": "^4.0.0", + "unzip-response": "^2.0.1", + "url-parse-lax": "^1.0.0" + }, + "dependencies": { + "get-stream": { + "version": "3.0.0", + "dev": true + }, + "is-stream": { + "version": "1.1.0", + "dev": true + } + } + }, + "graceful-fs": { + "version": "4.2.11", + "dev": true + }, + "graphemer": { + "version": "1.4.0", + "dev": true + }, + "handle-thing": { + "version": "2.0.1", + "dev": true + }, + "hard-rejection": { + "version": "2.1.0", + "dev": true + }, + "harmony-reflect": { + "version": "1.6.2", + "dev": true + }, + "has-bigints": { + "version": "1.0.2", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "dev": true + }, + "has-property-descriptors": { + "version": "1.0.2", + "dev": true, + "requires": { + "es-define-property": "^1.0.0" + } + }, + "has-proto": { + "version": "1.0.3", + "dev": true + }, + "has-symbols": { + "version": "1.0.3", + "dev": true + }, + "has-tostringtag": { + "version": "1.0.2", + "dev": true, + "requires": { + "has-symbols": "^1.0.3" + } + }, + "has-value": { + "version": "0.3.1", + "dev": true, + "requires": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "dependencies": { + "isobject": { + "version": "2.1.0", + "dev": true, + "requires": { + "isarray": "1.0.0" + } + } + } + }, + "has-values": { + "version": "0.1.4", + "dev": true + }, + "hash-base": { + "version": "3.0.4", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "hash.js": { + "version": "1.1.7", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "hasown": { + "version": "2.0.2", + "dev": true, + "requires": { + "function-bind": "^1.1.2" + } + }, + "hast-util-from-parse5": { + "version": "7.1.2", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "hastscript": 
"^7.0.0", + "property-information": "^6.0.0", + "vfile": "^5.0.0", + "vfile-location": "^4.0.0", + "web-namespaces": "^2.0.0" + } + }, + "hast-util-has-property": { + "version": "2.0.1", + "dev": true + }, + "hast-util-heading-rank": { + "version": "2.1.1", + "dev": true, + "requires": { + "@types/hast": "^2.0.0" + } + }, + "hast-util-is-conditional-comment": { + "version": "2.0.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0" + } + }, + "hast-util-is-element": { + "version": "2.1.3", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0" + } + }, + "hast-util-parse-selector": { + "version": "3.1.1", + "dev": true, + "requires": { + "@types/hast": "^2.0.0" + } + }, + "hast-util-raw": { + "version": "8.0.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "extend": "^3.0.0", + "hast-util-from-parse5": "^7.0.0", + "hast-util-to-parse5": "^7.0.0", + "html-void-elements": "^2.0.0", + "mdast-util-to-hast": "^12.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0", + "vfile": "^5.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + } + }, + "hast-util-to-estree": { + "version": "2.3.3", + "dev": true, + "requires": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "estree-util-attach-comments": "^2.0.0", + "estree-util-is-identifier-name": "^2.0.0", + "hast-util-whitespace": "^2.0.0", + "mdast-util-mdx-expression": "^1.0.0", + "mdast-util-mdxjs-esm": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.1", + "unist-util-position": "^4.0.0", + "zwitch": "^2.0.0" + } + }, + "hast-util-to-html": { + "version": "8.0.4", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-raw": "^7.0.0", + 
"hast-util-whitespace": "^2.0.0", + "html-void-elements": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, + "dependencies": { + "hast-util-raw": { + "version": "7.2.3", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "@types/parse5": "^6.0.0", + "hast-util-from-parse5": "^7.0.0", + "hast-util-to-parse5": "^7.0.0", + "html-void-elements": "^2.0.0", + "parse5": "^6.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0", + "vfile": "^5.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + } + }, + "parse5": { + "version": "6.0.1", + "dev": true + } + } + }, + "hast-util-to-parse5": { + "version": "7.1.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + } + }, + "hast-util-to-string": { + "version": "2.0.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0" + } + }, + "hast-util-whitespace": { + "version": "2.0.1", + "dev": true + }, + "hastscript": { + "version": "7.2.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^3.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + } + }, + "he": { + "version": "1.2.0", + "dev": true + }, + "heti": { + "version": "0.9.4", + "dev": true, + "requires": { + "heti-findandreplacedomtext": "^0.5.0" + } + }, + "heti-findandreplacedomtext": { + "version": "0.5.0", + "dev": true + }, + "history": { + "version": "5.3.0", + "dev": true, + "requires": { + "@babel/runtime": "^7.7.6" + } + }, + "hmac-drbg": { + "version": "1.0.1", + "dev": true, + "requires": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "hoist-non-react-statics": { + "version": "3.3.2", + "dev": 
true, + "requires": { + "react-is": "^16.7.0" + }, + "dependencies": { + "react-is": { + "version": "16.13.1", + "dev": true + } + } + }, + "homedir-polyfill": { + "version": "1.0.3", + "dev": true, + "requires": { + "parse-passwd": "^1.0.0" + } + }, + "hosted-git-info": { + "version": "6.1.1", + "dev": true, + "requires": { + "lru-cache": "^7.5.1" + } + }, + "hpack.js": { + "version": "2.1.6", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "htm": { + "version": "3.1.1", + "dev": true + }, + "html-entities": { + "version": "2.5.2", + "dev": true + }, + "html-minifier-terser": { + "version": "6.1.0", + "dev": true, + "requires": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "dependencies": { + "commander": { + "version": "8.3.0", + "dev": true + } + } + }, + "html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npm.alibaba-inc.com/html-tags/download/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "dev": true, + "peer": true + }, + "html-to-text": { + "version": "9.0.5", + "dev": true, + "requires": { + "@selderee/plugin-htmlparser2": "^0.11.0", + "deepmerge": "^4.3.1", + "dom-serializer": "^2.0.0", + "htmlparser2": "^8.0.2", + "selderee": "^0.11.0" + } + }, + "html-void-elements": { + "version": "2.0.1", + "dev": true + }, + "html-webpack-plugin": { + "version": "5.5.0", + "dev": true, + "requires": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + } + }, + "html2sketch": { + "version": "1.0.2", + "dev": true, + "requires": { + "@sketch-hq/sketch-file-format-ts": "^6", + "color": "^3.1.2", + "css": "^3.0.0", + "svg-pathdata": "^5.0.5", + 
"svgo-browser": "^1.3.7", + "svgson": "^4.1.0", + "transformation-matrix": "^2.11.1", + "uuid": "^8.2.0" + } + }, + "htmlparser2": { + "version": "8.0.2", + "dev": true, + "requires": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "http-cache-semantics": { + "version": "3.8.1", + "dev": true + }, + "http-deceiver": { + "version": "1.2.7", + "dev": true + }, + "http-proxy-agent": { + "version": "2.1.0", + "dev": true, + "requires": { + "agent-base": "4", + "debug": "3.1.0" + } + }, + "https-browserify": { + "version": "1.0.0", + "dev": true + }, + "https-proxy-agent": { + "version": "2.2.4", + "dev": true, + "requires": { + "agent-base": "^4.3.0", + "debug": "^3.1.0" + } + }, + "human-signals": { + "version": "2.1.0", + "dev": true + }, + "humanize-ms": { + "version": "1.2.1", + "dev": true, + "requires": { + "ms": "^2.0.0" + } + }, + "humps": { + "version": "2.0.1", + "dev": true + }, + "husky": { + "version": "8.0.3", + "dev": true + }, + "iconv-lite": { + "version": "0.4.24", + "dev": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "icss-utils": { + "version": "5.1.0", + "dev": true, + "requires": {} + }, + "identity-obj-proxy": { + "version": "3.0.0", + "dev": true, + "requires": { + "harmony-reflect": "^1.4.6" + } + }, + "ieee754": { + "version": "1.2.1", + "dev": true + }, + "iferr": { + "version": "0.1.5", + "dev": true + }, + "ignore": { + "version": "5.3.1", + "dev": true + }, + "image-size": { + "version": "0.8.3", + "dev": true, + "requires": { + "queue": "6.0.1" + } + }, + "immutable": { + "version": "4.3.5", + "dev": true + }, + "import-fresh": { + "version": "3.3.0", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "dependencies": { + "resolve-from": { + "version": "4.0.0", + "dev": true + } + } + }, + "import-lazy": { + "version": "2.1.0", + "dev": true + }, + "imurmurhash": { + "version": "0.1.4", + "dev": true + }, + 
"indent-string": { + "version": "4.0.0", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "dev": true + }, + "ini": { + "version": "1.3.8", + "dev": true + }, + "inline-style-parser": { + "version": "0.1.1", + "dev": true + }, + "inquirer": { + "version": "6.5.2", + "dev": true, + "requires": { + "ansi-escapes": "^3.2.0", + "chalk": "^2.4.2", + "cli-cursor": "^2.1.0", + "cli-width": "^2.0.0", + "external-editor": "^3.0.3", + "figures": "^2.0.0", + "lodash": "^4.17.12", + "mute-stream": "0.0.7", + "run-async": "^2.2.0", + "rxjs": "^6.4.0", + "string-width": "^2.1.0", + "strip-ansi": "^5.1.0", + "through": "^2.3.6" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "internal-slot": { + "version": "1.0.7", + "dev": true, + "requires": { + "es-errors": "^1.3.0", + "hasown": "^2.0.0", + "side-channel": "^1.0.4" + } + }, + "intl-messageformat": { + "version": "10.5.11", + "dev": true, + "requires": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/fast-memoize": "2.2.0", + "@formatjs/icu-messageformat-parser": "2.7.6", + "tslib": "^2.4.0" + } + }, + "invariant": { + "version": "2.2.4", + "dev": true, + "requires": { + "loose-envify": "^1.0.0" + } + }, + "ip": { + "version": "1.1.9", + "dev": true + }, + "is-alphabetical": { + "version": "2.0.1", + "dev": true + }, 
+ "is-alphanumerical": { + "version": "2.0.1", + "dev": true, + "requires": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + } + }, + "is-arguments": { + "version": "1.1.1", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + } + }, + "is-array-buffer": { + "version": "3.0.4", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1" + } + }, + "is-arrayish": { + "version": "0.2.1", + "dev": true + }, + "is-arrow-function": { + "version": "2.0.3", + "dev": true, + "requires": { + "is-callable": "^1.0.4" + } + }, + "is-async-function": { + "version": "2.0.0", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-bigint": { + "version": "1.0.4", + "dev": true, + "requires": { + "has-bigints": "^1.0.1" + } + }, + "is-binary-path": { + "version": "2.1.0", + "dev": true, + "requires": { + "binary-extensions": "^2.0.0" + } + }, + "is-boolean-object": { + "version": "1.1.2", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + } + }, + "is-buffer": { + "version": "2.0.5", + "dev": true + }, + "is-callable": { + "version": "1.2.7", + "dev": true + }, + "is-ci": { + "version": "1.2.1", + "dev": true, + "requires": { + "ci-info": "^1.5.0" + }, + "dependencies": { + "ci-info": { + "version": "1.6.0", + "dev": true + } + } + }, + "is-core-module": { + "version": "2.13.1", + "dev": true, + "requires": { + "hasown": "^2.0.0" + } + }, + "is-data-view": { + "version": "1.0.1", + "dev": true, + "requires": { + "is-typed-array": "^1.1.13" + } + }, + "is-date-object": { + "version": "1.0.5", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-decimal": { + "version": "2.0.1", + "dev": true + }, + "is-docker": { + "version": "3.0.0", + "dev": true + }, + "is-equal": { + "version": "1.7.0", + "dev": true, + "requires": { + "es-get-iterator": "^1.1.3", + "es-to-primitive": "^1.2.1", + "functions-have-names": "^1.2.3", + "has-bigints": 
"^1.0.2", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0", + "is-arrow-function": "^2.0.3", + "is-bigint": "^1.0.4", + "is-boolean-object": "^1.1.2", + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-generator-function": "^1.0.10", + "is-number-object": "^1.0.7", + "is-regex": "^1.1.4", + "is-string": "^1.0.7", + "is-symbol": "^1.0.4", + "isarray": "^2.0.5", + "object-inspect": "^1.13.1", + "object.entries": "^1.1.7", + "object.getprototypeof": "^1.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1" + }, + "dependencies": { + "isarray": { + "version": "2.0.5", + "dev": true + } + } + }, + "is-extendable": { + "version": "0.1.1", + "dev": true + }, + "is-extglob": { + "version": "2.1.1", + "dev": true + }, + "is-finalizationregistry": { + "version": "1.0.2", + "dev": true, + "requires": { + "call-bind": "^1.0.2" + } + }, + "is-fullwidth-code-point": { + "version": "4.0.0", + "dev": true + }, + "is-generator-function": { + "version": "1.0.10", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-glob": { + "version": "4.0.3", + "dev": true, + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-hexadecimal": { + "version": "2.0.1", + "dev": true + }, + "is-inside-container": { + "version": "1.0.0", + "dev": true, + "requires": { + "is-docker": "^3.0.0" + } + }, + "is-installed-globally": { + "version": "0.1.0", + "dev": true, + "requires": { + "global-dirs": "^0.1.0", + "is-path-inside": "^1.0.0" + } + }, + "is-map": { + "version": "2.0.3", + "dev": true + }, + "is-negative-zero": { + "version": "2.0.3", + "dev": true + }, + "is-npm": { + "version": "1.0.0", + "dev": true + }, + "is-number": { + "version": "7.0.0", + "dev": true + }, + "is-number-object": { + "version": "1.0.7", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-obj": { + "version": "2.0.0", + "dev": true + }, + "is-path-inside": { + "version": "1.0.1", + "dev": true, + "requires": { + "path-is-inside": "^1.0.1" + } + 
}, + "is-plain-obj": { + "version": "1.1.0", + "dev": true + }, + "is-plain-object": { + "version": "2.0.4", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + }, + "is-redirect": { + "version": "1.0.0", + "dev": true + }, + "is-regex": { + "version": "1.1.4", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + } + }, + "is-retry-allowed": { + "version": "1.2.0", + "dev": true + }, + "is-set": { + "version": "2.0.3", + "dev": true + }, + "is-shared-array-buffer": { + "version": "1.0.3", + "dev": true, + "requires": { + "call-bind": "^1.0.7" + } + }, + "is-stream": { + "version": "2.0.1", + "dev": true + }, + "is-string": { + "version": "1.0.7", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-symbol": { + "version": "1.0.4", + "dev": true, + "requires": { + "has-symbols": "^1.0.2" + } + }, + "is-text-path": { + "version": "1.0.1", + "dev": true, + "requires": { + "text-extensions": "^1.0.0" + } + }, + "is-typed-array": { + "version": "1.1.13", + "dev": true, + "requires": { + "which-typed-array": "^1.1.14" + } + }, + "is-weakmap": { + "version": "2.0.2", + "dev": true + }, + "is-weakref": { + "version": "1.0.2", + "dev": true, + "requires": { + "call-bind": "^1.0.2" + } + }, + "is-weakset": { + "version": "2.0.3", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4" + } + }, + "is-what": { + "version": "3.14.1", + "dev": true + }, + "is-windows": { + "version": "0.2.0", + "dev": true + }, + "is-wsl": { + "version": "1.1.0", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "dev": true + }, + "isobject": { + "version": "3.0.1", + "dev": true + }, + "isomorphic-unfetch": { + "version": "4.0.2", + "dev": true, + "requires": { + "node-fetch": "^3.2.0", + "unfetch": "^5.0.0" + } + }, + "istanbul-lib-coverage": { + "version": "3.2.2", + "dev": true + }, + "istanbul-lib-instrument": { + "version": "5.2.1", + 
"dev": true, + "requires": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.1", + "dev": true + } + } + }, + "istextorbinary": { + "version": "2.6.0", + "dev": true, + "requires": { + "binaryextensions": "^2.1.2", + "editions": "^2.2.0", + "textextensions": "^2.5.0" + } + }, + "iterator.prototype": { + "version": "1.1.2", + "dev": true, + "requires": { + "define-properties": "^1.2.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.4", + "set-function-name": "^2.0.1" + } + }, + "jackspeak": { + "version": "2.3.6", + "dev": true, + "requires": { + "@isaacs/cliui": "^8.0.2", + "@pkgjs/parseargs": "^0.11.0" + } + }, + "jest-haste-map": { + "version": "29.7.0", + "dev": true, + "requires": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "fsevents": "^2.3.2", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "dependencies": { + "@jest/types": { + "version": "29.6.3", + "dev": true, + "requires": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + } + }, + "@types/yargs": { + "version": "17.0.32", + "dev": true, + "requires": { + "@types/yargs-parser": "*" + } + }, + "jest-worker": { + "version": "29.7.0", + "dev": true, + "requires": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + } + }, + "supports-color": { + "version": "8.1.1", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "jest-regex-util": { + "version": "29.6.3", + "dev": true + }, + 
"jest-util": { + "version": "29.7.0", + "dev": true, + "requires": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "dependencies": { + "@jest/types": { + "version": "29.6.3", + "dev": true, + "requires": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + } + }, + "@types/yargs": { + "version": "17.0.32", + "dev": true, + "requires": { + "@types/yargs-parser": "*" + } + } + } + }, + "jest-worker": { + "version": "29.4.3", + "dev": true, + "requires": { + "@types/node": "*", + "jest-util": "^29.4.3", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "dependencies": { + "supports-color": { + "version": "8.1.1", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "jquery": { + "version": "3.7.1", + "resolved": "https://registry.npm.alibaba-inc.com/jquery/download/jquery-3.7.1.tgz", + "integrity": "sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==", + "peer": true + }, + "js-tokens": { + "version": "4.0.0" + }, + "js-yaml": { + "version": "4.1.0", + "dev": true, + "requires": { + "argparse": "^2.0.1" + } + }, + "jsesc": { + "version": "2.5.2", + "dev": true + }, + "json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/json-buffer/download/json-buffer-3.0.1.tgz", + "integrity": "sha1-kziAKjDTtmBfvgYT4JQAjKjAWhM=", + "dev": true, + "peer": true + }, + "json-parse-better-errors": { + "version": "1.0.2", + "dev": true + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz", + "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=", + "dev": 
true + }, + "json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/json-stable-stringify-without-jsonify/download/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true, + "peer": true + }, + "json2mq": { + "version": "0.2.0", + "requires": { + "string-convert": "^0.2.0" + } + }, + "json5": { + "version": "2.2.3", + "dev": true + }, + "jsonfile": { + "version": "6.1.0", + "dev": true, + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + } + }, + "jsonparse": { + "version": "1.3.1", + "dev": true + }, + "JSONStream": { + "version": "1.3.5", + "dev": true, + "requires": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + } + }, + "jsx-ast-utils": { + "version": "3.3.5", + "dev": true, + "requires": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + } + }, + "keyv": { + "version": "4.5.4", + "resolved": "https://registry.npm.alibaba-inc.com/keyv/download/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "peer": true, + "requires": { + "json-buffer": "3.0.1" + } + }, + "kind-of": { + "version": "6.0.3", + "dev": true + }, + "kleur": { + "version": "4.1.5", + "dev": true + }, + "known-css-properties": { + "version": "0.26.0", + "resolved": "https://registry.npm.alibaba-inc.com/known-css-properties/download/known-css-properties-0.26.0.tgz", + "integrity": "sha512-5FZRzrZzNTBruuurWpvZnvP9pum+fe0HcK8z/ooo+U+Hmp4vtbyp1/QDsqmufirXy4egGzbaH/y2uCZf+6W5Kg==", + "dev": true, + "peer": true + }, + "kolorist": { + "version": "1.8.0", + "dev": true + }, + "latest-version": { + "version": "3.1.0", + "dev": true, + "requires": { + "package-json": "^4.0.0" + } + }, + "lazy-cache": { + "version": "1.0.4", + "dev": true + }, + "leac": { + "version": "0.6.0", + "dev": true + 
}, + "less": { + "version": "4.1.3", + "dev": true, + "requires": { + "copy-anything": "^2.0.1", + "errno": "^0.1.1", + "graceful-fs": "^4.1.2", + "image-size": "~0.5.0", + "make-dir": "^2.1.0", + "mime": "^1.4.1", + "needle": "^3.1.0", + "parse-node-version": "^1.0.1", + "source-map": "~0.6.0", + "tslib": "^2.3.0" + }, + "dependencies": { + "image-size": { + "version": "0.5.5", + "dev": true, + "optional": true + }, + "source-map": { + "version": "0.6.1", + "dev": true, + "optional": true + } + } + }, + "less-plugin-resolve": { + "version": "1.0.2", + "dev": true, + "requires": { + "enhanced-resolve": "^5.15.0" + } + }, + "levn": { + "version": "0.4.1", + "resolved": "https://registry.npm.alibaba-inc.com/levn/download/levn-0.4.1.tgz", + "integrity": "sha1-rkViwAdHO5MqYgDUAyaN0v/8at4=", + "dev": true, + "peer": true, + "requires": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + } + }, + "lightningcss": { + "version": "1.22.1", + "dev": true, + "requires": { + "detect-libc": "^1.0.3", + "lightningcss-darwin-arm64": "1.22.1", + "lightningcss-darwin-x64": "1.22.1", + "lightningcss-freebsd-x64": "1.22.1", + "lightningcss-linux-arm-gnueabihf": "1.22.1", + "lightningcss-linux-arm64-gnu": "1.22.1", + "lightningcss-linux-arm64-musl": "1.22.1", + "lightningcss-linux-x64-gnu": "1.22.1", + "lightningcss-linux-x64-musl": "1.22.1", + "lightningcss-win32-x64-msvc": "1.22.1" + } + }, + "lightningcss-darwin-arm64": { + "version": "1.22.1", + "dev": true, + "optional": true + }, + "lilconfig": { + "version": "2.1.0", + "dev": true + }, + "lines-and-columns": { + "version": "1.2.4", + "dev": true + }, + "lint-staged": { + "version": "13.3.0", + "dev": true, + "requires": { + "chalk": "5.3.0", + "commander": "11.0.0", + "debug": "4.3.4", + "execa": "7.2.0", + "lilconfig": "2.1.0", + "listr2": "6.6.1", + "micromatch": "4.0.5", + "pidtree": "0.6.0", + "string-argv": "0.3.2", + "yaml": "2.3.1" + }, + "dependencies": { + "chalk": { + "version": "5.3.0", + "dev": true + }, + 
"commander": { + "version": "11.0.0", + "dev": true + }, + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "execa": { + "version": "7.2.0", + "dev": true, + "requires": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.1", + "human-signals": "^4.3.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^3.0.7", + "strip-final-newline": "^3.0.0" + } + }, + "human-signals": { + "version": "4.3.1", + "dev": true + }, + "is-stream": { + "version": "3.0.0", + "dev": true + }, + "mimic-fn": { + "version": "4.0.0", + "dev": true + }, + "ms": { + "version": "2.1.2", + "dev": true + }, + "npm-run-path": { + "version": "5.3.0", + "dev": true, + "requires": { + "path-key": "^4.0.0" + } + }, + "onetime": { + "version": "6.0.0", + "dev": true, + "requires": { + "mimic-fn": "^4.0.0" + } + }, + "path-key": { + "version": "4.0.0", + "dev": true + }, + "strip-final-newline": { + "version": "3.0.0", + "dev": true + } + } + }, + "listr2": { + "version": "6.6.1", + "dev": true, + "requires": { + "cli-truncate": "^3.1.0", + "colorette": "^2.0.20", + "eventemitter3": "^5.0.1", + "log-update": "^5.0.1", + "rfdc": "^1.3.0", + "wrap-ansi": "^8.1.0" + } + }, + "loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npm.alibaba-inc.com/loader-runner/download/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "dev": true, + "peer": true + }, + "loader-utils": { + "version": "2.0.4", + "dev": true, + "requires": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + } + }, + "local-pkg": { + "version": "0.4.3", + "dev": true + }, + "locate-path": { + "version": "6.0.0", + "dev": true, + "requires": { + "p-locate": "^5.0.0" + } + }, + "lodash": { + "version": "4.17.21", + "dev": true + }, + "lodash.camelcase": { + "version": "4.3.0", + "dev": true + }, + 
"lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npm.alibaba-inc.com/lodash.debounce/download/lodash.debounce-4.0.8.tgz", + "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" + }, + "lodash.isfunction": { + "version": "3.0.9", + "dev": true + }, + "lodash.isplainobject": { + "version": "4.0.6", + "dev": true + }, + "lodash.kebabcase": { + "version": "4.1.1", + "dev": true + }, + "lodash.merge": { + "version": "4.6.2", + "dev": true + }, + "lodash.mergewith": { + "version": "4.6.2", + "dev": true + }, + "lodash.snakecase": { + "version": "4.1.1", + "dev": true + }, + "lodash.startcase": { + "version": "4.4.0", + "dev": true + }, + "lodash.throttle": { + "version": "4.1.1", + "dev": true + }, + "lodash.truncate": { + "version": "4.4.2", + "resolved": "https://registry.npm.alibaba-inc.com/lodash.truncate/download/lodash.truncate-4.4.2.tgz", + "integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=", + "dev": true, + "peer": true + }, + "lodash.uniq": { + "version": "4.5.0", + "dev": true + }, + "lodash.upperfirst": { + "version": "4.3.1", + "dev": true + }, + "log-symbols": { + "version": "2.2.0", + "dev": true, + "requires": { + "chalk": "^2.0.1" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "log-update": { + "version": "5.0.1", + "dev": true, + "requires": { + "ansi-escapes": "^5.0.0", + "cli-cursor": "^4.0.0", + "slice-ansi": "^5.0.0", + "strip-ansi": "^7.0.1", + 
"wrap-ansi": "^8.0.1" + }, + "dependencies": { + "ansi-escapes": { + "version": "5.0.0", + "dev": true, + "requires": { + "type-fest": "^1.0.2" + } + }, + "ansi-regex": { + "version": "6.0.1", + "dev": true + }, + "cli-cursor": { + "version": "4.0.0", + "dev": true, + "requires": { + "restore-cursor": "^4.0.0" + } + }, + "restore-cursor": { + "version": "4.0.0", + "dev": true, + "requires": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + } + }, + "strip-ansi": { + "version": "7.1.0", + "dev": true, + "requires": { + "ansi-regex": "^6.0.1" + } + }, + "type-fest": { + "version": "1.4.0", + "dev": true + } + } + }, + "longest-streak": { + "version": "3.1.0", + "dev": true + }, + "loose-envify": { + "version": "1.4.0", + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, + "lower-case": { + "version": "2.0.2", + "dev": true, + "requires": { + "tslib": "^2.0.3" + } + }, + "lowercase-keys": { + "version": "1.0.1", + "dev": true + }, + "lru-cache": { + "version": "7.18.3", + "dev": true + }, + "lz-string": { + "version": "1.5.0", + "dev": true + }, + "make-dir": { + "version": "2.1.0", + "dev": true, + "optional": true, + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "dependencies": { + "semver": { + "version": "5.7.2", + "dev": true, + "optional": true + } + } + }, + "make-error": { + "version": "1.3.6", + "dev": true + }, + "make-fetch-happen": { + "version": "2.6.0", + "dev": true, + "requires": { + "agentkeepalive": "^3.3.0", + "cacache": "^10.0.0", + "http-cache-semantics": "^3.8.0", + "http-proxy-agent": "^2.0.0", + "https-proxy-agent": "^2.1.0", + "lru-cache": "^4.1.1", + "mississippi": "^1.2.0", + "node-fetch-npm": "^2.0.2", + "promise-retry": "^1.1.1", + "socks-proxy-agent": "^3.0.1", + "ssri": "^5.0.0" + }, + "dependencies": { + "cacache": { + "version": "10.0.4", + "dev": true, + "requires": { + "bluebird": "^3.5.1", + "chownr": "^1.0.1", + "glob": "^7.1.2", + "graceful-fs": "^4.1.11", + "lru-cache": "^4.1.1", + "mississippi": 
"^2.0.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.2", + "ssri": "^5.2.4", + "unique-filename": "^1.1.0", + "y18n": "^4.0.0" + }, + "dependencies": { + "mississippi": { + "version": "2.0.0", + "dev": true, + "requires": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^2.0.1", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + } + } + } + }, + "lru-cache": { + "version": "4.1.5", + "dev": true, + "requires": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "pump": { + "version": "2.0.1", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "ssri": { + "version": "5.3.0", + "dev": true, + "requires": { + "safe-buffer": "^5.1.1" + } + }, + "through2": { + "version": "2.0.5", + "dev": true, + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "y18n": { + "version": "4.0.3", + "dev": true + }, + "yallist": { + "version": "2.1.2", + "dev": true + } + } + }, + "makeerror": { + "version": "1.0.12", + "dev": true, + "requires": { + "tmpl": "1.0.5" + } + }, + "map-obj": { + "version": "4.3.0", + "dev": true + }, + "markdown-table": { + "version": "3.0.3", + "dev": true + }, + "mathml-tag-names": { + "version": "2.1.3", + "resolved": "https://registry.npm.alibaba-inc.com/mathml-tag-names/download/mathml-tag-names-2.1.3.tgz", + "integrity": "sha1-TdrdZzCOeAzxakdoWHjuJ7c2oKM=", + "dev": true, + "peer": true + }, + "md5.js": { + "version": "1.3.5", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "mdast-util-definitions": { + "version": "5.1.2", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "unist-util-visit": "^4.0.0" + } + }, + "mdast-util-directive": { + "version": "2.2.4", 
+ "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "mdast-util-from-markdown": "^1.3.0", + "mdast-util-to-markdown": "^1.5.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^5.1.3" + } + }, + "mdast-util-find-and-replace": { + "version": "2.2.2", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.0.0" + }, + "dependencies": { + "escape-string-regexp": { + "version": "5.0.0", + "dev": true + } + } + }, + "mdast-util-from-markdown": { + "version": "1.3.1", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "mdast-util-to-string": "^3.1.0", + "micromark": "^3.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-decode-string": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "uvu": "^0.5.0" + } + }, + "mdast-util-frontmatter": { + "version": "1.0.1", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0", + "micromark-extension-frontmatter": "^1.0.0" + } + }, + "mdast-util-gfm": { + "version": "2.0.2", + "dev": true, + "requires": { + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-gfm-autolink-literal": "^1.0.0", + "mdast-util-gfm-footnote": "^1.0.0", + "mdast-util-gfm-strikethrough": "^1.0.0", + "mdast-util-gfm-table": "^1.0.0", + "mdast-util-gfm-task-list-item": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + } + }, + "mdast-util-gfm-autolink-literal": { + "version": "1.0.3", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "ccount": "^2.0.0", + "mdast-util-find-and-replace": "^2.0.0", + "micromark-util-character": "^1.0.0" + } + }, + "mdast-util-gfm-footnote": { + 
"version": "1.0.2", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0", + "micromark-util-normalize-identifier": "^1.0.0" + } + }, + "mdast-util-gfm-strikethrough": { + "version": "1.0.3", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0" + } + }, + "mdast-util-gfm-table": { + "version": "1.0.7", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.3.0" + } + }, + "mdast-util-gfm-task-list-item": { + "version": "1.0.2", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0" + } + }, + "mdast-util-mdx-expression": { + "version": "1.3.2", + "dev": true, + "requires": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + } + }, + "mdast-util-mdxjs-esm": { + "version": "1.3.1", + "dev": true, + "requires": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + } + }, + "mdast-util-phrasing": { + "version": "3.0.1", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "unist-util-is": "^5.0.0" + } + }, + "mdast-util-to-hast": { + "version": "12.3.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-definitions": "^5.0.0", + "micromark-util-sanitize-uri": "^1.1.0", + "trim-lines": "^3.0.0", + "unist-util-generated": "^2.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0" + } + }, + "mdast-util-to-markdown": { + "version": "1.5.0", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^3.0.0", + "mdast-util-to-string": "^3.0.0", + 
"micromark-util-decode-string": "^1.0.0", + "unist-util-visit": "^4.0.0", + "zwitch": "^2.0.0" + } + }, + "mdast-util-to-string": { + "version": "3.2.0", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0" + } + }, + "mdn-data": { + "version": "2.0.4", + "dev": true + }, + "memfs": { + "version": "3.5.3", + "dev": true, + "requires": { + "fs-monkey": "^1.0.4" + } + }, + "meow": { + "version": "8.1.2", + "dev": true, + "requires": { + "@types/minimist": "^1.2.0", + "camelcase-keys": "^6.2.2", + "decamelize-keys": "^1.1.0", + "hard-rejection": "^2.1.0", + "minimist-options": "4.1.0", + "normalize-package-data": "^3.0.0", + "read-pkg-up": "^7.0.1", + "redent": "^3.0.0", + "trim-newlines": "^3.0.0", + "type-fest": "^0.18.0", + "yargs-parser": "^20.2.3" + } + }, + "merge-stream": { + "version": "2.0.0", + "dev": true + }, + "merge2": { + "version": "1.4.1", + "dev": true + }, + "micromark": { + "version": "3.2.0", + "dev": true, + "requires": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "micromark-core-commonmark": "^1.0.1", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "micromark-core-commonmark": { + "version": "1.1.0", + "dev": true, + "requires": { + "decode-named-character-reference": "^1.0.0", + "micromark-factory-destination": "^1.0.0", + 
"micromark-factory-label": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-factory-title": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-html-tag-name": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } + }, + "micromark-extension-directive": { + "version": "2.2.1", + "dev": true, + "requires": { + "micromark-factory-space": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "parse-entities": "^4.0.0", + "uvu": "^0.5.0" + } + }, + "micromark-extension-frontmatter": { + "version": "1.1.1", + "dev": true, + "requires": { + "fault": "^2.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-extension-gfm": { + "version": "2.0.3", + "dev": true, + "requires": { + "micromark-extension-gfm-autolink-literal": "^1.0.0", + "micromark-extension-gfm-footnote": "^1.0.0", + "micromark-extension-gfm-strikethrough": "^1.0.0", + "micromark-extension-gfm-table": "^1.0.0", + "micromark-extension-gfm-tagfilter": "^1.0.0", + "micromark-extension-gfm-task-list-item": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-extension-gfm-autolink-literal": { + "version": "1.0.5", + "dev": true, + "requires": { + "micromark-util-character": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-extension-gfm-footnote": { + "version": "1.1.2", + "dev": true, + "requires": { + 
"micromark-core-commonmark": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "micromark-extension-gfm-strikethrough": { + "version": "1.0.7", + "dev": true, + "requires": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "micromark-extension-gfm-table": { + "version": "1.0.7", + "dev": true, + "requires": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "micromark-extension-gfm-tagfilter": { + "version": "1.0.2", + "dev": true, + "requires": { + "micromark-util-types": "^1.0.0" + } + }, + "micromark-extension-gfm-task-list-item": { + "version": "1.0.5", + "dev": true, + "requires": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "micromark-factory-destination": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-factory-label": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "micromark-factory-space": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-factory-title": { + "version": "1.1.0", + "dev": true, + 
"requires": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-factory-whitespace": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-util-character": { + "version": "1.2.0", + "dev": true, + "requires": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-util-chunked": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-symbol": "^1.0.0" + } + }, + "micromark-util-classify-character": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-util-combine-extensions": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "micromark-util-decode-numeric-character-reference": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-symbol": "^1.0.0" + } + }, + "micromark-util-decode-string": { + "version": "1.1.0", + "dev": true, + "requires": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "micromark-util-encode": { + "version": "1.1.0", + "dev": true + }, + "micromark-util-html-tag-name": { + "version": "1.2.0", + "dev": true + }, + "micromark-util-normalize-identifier": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-symbol": "^1.0.0" + } + }, + "micromark-util-resolve-all": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-types": "^1.0.0" + } + }, + 
"micromark-util-sanitize-uri": { + "version": "1.2.0", + "dev": true, + "requires": { + "micromark-util-character": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "micromark-util-subtokenize": { + "version": "1.1.0", + "dev": true, + "requires": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "micromark-util-symbol": { + "version": "1.1.0", + "dev": true + }, + "micromark-util-types": { + "version": "1.1.0", + "dev": true + }, + "micromatch": { + "version": "4.0.5", + "dev": true, + "requires": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + } + }, + "miller-rabin": { + "version": "4.0.1", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + }, + "dependencies": { + "bn.js": { + "version": "4.12.0", + "dev": true + } + } + }, + "mime": { + "version": "1.6.0", + "dev": true, + "optional": true + }, + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npm.alibaba-inc.com/mime-db/download/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "peer": true + }, + "mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npm.alibaba-inc.com/mime-types/download/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "peer": true, + "requires": { + "mime-db": "1.52.0" + } + }, + "mimer": { + "version": "1.1.0", + "dev": true + }, + "mimic-fn": { + "version": "2.1.0", + "dev": true + }, + "min-indent": { + "version": "1.0.1", + "dev": true + }, + "minimalistic-assert": { + "version": "1.0.1", + "dev": true + }, + "minimalistic-crypto-utils": { + "version": "1.0.1", + "dev": true + }, + "minimatch": { + "version": "3.1.2", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + 
}, + "minimist": { + "version": "1.2.8", + "dev": true + }, + "minimist-options": { + "version": "4.1.0", + "dev": true, + "requires": { + "arrify": "^1.0.1", + "is-plain-obj": "^1.1.0", + "kind-of": "^6.0.3" + } + }, + "minipass": { + "version": "7.0.4", + "dev": true + }, + "mississippi": { + "version": "1.3.1", + "dev": true, + "requires": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^1.0.0", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + }, + "dependencies": { + "through2": { + "version": "2.0.5", + "dev": true, + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + } + } + }, + "mkdirp": { + "version": "0.5.6", + "dev": true, + "requires": { + "minimist": "^1.2.6" + } + }, + "move-concurrently": { + "version": "1.0.1", + "dev": true, + "requires": { + "aproba": "^1.1.1", + "copy-concurrently": "^1.0.0", + "fs-write-stream-atomic": "^1.0.8", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.3" + } + }, + "mri": { + "version": "1.2.0", + "dev": true + }, + "ms": { + "version": "2.1.3", + "dev": true + }, + "mute-stream": { + "version": "0.0.7", + "dev": true + }, + "mz": { + "version": "2.7.0", + "dev": true, + "requires": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "nanoid": { + "version": "2.1.11", + "dev": true + }, + "natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npm.alibaba-inc.com/natural-compare/download/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", + "dev": true, + "peer": true + }, + "natural-compare-lite": { + "version": "1.4.0", + "dev": true + }, + "needle": { + "version": "3.3.1", + "dev": true, + "optional": true, + "requires": { + "iconv-lite": "^0.6.3", + "sax": "^1.2.4" + }, + "dependencies": { + "iconv-lite": { + "version": "0.6.3", + 
"dev": true, + "optional": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + } + } + } + }, + "neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npm.alibaba-inc.com/neo-async/download/neo-async-2.6.2.tgz", + "integrity": "sha1-tKr7k+OustgXTKU88WOrfXMIMF8=", + "dev": true, + "peer": true + }, + "no-case": { + "version": "3.0.4", + "dev": true, + "requires": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node-abort-controller": { + "version": "3.1.1", + "dev": true + }, + "node-domexception": { + "version": "1.0.0", + "dev": true + }, + "node-fetch": { + "version": "3.3.2", + "dev": true, + "requires": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + } + }, + "node-fetch-npm": { + "version": "2.0.4", + "dev": true, + "requires": { + "encoding": "^0.1.11", + "json-parse-better-errors": "^1.0.0", + "safe-buffer": "^5.1.1" + } + }, + "node-int64": { + "version": "0.4.0", + "dev": true + }, + "node-libs-browser": { + "version": "2.2.1", + "dev": true, + "requires": { + "assert": "^1.1.1", + "browserify-zlib": "^0.2.0", + "buffer": "^4.3.0", + "console-browserify": "^1.1.0", + "constants-browserify": "^1.0.0", + "crypto-browserify": "^3.11.0", + "domain-browser": "^1.1.1", + "events": "^3.0.0", + "https-browserify": "^1.0.0", + "os-browserify": "^0.3.0", + "path-browserify": "0.0.1", + "process": "^0.11.10", + "punycode": "^1.2.4", + "querystring-es3": "^0.2.0", + "readable-stream": "^2.3.3", + "stream-browserify": "^2.0.1", + "stream-http": "^2.7.2", + "string_decoder": "^1.0.0", + "timers-browserify": "^2.0.4", + "tty-browserify": "0.0.0", + "url": "^0.11.0", + "util": "^0.11.0", + "vm-browserify": "^1.0.1" + } + }, + "node-releases": { + "version": "2.0.14", + "dev": true + }, + "normalize-package-data": { + "version": "3.0.3", + "dev": true, + "requires": { + "hosted-git-info": "^4.0.1", + "is-core-module": "^2.5.0", + "semver": "^7.3.4", + "validate-npm-package-license": 
"^3.0.1" + }, + "dependencies": { + "hosted-git-info": { + "version": "4.1.0", + "dev": true, + "requires": { + "lru-cache": "^6.0.0" + } + }, + "lru-cache": { + "version": "6.0.0", + "dev": true, + "requires": { + "yallist": "^4.0.0" + } + }, + "yallist": { + "version": "4.0.0", + "dev": true + } + } + }, + "normalize-path": { + "version": "3.0.0", + "dev": true + }, + "normalize-range": { + "version": "0.1.2", + "dev": true + }, + "npm-package-arg": { + "version": "5.1.2", + "dev": true, + "requires": { + "hosted-git-info": "^2.4.2", + "osenv": "^0.1.4", + "semver": "^5.1.0", + "validate-npm-package-name": "^3.0.0" + }, + "dependencies": { + "hosted-git-info": { + "version": "2.8.9", + "dev": true + }, + "semver": { + "version": "5.7.2", + "dev": true + } + } + }, + "npm-pick-manifest": { + "version": "1.0.4", + "dev": true, + "requires": { + "npm-package-arg": "^5.1.2", + "semver": "^5.3.0" + }, + "dependencies": { + "semver": { + "version": "5.7.2", + "dev": true + } + } + }, + "npm-run-path": { + "version": "4.0.1", + "dev": true, + "requires": { + "path-key": "^3.0.0" + } + }, + "nprogress": { + "version": "0.2.0", + "dev": true + }, + "nth-check": { + "version": "1.0.2", + "dev": true, + "requires": { + "boolbase": "~1.0.0" + } + }, + "object-assign": { + "version": "4.1.1", + "dev": true + }, + "object-inspect": { + "version": "1.13.1", + "dev": true + }, + "object-keys": { + "version": "1.1.1", + "dev": true + }, + "object.assign": { + "version": "4.1.5", + "dev": true, + "requires": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + } + }, + "object.entries": { + "version": "1.1.8", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + } + }, + "object.fromentries": { + "version": "2.0.8", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": 
"^1.0.0" + } + }, + "object.getownpropertydescriptors": { + "version": "2.1.8", + "dev": true, + "requires": { + "array.prototype.reduce": "^1.0.6", + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "gopd": "^1.0.1", + "safe-array-concat": "^1.1.2" + } + }, + "object.getprototypeof": { + "version": "1.0.6", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "reflect.getprototypeof": "^1.0.5" + } + }, + "object.hasown": { + "version": "1.1.4", + "dev": true, + "requires": { + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + } + }, + "object.values": { + "version": "1.2.0", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + } + }, + "obuf": { + "version": "1.1.2", + "dev": true + }, + "omit-deep": { + "version": "0.3.0", + "dev": true, + "requires": { + "is-plain-object": "^2.0.1", + "unset-value": "^0.1.1" + } + }, + "on-exit-leak-free": { + "version": "0.2.0", + "dev": true + }, + "once": { + "version": "1.4.0", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "onetime": { + "version": "5.1.2", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "open": { + "version": "6.4.0", + "dev": true, + "requires": { + "is-wsl": "^1.1.0" + } + }, + "optionator": { + "version": "0.9.3", + "resolved": "https://registry.npm.alibaba-inc.com/optionator/download/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "dev": true, + "peer": true, + "requires": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + } + }, + "ora": { + "version": "1.4.0", + "dev": true, + "requires": { + "chalk": "^2.1.0", + 
"cli-cursor": "^2.1.0", + "cli-spinners": "^1.0.1", + "log-symbols": "^2.1.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "os-browserify": { + "version": "0.3.0", + "dev": true + }, + "os-homedir": { + "version": "1.0.2", + "dev": true + }, + "os-tmpdir": { + "version": "1.0.2", + "dev": true + }, + "osenv": { + "version": "0.1.5", + "dev": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "p-finally": { + "version": "1.0.0", + "dev": true + }, + "p-limit": { + "version": "3.1.0", + "dev": true, + "requires": { + "yocto-queue": "^0.1.0" + } + }, + "p-locate": { + "version": "5.0.0", + "dev": true, + "requires": { + "p-limit": "^3.0.2" + } + }, + "p-try": { + "version": "2.2.0", + "dev": true + }, + "package-json": { + "version": "4.0.1", + "dev": true, + "requires": { + "got": "^6.7.1", + "registry-auth-token": "^3.0.1", + "registry-url": "^3.0.3", + "semver": "^5.1.0" + }, + "dependencies": { + "semver": { + "version": "5.7.2", + "dev": true + } + } + }, + "pacote": { + "version": "2.7.38", + "dev": true, + "requires": { + "bluebird": "^3.5.0", + "cacache": "^9.2.9", + "glob": "^7.1.2", + "lru-cache": "^4.1.1", + "make-fetch-happen": "^2.4.13", + "minimatch": "^3.0.4", + "mississippi": "^1.2.0", + "normalize-package-data": "^2.4.0", + "npm-package-arg": "^5.1.2", + "npm-pick-manifest": "^1.0.4", + "osenv": "^0.1.4", + "promise-inflight": 
"^1.0.1", + "promise-retry": "^1.1.1", + "protoduck": "^4.0.0", + "safe-buffer": "^5.1.1", + "semver": "^5.3.0", + "ssri": "^4.1.6", + "tar-fs": "^1.15.3", + "tar-stream": "^1.5.4", + "unique-filename": "^1.1.0", + "which": "^1.2.12" + }, + "dependencies": { + "hosted-git-info": { + "version": "2.8.9", + "dev": true + }, + "lru-cache": { + "version": "4.1.5", + "dev": true, + "requires": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "normalize-package-data": { + "version": "2.5.0", + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "resolve": { + "version": "1.22.8", + "dev": true, + "requires": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + } + }, + "semver": { + "version": "5.7.2", + "dev": true + }, + "which": { + "version": "1.3.1", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "yallist": { + "version": "2.1.2", + "dev": true + } + } + }, + "pako": { + "version": "1.0.11", + "dev": true + }, + "parallel-transform": { + "version": "1.2.0", + "dev": true, + "requires": { + "cyclist": "^1.0.1", + "inherits": "^2.0.3", + "readable-stream": "^2.1.5" + } + }, + "param-case": { + "version": "3.0.4", + "dev": true, + "requires": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "parent-module": { + "version": "1.0.1", + "dev": true, + "requires": { + "callsites": "^3.0.0" + } + }, + "parse-asn1": { + "version": "5.1.7", + "dev": true, + "requires": { + "asn1.js": "^4.10.1", + "browserify-aes": "^1.2.0", + "evp_bytestokey": "^1.0.3", + "hash-base": "~3.0", + "pbkdf2": "^3.1.2", + "safe-buffer": "^5.2.1" + } + }, + "parse-entities": { + "version": "4.0.1", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + 
"decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + } + }, + "parse-git-config": { + "version": "1.1.1", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "fs-exists-sync": "^0.1.0", + "git-config-path": "^1.0.1", + "ini": "^1.3.4" + } + }, + "parse-json": { + "version": "5.2.0", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "parse-node-version": { + "version": "1.0.1", + "dev": true + }, + "parse-passwd": { + "version": "1.0.0", + "dev": true + }, + "parse5": { + "version": "7.1.2", + "dev": true, + "requires": { + "entities": "^4.4.0" + } + }, + "parseley": { + "version": "0.12.1", + "dev": true, + "requires": { + "leac": "^0.6.0", + "peberminta": "^0.9.0" + } + }, + "pascal-case": { + "version": "3.1.2", + "dev": true, + "requires": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "path-browserify": { + "version": "0.0.1", + "dev": true + }, + "path-exists": { + "version": "4.0.0", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "dev": true + }, + "path-is-inside": { + "version": "1.0.2", + "dev": true + }, + "path-key": { + "version": "3.1.1", + "dev": true + }, + "path-parse": { + "version": "1.0.7", + "dev": true + }, + "path-scurry": { + "version": "1.10.2", + "dev": true, + "requires": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "dependencies": { + "lru-cache": { + "version": "10.2.2", + "dev": true + } + } + }, + "path-to-regexp": { + "version": "1.7.0", + "dev": true, + "requires": { + "isarray": "0.0.1" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "dev": true + } + } + }, + "path-type": { + "version": "4.0.0", + "dev": true + }, + "pbkdf2": { + "version": "3.1.2", + "dev": true, + "requires": { + "create-hash": "^1.1.2", + "create-hmac": "^1.1.4", + 
"ripemd160": "^2.0.1", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "peberminta": { + "version": "0.9.0", + "dev": true + }, + "picocolors": { + "version": "1.0.0" + }, + "picomatch": { + "version": "2.3.1", + "dev": true + }, + "pidtree": { + "version": "0.6.0", + "dev": true + }, + "pify": { + "version": "4.0.1", + "dev": true, + "optional": true + }, + "pino": { + "version": "7.11.0", + "dev": true, + "requires": { + "atomic-sleep": "^1.0.0", + "fast-redact": "^3.0.0", + "on-exit-leak-free": "^0.2.0", + "pino-abstract-transport": "v0.5.0", + "pino-std-serializers": "^4.0.0", + "process-warning": "^1.0.0", + "quick-format-unescaped": "^4.0.3", + "real-require": "^0.1.0", + "safe-stable-stringify": "^2.1.0", + "sonic-boom": "^2.2.1", + "thread-stream": "^0.15.1" + } + }, + "pino-abstract-transport": { + "version": "0.5.0", + "dev": true, + "requires": { + "duplexify": "^4.1.2", + "split2": "^4.0.0" + }, + "dependencies": { + "duplexify": { + "version": "4.1.3", + "dev": true, + "requires": { + "end-of-stream": "^1.4.1", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1", + "stream-shift": "^1.0.2" + } + }, + "readable-stream": { + "version": "3.6.2", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + }, + "split2": { + "version": "4.2.0", + "dev": true + } + } + }, + "pino-std-serializers": { + "version": "4.0.0", + "dev": true + }, + "pirates": { + "version": "4.0.6", + "dev": true + }, + "pluralize": { + "version": "8.0.0", + "dev": true + }, + "point-in-polygon": { + "version": "1.1.0", + "dev": true + }, + "possible-typed-array-names": { + "version": "1.0.0", + "dev": true + }, + "postcss": { + "version": "8.4.31", + "requires": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "dependencies": { + "nanoid": { + "version": "3.3.7" + } + } + }, + "postcss-attribute-case-insensitive": { + "version": "5.0.2", + "dev": true, + "requires": { + 
"postcss-selector-parser": "^6.0.10" + } + }, + "postcss-clamp": { + "version": "4.1.0", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-color-functional-notation": { + "version": "4.2.4", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-color-hex-alpha": { + "version": "8.0.4", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-color-rebeccapurple": { + "version": "7.1.1", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-custom-media": { + "version": "8.0.2", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-custom-properties": { + "version": "12.1.11", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-custom-selectors": { + "version": "6.0.3", + "dev": true, + "requires": { + "postcss-selector-parser": "^6.0.4" + } + }, + "postcss-dir-pseudo-class": { + "version": "6.0.5", + "dev": true, + "requires": { + "postcss-selector-parser": "^6.0.10" + } + }, + "postcss-double-position-gradients": { + "version": "3.1.2", + "dev": true, + "requires": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-env-function": { + "version": "4.0.6", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-flexbugs-fixes": { + "version": "5.0.2", + "dev": true, + "requires": {} + }, + "postcss-focus-visible": { + "version": "6.0.4", + "dev": true, + "requires": { + "postcss-selector-parser": "^6.0.9" + } + }, + "postcss-focus-within": { + "version": "5.0.4", + "dev": true, + "requires": { + "postcss-selector-parser": "^6.0.9" + } + }, + "postcss-font-variant": { + "version": "5.0.0", + "dev": true, + "requires": {} + }, + "postcss-gap-properties": { + "version": "3.0.5", + "dev": true, + "requires": {} + }, + "postcss-image-set-function": { + "version": "4.0.7", + "dev": 
true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-initial": { + "version": "4.0.1", + "dev": true, + "requires": {} + }, + "postcss-lab-function": { + "version": "4.2.1", + "dev": true, + "requires": { + "@csstools/postcss-progressive-custom-properties": "^1.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-logical": { + "version": "5.0.4", + "dev": true, + "requires": {} + }, + "postcss-media-minmax": { + "version": "5.0.0", + "dev": true, + "requires": {} + }, + "postcss-media-query-parser": { + "version": "0.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/postcss-media-query-parser/download/postcss-media-query-parser-0.2.3.tgz", + "integrity": "sha1-J7Ocb02U+Bsac7j3Y1HGCeXO8kQ=", + "dev": true, + "peer": true + }, + "postcss-modules-extract-imports": { + "version": "3.1.0", + "dev": true, + "requires": {} + }, + "postcss-modules-local-by-default": { + "version": "4.0.5", + "dev": true, + "requires": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + } + }, + "postcss-modules-scope": { + "version": "3.2.0", + "dev": true, + "requires": { + "postcss-selector-parser": "^6.0.4" + } + }, + "postcss-modules-values": { + "version": "4.0.0", + "dev": true, + "requires": { + "icss-utils": "^5.0.0" + } + }, + "postcss-nesting": { + "version": "10.2.0", + "dev": true, + "requires": { + "@csstools/selector-specificity": "^2.0.0", + "postcss-selector-parser": "^6.0.10" + } + }, + "postcss-opacity-percentage": { + "version": "1.1.3", + "dev": true, + "requires": {} + }, + "postcss-overflow-shorthand": { + "version": "3.0.4", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-page-break": { + "version": "3.0.4", + "dev": true, + "requires": {} + }, + "postcss-place": { + "version": "7.0.5", + "dev": true, + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-prefix-selector": { + "version": "1.16.0", + "dev": true, + 
"requires": {} + }, + "postcss-preset-env": { + "version": "7.5.0", + "dev": true, + "requires": { + "@csstools/postcss-color-function": "^1.1.0", + "@csstools/postcss-font-format-keywords": "^1.0.0", + "@csstools/postcss-hwb-function": "^1.0.0", + "@csstools/postcss-ic-unit": "^1.0.0", + "@csstools/postcss-is-pseudo-class": "^2.0.2", + "@csstools/postcss-normalize-display-values": "^1.0.0", + "@csstools/postcss-oklab-function": "^1.1.0", + "@csstools/postcss-progressive-custom-properties": "^1.3.0", + "@csstools/postcss-stepped-value-functions": "^1.0.0", + "@csstools/postcss-unset-value": "^1.0.0", + "autoprefixer": "^10.4.6", + "browserslist": "^4.20.3", + "css-blank-pseudo": "^3.0.3", + "css-has-pseudo": "^3.0.4", + "css-prefers-color-scheme": "^6.0.3", + "cssdb": "^6.6.1", + "postcss-attribute-case-insensitive": "^5.0.0", + "postcss-clamp": "^4.1.0", + "postcss-color-functional-notation": "^4.2.2", + "postcss-color-hex-alpha": "^8.0.3", + "postcss-color-rebeccapurple": "^7.0.2", + "postcss-custom-media": "^8.0.0", + "postcss-custom-properties": "^12.1.7", + "postcss-custom-selectors": "^6.0.0", + "postcss-dir-pseudo-class": "^6.0.4", + "postcss-double-position-gradients": "^3.1.1", + "postcss-env-function": "^4.0.6", + "postcss-focus-visible": "^6.0.4", + "postcss-focus-within": "^5.0.4", + "postcss-font-variant": "^5.0.0", + "postcss-gap-properties": "^3.0.3", + "postcss-image-set-function": "^4.0.6", + "postcss-initial": "^4.0.1", + "postcss-lab-function": "^4.2.0", + "postcss-logical": "^5.0.4", + "postcss-media-minmax": "^5.0.0", + "postcss-nesting": "^10.1.4", + "postcss-opacity-percentage": "^1.1.2", + "postcss-overflow-shorthand": "^3.0.3", + "postcss-page-break": "^3.0.4", + "postcss-place": "^7.0.4", + "postcss-pseudo-class-any-link": "^7.1.2", + "postcss-replace-overflow-wrap": "^4.0.0", + "postcss-selector-not": "^5.0.0", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-pseudo-class-any-link": { + "version": "7.1.6", + "dev": true, + 
"requires": { + "postcss-selector-parser": "^6.0.10" + } + }, + "postcss-replace-overflow-wrap": { + "version": "4.0.0", + "dev": true, + "requires": {} + }, + "postcss-resolve-nested-selector": { + "version": "0.1.1", + "resolved": "https://registry.npm.alibaba-inc.com/postcss-resolve-nested-selector/download/postcss-resolve-nested-selector-0.1.1.tgz", + "integrity": "sha1-Kcy8fDfe36wwTp//C/FZaz9qDk4=", + "dev": true, + "peer": true + }, + "postcss-safe-parser": { + "version": "6.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/postcss-safe-parser/download/postcss-safe-parser-6.0.0.tgz", + "integrity": "sha1-u0wpiUFxqUvFyZa5owMX70Aq2qE=", + "dev": true, + "peer": true, + "requires": {} + }, + "postcss-selector-not": { + "version": "5.0.0", + "dev": true, + "requires": { + "balanced-match": "^1.0.0" + } + }, + "postcss-selector-parser": { + "version": "6.0.16", + "dev": true, + "requires": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + } + }, + "postcss-syntax": { + "version": "0.36.2", + "dev": true, + "requires": {} + }, + "postcss-value-parser": { + "version": "4.2.0" + }, + "prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npm.alibaba-inc.com/prelude-ls/download/prelude-ls-1.2.1.tgz", + "integrity": "sha1-3rxkidem5rDnYRiIzsiAM30xY5Y=", + "dev": true, + "peer": true + }, + "prepend-http": { + "version": "1.0.4", + "dev": true + }, + "prettier": { + "version": "2.8.8", + "dev": true + }, + "prettier-plugin-organize-imports": { + "version": "3.2.4", + "dev": true, + "requires": {} + }, + "prettier-plugin-packagejson": { + "version": "2.4.3", + "dev": true, + "requires": { + "sort-package-json": "2.4.1", + "synckit": "0.8.5" + } + }, + "pretty-error": { + "version": "4.0.0", + "dev": true, + "requires": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "prism-react-renderer": { + "version": "1.3.5", + "dev": true, + "requires": {} + }, + "prism-themes": { + "version": "1.9.0", + "dev": true + }, + "prismjs": { + 
"version": "1.29.0", + "dev": true + }, + "process": { + "version": "0.11.10", + "dev": true + }, + "process-nextick-args": { + "version": "2.0.1", + "dev": true + }, + "process-warning": { + "version": "1.0.0", + "dev": true + }, + "promise-inflight": { + "version": "1.0.1", + "dev": true + }, + "promise-retry": { + "version": "1.1.1", + "dev": true, + "requires": { + "err-code": "^1.0.0", + "retry": "^0.10.0" + } + }, + "prop-types": { + "version": "15.8.1", + "dev": true, + "requires": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + }, + "dependencies": { + "react-is": { + "version": "16.13.1", + "dev": true + } + } + }, + "property-information": { + "version": "6.5.0", + "dev": true + }, + "protoduck": { + "version": "4.0.0", + "dev": true, + "requires": { + "genfun": "^4.0.1" + } + }, + "prr": { + "version": "1.0.1", + "dev": true, + "optional": true + }, + "pseudomap": { + "version": "1.0.2", + "dev": true + }, + "public-encrypt": { + "version": "4.0.3", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + }, + "dependencies": { + "bn.js": { + "version": "4.12.0", + "dev": true + } + } + }, + "pump": { + "version": "1.0.3", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "pumpify": { + "version": "1.5.1", + "dev": true, + "requires": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + }, + "dependencies": { + "pump": { + "version": "2.0.1", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + } + } + }, + "punycode": { + "version": "1.4.1", + "dev": true + }, + "q": { + "version": "1.5.1", + "dev": true + }, + "qrcode.react": { + "version": "3.1.0", + "requires": {} + }, + "qs": { + "version": "6.12.1", + "dev": true, + "requires": { + "side-channel": "^1.0.6" + } + }, + "query-string": { + 
"version": "6.14.1", + "dev": true, + "requires": { + "decode-uri-component": "^0.2.0", + "filter-obj": "^1.1.0", + "split-on-first": "^1.0.0", + "strict-uri-encode": "^2.0.0" + } + }, + "querystring-es3": { + "version": "0.2.1", + "dev": true + }, + "queue": { + "version": "6.0.1", + "dev": true, + "requires": { + "inherits": "~2.0.3" + } + }, + "queue-microtask": { + "version": "1.2.3", + "dev": true + }, + "quick-format-unescaped": { + "version": "4.0.4", + "dev": true + }, + "quick-lru": { + "version": "4.0.1", + "dev": true + }, + "ramda": { + "version": "0.29.0", + "dev": true + }, + "randombytes": { + "version": "2.1.0", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "randomfill": { + "version": "1.0.4", + "dev": true, + "requires": { + "randombytes": "^2.0.5", + "safe-buffer": "^5.1.0" + } + }, + "raw-loader": { + "version": "4.0.2", + "dev": true, + "requires": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + } + }, + "rc": { + "version": "1.2.8", + "dev": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + } + }, + "rc-cascader": { + "version": "3.24.1", + "requires": { + "@babel/runtime": "^7.12.5", + "array-tree-filter": "^2.1.0", + "classnames": "^2.3.1", + "rc-select": "~14.13.0", + "rc-tree": "~5.8.1", + "rc-util": "^5.37.0" + } + }, + "rc-checkbox": { + "version": "3.2.0", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.25.2" + } + }, + "rc-collapse": { + "version": "3.7.3", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.3.4", + "rc-util": "^5.27.0" + } + }, + "rc-dialog": { + "version": "9.4.0", + "requires": { + "@babel/runtime": "^7.10.1", + "@rc-component/portal": "^1.0.0-8", + "classnames": "^2.2.6", + "rc-motion": "^2.3.0", + "rc-util": "^5.21.0" + } + }, + "rc-drawer": { + "version": "7.1.0", + "requires": { + "@babel/runtime": "^7.23.9", + 
"@rc-component/portal": "^1.1.1", + "classnames": "^2.2.6", + "rc-motion": "^2.6.1", + "rc-util": "^5.38.1" + } + }, + "rc-dropdown": { + "version": "4.2.0", + "requires": { + "@babel/runtime": "^7.18.3", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-util": "^5.17.0" + } + }, + "rc-field-form": { + "version": "1.44.0", + "requires": { + "@babel/runtime": "^7.18.0", + "async-validator": "^4.1.0", + "rc-util": "^5.32.2" + } + }, + "rc-image": { + "version": "7.6.0", + "requires": { + "@babel/runtime": "^7.11.2", + "@rc-component/portal": "^1.0.2", + "classnames": "^2.2.6", + "rc-dialog": "~9.4.0", + "rc-motion": "^2.6.2", + "rc-util": "^5.34.1" + } + }, + "rc-input": { + "version": "1.4.5", + "requires": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.18.1" + } + }, + "rc-input-number": { + "version": "9.0.0", + "requires": { + "@babel/runtime": "^7.10.1", + "@rc-component/mini-decimal": "^1.0.1", + "classnames": "^2.2.5", + "rc-input": "~1.4.0", + "rc-util": "^5.28.0" + } + }, + "rc-mentions": { + "version": "2.11.1", + "requires": { + "@babel/runtime": "^7.22.5", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-input": "~1.4.0", + "rc-menu": "~9.13.0", + "rc-textarea": "~1.6.1", + "rc-util": "^5.34.1" + } + }, + "rc-menu": { + "version": "9.13.0", + "requires": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.0.0", + "classnames": "2.x", + "rc-motion": "^2.4.3", + "rc-overflow": "^1.3.1", + "rc-util": "^5.27.0" + } + }, + "rc-motion": { + "version": "2.9.0", + "requires": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.21.0" + } + }, + "rc-notification": { + "version": "5.4.0", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.9.0", + "rc-util": "^5.20.1" + } + }, + "rc-overflow": { + "version": "1.3.2", + "requires": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-resize-observer": "^1.0.0", + 
"rc-util": "^5.37.0" + } + }, + "rc-pagination": { + "version": "4.0.4", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.38.0" + } + }, + "rc-picker": { + "version": "4.4.2", + "requires": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.1", + "rc-overflow": "^1.3.2", + "rc-resize-observer": "^1.4.0", + "rc-util": "^5.38.1" + } + }, + "rc-progress": { + "version": "4.0.0", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.6", + "rc-util": "^5.16.1" + } + }, + "rc-rate": { + "version": "2.12.0", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.0.1" + } + }, + "rc-resize-observer": { + "version": "1.4.0", + "requires": { + "@babel/runtime": "^7.20.7", + "classnames": "^2.2.1", + "rc-util": "^5.38.0", + "resize-observer-polyfill": "^1.5.1" + } + }, + "rc-segmented": { + "version": "2.3.0", + "requires": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-motion": "^2.4.4", + "rc-util": "^5.17.0" + } + }, + "rc-select": { + "version": "14.13.1", + "requires": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.1.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + "rc-overflow": "^1.3.1", + "rc-util": "^5.16.1", + "rc-virtual-list": "^3.5.2" + } + }, + "rc-slider": { + "version": "10.6.2", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.36.0" + } + }, + "rc-steps": { + "version": "6.0.1", + "requires": { + "@babel/runtime": "^7.16.7", + "classnames": "^2.2.3", + "rc-util": "^5.16.1" + } + }, + "rc-switch": { + "version": "4.1.0", + "requires": { + "@babel/runtime": "^7.21.0", + "classnames": "^2.2.1", + "rc-util": "^5.30.0" + } + }, + "rc-table": { + "version": "7.45.4", + "requires": { + "@babel/runtime": "^7.10.1", + "@rc-component/context": "^1.4.0", + "classnames": "^2.2.5", + "rc-resize-observer": "^1.1.0", + "rc-util": "^5.37.0", + "rc-virtual-list": 
"^3.11.1" + } + }, + "rc-tabs": { + "version": "14.1.1", + "requires": { + "@babel/runtime": "^7.11.2", + "classnames": "2.x", + "rc-dropdown": "~4.2.0", + "rc-menu": "~9.13.0", + "rc-motion": "^2.6.2", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.34.1" + } + }, + "rc-textarea": { + "version": "1.6.3", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.1", + "rc-input": "~1.4.0", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.27.0" + } + }, + "rc-tooltip": { + "version": "6.2.0", + "requires": { + "@babel/runtime": "^7.11.2", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.1" + } + }, + "rc-tree": { + "version": "5.8.5", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + "rc-util": "^5.16.1", + "rc-virtual-list": "^3.5.1" + } + }, + "rc-tree-select": { + "version": "5.19.0", + "requires": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-select": "~14.13.0", + "rc-tree": "~5.8.1", + "rc-util": "^5.16.1" + } + }, + "rc-upload": { + "version": "4.5.2", + "requires": { + "@babel/runtime": "^7.18.3", + "classnames": "^2.2.5", + "rc-util": "^5.2.0" + } + }, + "rc-util": { + "version": "5.39.1", + "requires": { + "@babel/runtime": "^7.18.3", + "react-is": "^18.2.0" + } + }, + "rc-virtual-list": { + "version": "3.11.5", + "requires": { + "@babel/runtime": "^7.20.0", + "classnames": "^2.2.6", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.36.0" + } + }, + "react": { + "version": "18.1.0", + "requires": { + "loose-envify": "^1.1.0" + } + }, + "react-copy-to-clipboard": { + "version": "5.1.0", + "dev": true, + "requires": { + "copy-to-clipboard": "^3.3.1", + "prop-types": "^15.8.1" + } + }, + "react-dom": { + "version": "18.1.0", + "requires": { + "loose-envify": "^1.1.0", + "scheduler": "^0.22.0" + } + }, + "react-error-boundary": { + "version": "4.0.13", + "dev": true, + "requires": { + "@babel/runtime": "^7.12.5" + } + }, + "react-error-overlay": { + "version": "6.0.9", + 
"dev": true + }, + "react-fast-compare": { + "version": "3.2.2", + "dev": true + }, + "react-helmet-async": { + "version": "1.3.0", + "dev": true, + "requires": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + } + }, + "react-intl": { + "version": "6.6.5", + "dev": true, + "requires": { + "@formatjs/ecma402-abstract": "1.18.2", + "@formatjs/icu-messageformat-parser": "2.7.6", + "@formatjs/intl": "2.10.1", + "@formatjs/intl-displaynames": "6.6.6", + "@formatjs/intl-listformat": "7.5.5", + "@types/hoist-non-react-statics": "^3.3.1", + "@types/react": "16 || 17 || 18", + "hoist-non-react-statics": "^3.3.2", + "intl-messageformat": "10.5.11", + "tslib": "^2.4.0" + } + }, + "react-is": { + "version": "18.3.1" + }, + "react-loading-skeleton": { + "version": "3.4.0", + "dev": true, + "requires": {} + }, + "react-merge-refs": { + "version": "1.1.0", + "dev": true + }, + "react-refresh": { + "version": "0.14.0", + "dev": true + }, + "react-router": { + "version": "6.3.0", + "dev": true, + "requires": { + "history": "^5.2.0" + } + }, + "react-router-dom": { + "version": "6.3.0", + "dev": true, + "requires": { + "history": "^5.2.0", + "react-router": "6.3.0" + } + }, + "react-simple-code-editor": { + "version": "0.13.1", + "dev": true, + "requires": {} + }, + "react-slick": { + "version": "0.30.2", + "resolved": "https://registry.npm.alibaba-inc.com/react-slick/download/react-slick-0.30.2.tgz", + "integrity": "sha512-XvQJi7mRHuiU3b9irsqS9SGIgftIfdV5/tNcURTb5LdIokRA5kIIx3l4rlq2XYHfxcSntXapoRg/GxaVOM1yfg==", + "requires": { + "classnames": "^2.2.5", + "enquire.js": "^2.1.6", + "json2mq": "^0.2.0", + "lodash.debounce": "^4.0.8", + "resize-observer-polyfill": "^1.5.0" + } + }, + "read-pkg": { + "version": "5.2.0", + "dev": true, + "requires": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + 
"dependencies": { + "hosted-git-info": { + "version": "2.8.9", + "dev": true + }, + "normalize-package-data": { + "version": "2.5.0", + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "resolve": { + "version": "1.22.8", + "dev": true, + "requires": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + } + }, + "semver": { + "version": "5.7.2", + "dev": true + }, + "type-fest": { + "version": "0.6.0", + "dev": true + } + } + }, + "read-pkg-up": { + "version": "7.0.1", + "dev": true, + "requires": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "dependencies": { + "find-up": { + "version": "4.1.0", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "type-fest": { + "version": "0.8.1", + "dev": true + } + } + }, + "readable-stream": { + "version": "2.3.8", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + }, + "dependencies": { + "safe-buffer": { + "version": "5.1.2", + "dev": true + } + } + }, + "readdirp": { + "version": "3.6.0", + "dev": true, + "requires": { + "picomatch": "^2.2.1" + } + }, + "real-require": { + "version": "0.1.0", + "dev": true + }, + "redent": { + "version": "3.0.0", + "dev": true, + "requires": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + } + }, + "reflect.getprototypeof": { + "version": "1.0.6", + "dev": true, 
+ "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.1", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + } + }, + "regenerate": { + "version": "1.4.2", + "dev": true + }, + "regenerate-unicode-properties": { + "version": "10.1.1", + "dev": true, + "requires": { + "regenerate": "^1.4.2" + } + }, + "regenerator-runtime": { + "version": "0.14.1" + }, + "regexp.prototype.flags": { + "version": "1.5.2", + "dev": true, + "requires": { + "call-bind": "^1.0.6", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "set-function-name": "^2.0.1" + } + }, + "registry-auth-token": { + "version": "3.4.0", + "dev": true, + "requires": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "registry-url": { + "version": "3.1.0", + "dev": true, + "requires": { + "rc": "^1.0.1" + } + }, + "rehype-autolink-headings": { + "version": "6.1.1", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "extend": "^3.0.0", + "hast-util-has-property": "^2.0.0", + "hast-util-heading-rank": "^2.0.0", + "hast-util-is-element": "^2.0.0", + "unified": "^10.0.0", + "unist-util-visit": "^4.0.0" + } + }, + "rehype-remove-comments": { + "version": "5.0.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "hast-util-is-conditional-comment": "^2.0.0", + "unified": "^10.0.0", + "unist-util-filter": "^4.0.0" + } + }, + "rehype-stringify": { + "version": "9.0.4", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "hast-util-to-html": "^8.0.0", + "unified": "^10.0.0" + } + }, + "relateurl": { + "version": "0.2.7", + "dev": true + }, + "remark-directive": { + "version": "2.0.1", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-directive": "^2.0.0", + "micromark-extension-directive": "^2.0.0", + "unified": "^10.0.0" + } + }, + "remark-frontmatter": { + "version": "4.0.1", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + 
"mdast-util-frontmatter": "^1.0.0", + "micromark-extension-frontmatter": "^1.0.0", + "unified": "^10.0.0" + } + }, + "remark-gfm": { + "version": "3.0.1", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-gfm": "^2.0.0", + "micromark-extension-gfm": "^2.0.0", + "unified": "^10.0.0" + } + }, + "remark-parse": { + "version": "10.0.2", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "unified": "^10.0.0" + } + }, + "remark-rehype": { + "version": "10.1.0", + "dev": true, + "requires": { + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-to-hast": "^12.1.0", + "unified": "^10.0.0" + } + }, + "remote-origin-url": { + "version": "0.5.3", + "dev": true, + "requires": { + "parse-git-config": "^1.1.1" + } + }, + "rename-keys": { + "version": "1.2.0", + "dev": true + }, + "renderkid": { + "version": "3.0.0", + "dev": true, + "requires": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "dev": true + }, + "css-select": { + "version": "4.3.0", + "dev": true, + "requires": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + } + }, + "css-what": { + "version": "6.1.0", + "dev": true + }, + "dom-serializer": { + "version": "1.4.1", + "dev": true, + "requires": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + } + }, + "domhandler": { + "version": "4.3.1", + "dev": true, + "requires": { + "domelementtype": "^2.2.0" + } + }, + "domutils": { + "version": "2.8.0", + "dev": true, + "requires": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + } + }, + "entities": { + "version": "2.2.0", + "dev": true + }, + "htmlparser2": { + "version": "6.1.0", + "dev": true, + "requires": { + "domelementtype": 
"^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "nth-check": { + "version": "2.1.1", + "dev": true, + "requires": { + "boolbase": "^1.0.0" + } + }, + "strip-ansi": { + "version": "6.0.1", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + } + } + }, + "require-directory": { + "version": "2.1.1", + "dev": true + }, + "require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npm.alibaba-inc.com/require-from-string/download/require-from-string-2.0.2.tgz", + "integrity": "sha1-iaf92TgmEmcxjq/hT5wy5ZjDaQk=", + "dev": true + }, + "resize-observer-polyfill": { + "version": "1.5.1" + }, + "resolve": { + "version": "2.0.0-next.5", + "dev": true, + "requires": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + } + }, + "resolve-dir": { + "version": "0.1.1", + "dev": true, + "requires": { + "expand-tilde": "^1.2.2", + "global-modules": "^0.2.3" + } + }, + "resolve-from": { + "version": "5.0.0", + "dev": true + }, + "resolve-global": { + "version": "1.0.0", + "dev": true, + "requires": { + "global-dirs": "^0.1.1" + } + }, + "resolve-pkg-maps": { + "version": "1.0.0", + "dev": true + }, + "restore-cursor": { + "version": "2.0.0", + "dev": true, + "requires": { + "onetime": "^2.0.0", + "signal-exit": "^3.0.2" + }, + "dependencies": { + "mimic-fn": { + "version": "1.2.0", + "dev": true + }, + "onetime": { + "version": "2.0.1", + "dev": true, + "requires": { + "mimic-fn": "^1.0.0" + } + } + } + }, + "retry": { + "version": "0.10.1", + "dev": true + }, + "reusify": { + "version": "1.0.4", + "dev": true + }, + "rfdc": { + "version": "1.3.1", + "dev": true + }, + "rimraf": { + "version": "2.7.1", + "dev": true, + "requires": { + "glob": "^7.1.3" + } + }, + "ripemd160": { + "version": "2.0.2", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "rollup": { + "version": "3.29.4", + "dev": true, + "requires": { + 
"fsevents": "~2.3.2" + } + }, + "rollup-plugin-visualizer": { + "version": "5.9.0", + "dev": true, + "requires": { + "open": "^8.4.0", + "picomatch": "^2.3.1", + "source-map": "^0.7.4", + "yargs": "^17.5.1" + }, + "dependencies": { + "define-lazy-prop": { + "version": "2.0.0", + "dev": true + }, + "is-docker": { + "version": "2.2.1", + "dev": true + }, + "is-wsl": { + "version": "2.2.0", + "dev": true, + "requires": { + "is-docker": "^2.0.0" + } + }, + "open": { + "version": "8.4.2", + "dev": true, + "requires": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + } + } + } + }, + "run-applescript": { + "version": "5.0.0", + "dev": true, + "requires": { + "execa": "^5.0.0" + } + }, + "run-async": { + "version": "2.4.1", + "dev": true + }, + "run-parallel": { + "version": "1.2.0", + "dev": true, + "requires": { + "queue-microtask": "^1.2.2" + } + }, + "run-queue": { + "version": "1.0.3", + "dev": true, + "requires": { + "aproba": "^1.1.1" + } + }, + "rxjs": { + "version": "6.6.7", + "dev": true, + "requires": { + "tslib": "^1.9.0" + }, + "dependencies": { + "tslib": { + "version": "1.14.1", + "dev": true + } + } + }, + "sade": { + "version": "1.8.1", + "dev": true, + "requires": { + "mri": "^1.1.0" + } + }, + "safe-array-concat": { + "version": "1.1.2", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "dependencies": { + "isarray": { + "version": "2.0.5", + "dev": true + } + } + }, + "safe-buffer": { + "version": "5.2.1", + "dev": true + }, + "safe-regex-test": { + "version": "1.0.3", + "dev": true, + "requires": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-regex": "^1.1.4" + } + }, + "safe-stable-stringify": { + "version": "2.4.3", + "dev": true + }, + "safer-buffer": { + "version": "2.1.2", + "dev": true + }, + "sass": { + "version": "1.75.0", + "dev": true, + "requires": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + 
"source-map-js": ">=0.6.2 <2.0.0" + } + }, + "sax": { + "version": "1.3.0", + "dev": true + }, + "scheduler": { + "version": "0.22.0", + "requires": { + "loose-envify": "^1.1.0" + } + }, + "schema-utils": { + "version": "3.3.0", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + } + }, + "scroll-into-view-if-needed": { + "version": "3.1.0", + "requires": { + "compute-scroll-into-view": "^3.0.2" + } + }, + "selderee": { + "version": "0.11.0", + "dev": true, + "requires": { + "parseley": "^0.12.0" + } + }, + "select-hose": { + "version": "2.0.0", + "dev": true + }, + "semver": { + "version": "7.5.4", + "dev": true, + "requires": { + "lru-cache": "^6.0.0" + }, + "dependencies": { + "lru-cache": { + "version": "6.0.0", + "dev": true, + "requires": { + "yallist": "^4.0.0" + } + }, + "yallist": { + "version": "4.0.0", + "dev": true + } + } + }, + "semver-diff": { + "version": "2.1.0", + "dev": true, + "requires": { + "semver": "^5.0.3" + }, + "dependencies": { + "semver": { + "version": "5.7.2", + "dev": true + } + } + }, + "serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npm.alibaba-inc.com/serialize-javascript/download/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "peer": true, + "requires": { + "randombytes": "^2.1.0" + } + }, + "set-function-length": { + "version": "1.2.2", + "dev": true, + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + } + }, + "set-function-name": { + "version": "2.0.2", + "dev": true, + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + } + }, + "setimmediate": { + "version": "1.0.5", + "dev": 
true + }, + "sha.js": { + "version": "2.4.11", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "shallowequal": { + "version": "1.1.0" + }, + "shebang-command": { + "version": "2.0.0", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "dev": true + }, + "shortid": { + "version": "2.2.16", + "dev": true, + "requires": { + "nanoid": "^2.1.0" + } + }, + "side-channel": { + "version": "1.0.6", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + } + }, + "signal-exit": { + "version": "3.0.7", + "dev": true + }, + "simple-swizzle": { + "version": "0.2.2", + "dev": true, + "requires": { + "is-arrayish": "^0.3.1" + }, + "dependencies": { + "is-arrayish": { + "version": "0.3.2", + "dev": true + } + } + }, + "sitemap": { + "version": "7.1.1", + "dev": true, + "requires": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "dependencies": { + "@types/node": { + "version": "17.0.45", + "dev": true + } + } + }, + "slash": { + "version": "3.0.0", + "dev": true + }, + "slice-ansi": { + "version": "5.0.0", + "dev": true, + "requires": { + "ansi-styles": "^6.0.0", + "is-fullwidth-code-point": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "6.2.1", + "dev": true + } + } + }, + "slick-carousel": { + "version": "1.8.1", + "resolved": "https://registry.npm.alibaba-inc.com/slick-carousel/download/slick-carousel-1.8.1.tgz", + "integrity": "sha1-pL+ykBSIe7Zs5Si5C9DNomLMj40=", + "requires": {} + }, + "smart-buffer": { + "version": "1.1.15", + "dev": true + }, + "socks": { + "version": "1.1.10", + "dev": true, + "requires": { + "ip": "^1.1.4", + "smart-buffer": "^1.0.13" + } + }, + "socks-proxy-agent": { + "version": "3.0.1", + "dev": true, + "requires": { + "agent-base": "^4.1.0", + "socks": "^1.1.10" + } + }, + "sonic-boom": { + 
"version": "2.8.0", + "dev": true, + "requires": { + "atomic-sleep": "^1.0.0" + } + }, + "sort-object-keys": { + "version": "1.1.3", + "dev": true + }, + "sort-package-json": { + "version": "2.4.1", + "dev": true, + "requires": { + "detect-indent": "^7.0.1", + "detect-newline": "^4.0.0", + "git-hooks-list": "^3.0.0", + "globby": "^13.1.2", + "is-plain-obj": "^4.1.0", + "sort-object-keys": "^1.1.3" + }, + "dependencies": { + "fast-glob": { + "version": "3.3.2", + "dev": true, + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + } + }, + "globby": { + "version": "13.2.2", + "dev": true, + "requires": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + } + }, + "is-plain-obj": { + "version": "4.1.0", + "dev": true + }, + "slash": { + "version": "4.0.0", + "dev": true + } + } + }, + "source-map": { + "version": "0.7.4", + "dev": true + }, + "source-map-js": { + "version": "1.2.0" + }, + "source-map-resolve": { + "version": "0.6.0", + "dev": true, + "requires": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0" + } + }, + "source-map-support": { + "version": "0.5.21", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "dev": true + } + } + }, + "space-separated-tokens": { + "version": "2.0.2", + "dev": true + }, + "spdx-correct": { + "version": "3.2.0", + "dev": true, + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.5.0", + "dev": true + }, + "spdx-expression-parse": { + "version": "3.0.1", + "dev": true, + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.17", + "dev": true + }, + "spdy": { + "version": "4.0.2", + "dev": true, + "requires": { + 
"debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + } + } + }, + "spdy-transport": { + "version": "3.0.0", + "dev": true, + "requires": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + }, + "dependencies": { + "debug": { + "version": "4.3.4", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "dev": true + }, + "readable-stream": { + "version": "3.6.2", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } + } + }, + "split-on-first": { + "version": "1.1.0", + "dev": true + }, + "split2": { + "version": "3.2.2", + "dev": true, + "requires": { + "readable-stream": "^3.0.0" + }, + "dependencies": { + "readable-stream": { + "version": "3.6.2", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } + } + }, + "sprintf-js": { + "version": "1.0.3", + "dev": true + }, + "ssri": { + "version": "4.1.6", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "stable": { + "version": "0.1.8", + "dev": true + }, + "stackframe": { + "version": "1.3.4", + "dev": true + }, + "stop-iteration-iterator": { + "version": "1.0.0", + "dev": true, + "requires": { + "internal-slot": "^1.0.4" + } + }, + "stream-browserify": { + "version": "2.0.2", + "dev": true, + "requires": { + "inherits": "~2.0.1", + "readable-stream": "^2.0.2" + } + }, + "stream-each": { + "version": "1.2.3", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "stream-shift": "^1.0.0" + } + }, + "stream-http": { + "version": "2.8.3", + "dev": true, + "requires": { + "builtin-status-codes": 
"^3.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.3.6", + "to-arraybuffer": "^1.0.0", + "xtend": "^4.0.0" + } + }, + "stream-shift": { + "version": "1.0.3", + "dev": true + }, + "strict-uri-encode": { + "version": "2.0.0", + "dev": true + }, + "string_decoder": { + "version": "1.1.1", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + }, + "dependencies": { + "safe-buffer": { + "version": "5.1.2", + "dev": true + } + } + }, + "string-argv": { + "version": "0.3.2", + "dev": true + }, + "string-convert": { + "version": "0.2.1" + }, + "string-width": { + "version": "2.1.1", + "dev": true, + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.1", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "dev": true + }, + "strip-ansi": { + "version": "4.0.0", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + }, + "string-width-cjs": { + "version": "npm:string-width@4.2.3", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true + }, + "strip-ansi": { + "version": "6.0.1", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + } + } + }, + "string.prototype.matchall": { + "version": "4.0.11", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "regexp.prototype.flags": "^1.5.2", + "set-function-name": "^2.0.2", + "side-channel": "^1.0.6" + } + }, + "string.prototype.trim": { + "version": "1.2.9", + "dev": true, + "requires": { + 
"call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.0", + "es-object-atoms": "^1.0.0" + } + }, + "string.prototype.trimend": { + "version": "1.0.8", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + } + }, + "string.prototype.trimstart": { + "version": "1.0.8", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + } + }, + "stringify-entities": { + "version": "4.0.4", + "dev": true, + "requires": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + }, + "strip-ansi-cjs": { + "version": "npm:strip-ansi@6.0.1", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "dev": true + } + } + }, + "strip-eof": { + "version": "1.0.0", + "dev": true + }, + "strip-final-newline": { + "version": "2.0.0", + "dev": true + }, + "strip-indent": { + "version": "3.0.0", + "dev": true, + "requires": { + "min-indent": "^1.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "dev": true + }, + "style-search": { + "version": "0.1.0", + "resolved": "https://registry.npm.alibaba-inc.com/style-search/download/style-search-0.1.0.tgz", + "integrity": "sha1-eVjHk+R+MuB9K1yv5cC/jhLneQI=", + "dev": true, + "peer": true + }, + "style-to-object": { + "version": "0.4.4", + "dev": true, + "requires": { + "inline-style-parser": "0.1.1" + } + }, + "styled-components": { + "version": "6.1.8", + "requires": { + "@emotion/is-prop-valid": "1.2.1", + "@emotion/unitless": "0.8.0", + "@types/stylis": "4.2.0", + "css-to-react-native": "3.2.0", + "csstype": "3.1.2", + "postcss": "8.4.31", + "shallowequal": "1.1.0", + "stylis": "4.3.1", + "tslib": "2.5.0" + }, + "dependencies": { + "@emotion/unitless": { + "version": "0.8.0" + }, + 
"csstype": { + "version": "3.1.2" + }, + "stylis": { + "version": "4.3.1" + }, + "tslib": { + "version": "2.5.0" + } + } + }, + "stylelint": { + "version": "14.16.1", + "resolved": "https://registry.npm.alibaba-inc.com/stylelint/download/stylelint-14.16.1.tgz", + "integrity": "sha512-ErlzR/T3hhbV+a925/gbfc3f3Fep9/bnspMiJPorfGEmcBbXdS+oo6LrVtoUZ/w9fqD6o6k7PtUlCOsCRdjX/A==", + "dev": true, + "peer": true, + "requires": { + "@csstools/selector-specificity": "^2.0.2", + "balanced-match": "^2.0.0", + "colord": "^2.9.3", + "cosmiconfig": "^7.1.0", + "css-functions-list": "^3.1.0", + "debug": "^4.3.4", + "fast-glob": "^3.2.12", + "fastest-levenshtein": "^1.0.16", + "file-entry-cache": "^6.0.1", + "global-modules": "^2.0.0", + "globby": "^11.1.0", + "globjoin": "^0.1.4", + "html-tags": "^3.2.0", + "ignore": "^5.2.1", + "import-lazy": "^4.0.0", + "imurmurhash": "^0.1.4", + "is-plain-object": "^5.0.0", + "known-css-properties": "^0.26.0", + "mathml-tag-names": "^2.1.3", + "meow": "^9.0.0", + "micromatch": "^4.0.5", + "normalize-path": "^3.0.0", + "picocolors": "^1.0.0", + "postcss": "^8.4.19", + "postcss-media-query-parser": "^0.2.3", + "postcss-resolve-nested-selector": "^0.1.1", + "postcss-safe-parser": "^6.0.0", + "postcss-selector-parser": "^6.0.11", + "postcss-value-parser": "^4.2.0", + "resolve-from": "^5.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "style-search": "^0.1.0", + "supports-hyperlinks": "^2.3.0", + "svg-tags": "^1.0.0", + "table": "^6.8.1", + "v8-compile-cache": "^2.3.0", + "write-file-atomic": "^4.0.2" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/ansi-regex/download/ansi-regex-5.0.1.tgz", + "integrity": "sha1-CCyyyJyf6GWaMRpTvWpNxTAdswQ=", + "dev": true, + "peer": true + }, + "balanced-match": { + "version": "2.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/balanced-match/download/balanced-match-2.0.0.tgz", + "integrity": 
"sha1-3HD5INeNuLhYU1eVhnv0j4IGM9k=", + "dev": true, + "peer": true + }, + "cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npm.alibaba-inc.com/cosmiconfig/download/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "dev": true, + "peer": true, + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + } + }, + "debug": { + "version": "4.3.4", + "resolved": "https://registry.npm.alibaba-inc.com/debug/download/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "peer": true, + "requires": { + "ms": "2.1.2" + } + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/emoji-regex/download/emoji-regex-8.0.0.tgz", + "integrity": "sha1-6Bj9ac5cz8tARZT4QpY79TFkzDc=", + "dev": true, + "peer": true + }, + "global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/global-modules/download/global-modules-2.0.0.tgz", + "integrity": "sha1-mXYFrSNF8n9RU5vqJldEISFcd4A=", + "dev": true, + "peer": true, + "requires": { + "global-prefix": "^3.0.0" + } + }, + "global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/global-prefix/download/global-prefix-3.0.0.tgz", + "integrity": "sha1-/IX3MGTfafUEIfR/iD/luRO6m5c=", + "dev": true, + "peer": true, + "requires": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + } + }, + "import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/import-lazy/download/import-lazy-4.0.0.tgz", + "integrity": "sha1-6OtidIOgpD2jwD8+NVSL5csMwVM=", + "dev": true, + "peer": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": 
"https://registry.npm.alibaba-inc.com/is-fullwidth-code-point/download/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha1-8Rb4Bk/pCz94RKOJl8C3UFEmnx0=", + "dev": true, + "peer": true + }, + "is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/is-plain-object/download/is-plain-object-5.0.0.tgz", + "integrity": "sha1-RCf1CrNCnpAl6n1S6QQ6nvQVk0Q=", + "dev": true, + "peer": true + }, + "meow": { + "version": "9.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/meow/download/meow-9.0.0.tgz", + "integrity": "sha1-zZUQvFysne59A8c+4fmtlZ9Oo2Q=", + "dev": true, + "peer": true, + "requires": { + "@types/minimist": "^1.2.0", + "camelcase-keys": "^6.2.2", + "decamelize": "^1.2.0", + "decamelize-keys": "^1.1.0", + "hard-rejection": "^2.1.0", + "minimist-options": "4.1.0", + "normalize-package-data": "^3.0.0", + "read-pkg-up": "^7.0.1", + "redent": "^3.0.0", + "trim-newlines": "^3.0.0", + "type-fest": "^0.18.0", + "yargs-parser": "^20.2.3" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npm.alibaba-inc.com/ms/download/ms-2.1.2.tgz", + "integrity": "sha1-0J0fNXtEP0kzgqjrPM0YOHKuYAk=", + "dev": true, + "peer": true + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/string-width/download/string-width-4.2.3.tgz", + "integrity": "sha1-JpxxF9J7Ba0uU2gwqOyJXvnG0BA=", + "dev": true, + "peer": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-ansi/download/strip-ansi-6.0.1.tgz", + "integrity": "sha1-nibGPTD1NEPpSJSVshBdN7Z6hdk=", + "dev": true, + "peer": true, + "requires": { + "ansi-regex": "^5.0.1" + } + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npm.alibaba-inc.com/which/download/which-1.3.1.tgz", + "integrity": "sha1-pFBD1U9YBTFtqNYvn1CRjT2nCwo=", + "dev": true, + 
"peer": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "yaml": { + "version": "1.10.2", + "resolved": "https://registry.npm.alibaba-inc.com/yaml/download/yaml-1.10.2.tgz", + "integrity": "sha1-IwHF/78StGfejaIzOkWeKeeSDks=", + "dev": true, + "peer": true + } + } + }, + "stylelint-config-recommended": { + "version": "7.0.0", + "dev": true, + "requires": {} + }, + "stylelint-config-standard": { + "version": "25.0.0", + "dev": true, + "requires": { + "stylelint-config-recommended": "^7.0.0" + } + }, + "stylis": { + "version": "4.3.2" + }, + "sucrase": { + "version": "3.35.0", + "dev": true, + "requires": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "dependencies": { + "brace-expansion": { + "version": "2.0.1", + "dev": true, + "requires": { + "balanced-match": "^1.0.0" + } + }, + "commander": { + "version": "4.1.1", + "dev": true + }, + "glob": { + "version": "10.3.12", + "dev": true, + "requires": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.6", + "minimatch": "^9.0.1", + "minipass": "^7.0.4", + "path-scurry": "^1.10.2" + } + }, + "minimatch": { + "version": "9.0.4", + "dev": true, + "requires": { + "brace-expansion": "^2.0.1" + } + } + } + }, + "supports-color": { + "version": "7.2.0", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "supports-hyperlinks": { + "version": "2.3.0", + "resolved": "https://registry.npm.alibaba-inc.com/supports-hyperlinks/download/supports-hyperlinks-2.3.0.tgz", + "integrity": "sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==", + "dev": true, + "peer": true, + "requires": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + } + }, + "supports-preserve-symlinks-flag": { + "version": "1.0.0", + "dev": true + }, + "svg-parser": { + "version": "2.0.4", + "dev": true + }, + "svg-pathdata": { + "version": 
"5.0.5", + "dev": true + }, + "svg-tags": { + "version": "1.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/svg-tags/download/svg-tags-1.0.0.tgz", + "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=", + "dev": true, + "peer": true + }, + "svgo": { + "version": "2.8.0", + "dev": true, + "requires": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "dependencies": { + "commander": { + "version": "7.2.0", + "dev": true + }, + "css-select": { + "version": "4.3.0", + "dev": true, + "requires": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + } + }, + "css-tree": { + "version": "1.1.3", + "dev": true, + "requires": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + } + }, + "css-what": { + "version": "6.1.0", + "dev": true + }, + "dom-serializer": { + "version": "1.4.1", + "dev": true, + "requires": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + } + }, + "domhandler": { + "version": "4.3.1", + "dev": true, + "requires": { + "domelementtype": "^2.2.0" + } + }, + "domutils": { + "version": "2.8.0", + "dev": true, + "requires": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + } + }, + "entities": { + "version": "2.2.0", + "dev": true + }, + "mdn-data": { + "version": "2.0.14", + "dev": true + }, + "nth-check": { + "version": "2.1.1", + "dev": true, + "requires": { + "boolbase": "^1.0.0" + } + }, + "source-map": { + "version": "0.6.1", + "dev": true + } + } + }, + "svgo-browser": { + "version": "1.3.8", + "dev": true, + "requires": { + "chalk": "^2.4.1", + "coa": "^2.0.2", + "css-select": "^2.0.0", + "css-select-base-adapter": "^0.1.1", + "css-tree": "1.0.0-alpha.37", + "csso": "^4.0.2", + "js-yaml": "^3.13.1", + "mkdirp": "~0.5.1", + "sax": "~1.2.4", + "stable": "^0.1.8", + "unquote": 
"~1.1.1", + "util.promisify": "~1.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "argparse": { + "version": "1.0.10", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "js-yaml": { + "version": "3.14.1", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "sax": { + "version": "1.2.4", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "svgson": { + "version": "4.1.0", + "dev": true, + "requires": { + "deep-rename-keys": "^0.2.1", + "omit-deep": "0.3.0", + "xml-reader": "2.4.3" + } + }, + "synckit": { + "version": "0.8.5", + "dev": true, + "requires": { + "@pkgr/utils": "^2.3.1", + "tslib": "^2.5.0" + } + }, + "systemjs": { + "version": "6.15.1", + "dev": true + }, + "table": { + "version": "6.8.2", + "resolved": "https://registry.npm.alibaba-inc.com/table/download/table-6.8.2.tgz", + "integrity": "sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==", + "dev": true, + "peer": true, + "requires": { + "ajv": "^8.0.1", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npm.alibaba-inc.com/ajv/download/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dev": true, + "peer": true, + 
"requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/ansi-regex/download/ansi-regex-5.0.1.tgz", + "integrity": "sha1-CCyyyJyf6GWaMRpTvWpNxTAdswQ=", + "dev": true, + "peer": true + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/emoji-regex/download/emoji-regex-8.0.0.tgz", + "integrity": "sha1-6Bj9ac5cz8tARZT4QpY79TFkzDc=", + "dev": true, + "peer": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/is-fullwidth-code-point/download/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha1-8Rb4Bk/pCz94RKOJl8C3UFEmnx0=", + "dev": true, + "peer": true + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/json-schema-traverse/download/json-schema-traverse-1.0.0.tgz", + "integrity": "sha1-rnvLNlard6c7pcSb9lTzjmtoYOI=", + "dev": true, + "peer": true + }, + "slice-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npm.alibaba-inc.com/slice-ansi/download/slice-ansi-4.0.0.tgz", + "integrity": "sha1-UA6N0P1VsFgVCGJVsxla3ypF/ms=", + "dev": true, + "peer": true, + "requires": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + } + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/string-width/download/string-width-4.2.3.tgz", + "integrity": "sha1-JpxxF9J7Ba0uU2gwqOyJXvnG0BA=", + "dev": true, + "peer": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npm.alibaba-inc.com/strip-ansi/download/strip-ansi-6.0.1.tgz", + "integrity": "sha1-nibGPTD1NEPpSJSVshBdN7Z6hdk=", + "dev": true, + "peer": true, 
+ "requires": { + "ansi-regex": "^5.0.1" + } + } + } + }, + "tapable": { + "version": "2.2.1", + "dev": true + }, + "tar-fs": { + "version": "1.16.3", + "dev": true, + "requires": { + "chownr": "^1.0.1", + "mkdirp": "^0.5.1", + "pump": "^1.0.0", + "tar-stream": "^1.1.2" + } + }, + "tar-stream": { + "version": "1.6.2", + "dev": true, + "requires": { + "bl": "^1.0.0", + "buffer-alloc": "^1.2.0", + "end-of-stream": "^1.0.0", + "fs-constants": "^1.0.0", + "readable-stream": "^2.3.0", + "to-buffer": "^1.1.1", + "xtend": "^4.0.0" + } + }, + "term-size": { + "version": "1.2.0", + "dev": true, + "requires": { + "execa": "^0.7.0" + }, + "dependencies": { + "cross-spawn": { + "version": "5.1.0", + "dev": true, + "requires": { + "lru-cache": "^4.0.1", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "execa": { + "version": "0.7.0", + "dev": true, + "requires": { + "cross-spawn": "^5.0.1", + "get-stream": "^3.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + } + }, + "get-stream": { + "version": "3.0.0", + "dev": true + }, + "is-stream": { + "version": "1.1.0", + "dev": true + }, + "lru-cache": { + "version": "4.1.5", + "dev": true, + "requires": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "npm-run-path": { + "version": "2.0.2", + "dev": true, + "requires": { + "path-key": "^2.0.0" + } + }, + "path-key": { + "version": "2.0.1", + "dev": true + }, + "shebang-command": { + "version": "1.2.0", + "dev": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "dev": true + }, + "which": { + "version": "1.3.1", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "yallist": { + "version": "2.1.2", + "dev": true + } + } + }, + "terser": { + "version": "5.31.0", + "dev": true, + "requires": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + } + 
}, + "terser-webpack-plugin": { + "version": "5.3.10", + "resolved": "https://registry.npm.alibaba-inc.com/terser-webpack-plugin/download/terser-webpack-plugin-5.3.10.tgz", + "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "dev": true, + "peer": true, + "requires": { + "@jridgewell/trace-mapping": "^0.3.20", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.26.0" + }, + "dependencies": { + "jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npm.alibaba-inc.com/jest-worker/download/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dev": true, + "peer": true, + "requires": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + } + }, + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npm.alibaba-inc.com/supports-color/download/supports-color-8.1.1.tgz", + "integrity": "sha1-zW/BfihQDP9WwbhsCn/UpUpzAFw=", + "dev": true, + "peer": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "test-exclude": { + "version": "6.0.0", + "dev": true, + "requires": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + } + }, + "text-extensions": { + "version": "1.9.0", + "dev": true + }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npm.alibaba-inc.com/text-table/download/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", + "dev": true, + "peer": true + }, + "textextensions": { + "version": "2.6.0", + "dev": true + }, + "thenify": { + "version": "3.3.1", + "dev": true, + "requires": { + "any-promise": "^1.0.0" + } + }, + "thenify-all": { + "version": "1.6.0", + "dev": true, + "requires": { + "thenify": ">= 3.1.0 < 4" + } + }, + "thread-stream": { + "version": "0.15.2", + "dev": true, + "requires": { + 
"real-require": "^0.1.0" + } + }, + "throttle-debounce": { + "version": "5.0.0" + }, + "through": { + "version": "2.3.8", + "dev": true + }, + "through2": { + "version": "4.0.2", + "dev": true, + "requires": { + "readable-stream": "3" + }, + "dependencies": { + "readable-stream": { + "version": "3.6.2", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } + } + }, + "timed-out": { + "version": "4.0.1", + "dev": true + }, + "timers-browserify": { + "version": "2.0.12", + "dev": true, + "requires": { + "setimmediate": "^1.0.4" + } + }, + "titleize": { + "version": "3.0.0", + "dev": true + }, + "tmp": { + "version": "0.0.33", + "dev": true, + "requires": { + "os-tmpdir": "~1.0.2" + } + }, + "tmpl": { + "version": "1.0.5", + "dev": true + }, + "to-arraybuffer": { + "version": "1.0.1", + "dev": true + }, + "to-buffer": { + "version": "1.1.1", + "dev": true + }, + "to-fast-properties": { + "version": "2.0.0", + "dev": true + }, + "to-regex-range": { + "version": "5.0.1", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + }, + "toggle-selection": { + "version": "1.0.6" + }, + "transformation-matrix": { + "version": "2.16.1", + "dev": true + }, + "trim-lines": { + "version": "3.0.1", + "dev": true + }, + "trim-newlines": { + "version": "3.0.1", + "dev": true + }, + "trough": { + "version": "2.2.0", + "dev": true + }, + "ts-interface-checker": { + "version": "0.1.13", + "dev": true + }, + "ts-node": { + "version": "10.9.2", + "dev": true, + "requires": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "dependencies": { + "arg": { + "version": "4.1.3", + "dev": true + } + } + 
}, + "ts-toolbelt": { + "version": "9.6.0", + "dev": true + }, + "tslib": { + "version": "2.6.2", + "dev": true + }, + "tsutils": { + "version": "3.21.0", + "dev": true, + "requires": { + "tslib": "^1.8.1" + }, + "dependencies": { + "tslib": { + "version": "1.14.1", + "dev": true + } + } + }, + "tsx": { + "version": "3.12.2", + "dev": true, + "requires": { + "@esbuild-kit/cjs-loader": "^2.4.1", + "@esbuild-kit/core-utils": "^3.0.0", + "@esbuild-kit/esm-loader": "^2.5.4", + "fsevents": "~2.3.2" + } + }, + "tty-browserify": { + "version": "0.0.0", + "dev": true + }, + "type-check": { + "version": "0.4.0", + "resolved": "https://registry.npm.alibaba-inc.com/type-check/download/type-check-0.4.0.tgz", + "integrity": "sha1-B7ggO/pwVsBlcFDjzNLDdzC6uPE=", + "dev": true, + "peer": true, + "requires": { + "prelude-ls": "^1.2.1" + } + }, + "type-fest": { + "version": "0.18.1", + "dev": true + }, + "typed-array-buffer": { + "version": "1.0.2", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.13" + } + }, + "typed-array-byte-length": { + "version": "1.0.1", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + } + }, + "typed-array-byte-offset": { + "version": "1.0.2", + "dev": true, + "requires": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + } + }, + "typed-array-length": { + "version": "1.0.6", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0" + } + }, + "typedarray": { + "version": "0.0.6", + "dev": true + }, + "types-ramda": { + "version": "0.29.10", + "dev": true, + "requires": { + "ts-toolbelt": "^9.6.0" + } + }, + "typescript": { + "version": "5.4.5", 
+ "dev": true + }, + "umi": { + "version": "4.1.10", + "dev": true, + "requires": { + "@babel/runtime": "7.23.6", + "@umijs/bundler-utils": "4.1.10", + "@umijs/bundler-webpack": "4.1.10", + "@umijs/core": "4.1.10", + "@umijs/lint": "4.1.10", + "@umijs/preset-umi": "4.1.10", + "@umijs/renderer-react": "4.1.10", + "@umijs/server": "4.1.10", + "@umijs/test": "4.1.10", + "@umijs/utils": "4.1.10", + "prettier-plugin-organize-imports": "^3.2.2", + "prettier-plugin-packagejson": "2.4.3" + }, + "dependencies": { + "@babel/runtime": { + "version": "7.23.6", + "dev": true, + "requires": { + "regenerator-runtime": "^0.14.0" + } + } + } + }, + "unbox-primitive": { + "version": "1.0.2", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "has-bigints": "^1.0.2", + "has-symbols": "^1.0.3", + "which-boxed-primitive": "^1.0.2" + } + }, + "unfetch": { + "version": "5.0.0", + "dev": true + }, + "unified": { + "version": "10.1.2", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "bail": "^2.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^5.0.0" + }, + "dependencies": { + "is-plain-obj": { + "version": "4.1.0", + "dev": true + } + } + }, + "unique-filename": { + "version": "1.1.1", + "dev": true, + "requires": { + "unique-slug": "^2.0.0" + } + }, + "unique-slug": { + "version": "2.0.2", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4" + } + }, + "unique-string": { + "version": "1.0.0", + "dev": true, + "requires": { + "crypto-random-string": "^1.0.0" + } + }, + "unist-util-filter": { + "version": "4.0.1", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.0.0" + } + }, + "unist-util-generated": { + "version": "2.0.1", + "dev": true + }, + "unist-util-is": { + "version": "5.2.1", + "dev": true, + "requires": { + "@types/unist": "^2.0.0" + } + }, + "unist-util-position": { + "version": "4.0.4", + "dev": true, + "requires": { 
+ "@types/unist": "^2.0.0" + } + }, + "unist-util-stringify-position": { + "version": "3.0.3", + "dev": true, + "requires": { + "@types/unist": "^2.0.0" + } + }, + "unist-util-visit": { + "version": "4.1.2", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + } + }, + "unist-util-visit-parents": { + "version": "5.1.3", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0" + } + }, + "universalify": { + "version": "2.0.1", + "dev": true + }, + "unquote": { + "version": "1.1.1", + "dev": true + }, + "unset-value": { + "version": "0.1.2", + "dev": true, + "requires": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + } + }, + "untildify": { + "version": "4.0.0", + "dev": true + }, + "unzip-response": { + "version": "2.0.1", + "dev": true + }, + "update-browserslist-db": { + "version": "1.0.13", + "dev": true, + "requires": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + } + }, + "update-notifier": { + "version": "2.5.0", + "dev": true, + "requires": { + "boxen": "^1.2.1", + "chalk": "^2.0.1", + "configstore": "^3.0.0", + "import-lazy": "^2.1.0", + "is-ci": "^1.0.10", + "is-installed-globally": "^0.1.0", + "is-npm": "^1.0.0", + "latest-version": "^3.0.0", + "semver-diff": "^2.0.0", + "xdg-basedir": "^3.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "uri-js": { + 
"version": "4.4.1", + "dev": true, + "requires": { + "punycode": "^2.1.0" + }, + "dependencies": { + "punycode": { + "version": "2.3.1", + "dev": true + } + } + }, + "url": { + "version": "0.11.3", + "dev": true, + "requires": { + "punycode": "^1.4.1", + "qs": "^6.11.2" + } + }, + "url-parse-lax": { + "version": "1.0.0", + "dev": true, + "requires": { + "prepend-http": "^1.0.1" + } + }, + "use-isomorphic-layout-effect": { + "version": "1.1.2", + "dev": true, + "requires": {} + }, + "util": { + "version": "0.11.1", + "dev": true, + "requires": { + "inherits": "2.0.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "dev": true + } + } + }, + "util-deprecate": { + "version": "1.0.2", + "dev": true + }, + "util.promisify": { + "version": "1.0.1", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.2", + "has-symbols": "^1.0.1", + "object.getownpropertydescriptors": "^2.1.0" + } + }, + "utila": { + "version": "0.4.0", + "dev": true + }, + "uuid": { + "version": "8.3.2", + "dev": true + }, + "uvu": { + "version": "0.5.6", + "dev": true, + "requires": { + "dequal": "^2.0.0", + "diff": "^5.0.0", + "kleur": "^4.0.3", + "sade": "^1.7.3" + }, + "dependencies": { + "diff": { + "version": "5.2.0", + "dev": true + } + } + }, + "v8-compile-cache": { + "version": "2.3.0", + "dev": true + }, + "v8-compile-cache-lib": { + "version": "3.0.1", + "dev": true + }, + "validate-npm-package-license": { + "version": "3.0.4", + "dev": true, + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "validate-npm-package-name": { + "version": "3.0.0", + "dev": true, + "requires": { + "builtins": "^1.0.3" + } + }, + "vary": { + "version": "1.1.2", + "dev": true + }, + "vfile": { + "version": "5.3.7", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^3.0.0", + "vfile-message": "^3.0.0" + } + }, + "vfile-location": { + "version": "4.1.0", + 
"dev": true, + "requires": { + "@types/unist": "^2.0.0", + "vfile": "^5.0.0" + } + }, + "vfile-message": { + "version": "3.1.4", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^3.0.0" + } + }, + "vite": { + "version": "4.5.2", + "dev": true, + "requires": { + "esbuild": "^0.18.10", + "fsevents": "~2.3.2", + "postcss": "^8.4.27", + "rollup": "^3.27.1" + }, + "dependencies": { + "@esbuild/darwin-arm64": { + "version": "0.18.20", + "dev": true, + "optional": true + }, + "esbuild": { + "version": "0.18.20", + "dev": true, + "requires": { + "@esbuild/android-arm": "0.18.20", + "@esbuild/android-arm64": "0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + "@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" + } + } + } + }, + "vm-browserify": { + "version": "1.1.2", + "dev": true + }, + "walker": { + "version": "1.0.8", + "dev": true, + "requires": { + "makeerror": "1.0.12" + } + }, + "watchpack": { + "version": "2.4.0", + "resolved": "https://registry.npm.alibaba-inc.com/watchpack/download/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "dev": true, + "peer": true, + "requires": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + } + }, + "wbuf": { + "version": "1.7.3", + "dev": true, + 
"requires": { + "minimalistic-assert": "^1.0.0" + } + }, + "web-namespaces": { + "version": "2.0.1", + "dev": true + }, + "web-streams-polyfill": { + "version": "3.3.3", + "dev": true + }, + "webpack": { + "version": "5.89.0", + "resolved": "https://registry.npm.alibaba-inc.com/webpack/download/webpack-5.89.0.tgz", + "integrity": "sha512-qyfIC10pOr70V+jkmud8tMfajraGCZMBWJtrmuBymQKCrLTRejBI8STDp1MCyZu/QTdZSeacCQYpYNQVOzX5kw==", + "dev": true, + "peer": true, + "requires": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.0", + "@webassemblyjs/ast": "^1.11.5", + "@webassemblyjs/wasm-edit": "^1.11.5", + "@webassemblyjs/wasm-parser": "^1.11.5", + "acorn": "^8.7.1", + "acorn-import-assertions": "^1.9.0", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.15.0", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.7", + "watchpack": "^2.4.0", + "webpack-sources": "^3.2.3" + } + }, + "webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npm.alibaba-inc.com/webpack-sources/download/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "dev": true, + "peer": true + }, + "which": { + "version": "2.0.2", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-boxed-primitive": { + "version": "1.0.2", + "dev": true, + "requires": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + } + }, + "which-builtin-type": { + "version": "1.1.3", + "dev": true, + "requires": { + "function.prototype.name": "^1.1.5", + "has-tostringtag": 
"^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "dependencies": { + "isarray": { + "version": "2.0.5", + "dev": true + } + } + }, + "which-collection": { + "version": "1.0.2", + "dev": true, + "requires": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + } + }, + "which-typed-array": { + "version": "1.1.15", + "dev": true, + "requires": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.2" + } + }, + "widest-line": { + "version": "2.0.1", + "dev": true, + "requires": { + "string-width": "^2.1.1" + } + }, + "wrap-ansi": { + "version": "8.1.0", + "dev": true, + "requires": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + "dev": true + }, + "ansi-styles": { + "version": "6.2.1", + "dev": true + }, + "string-width": { + "version": "5.1.2", + "dev": true, + "requires": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + } + }, + "strip-ansi": { + "version": "7.1.0", + "dev": true, + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "wrap-ansi-cjs": { + "version": "npm:wrap-ansi@7.0.0", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true + }, + "string-width": { + "version": "4.2.3", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + 
"is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "strip-ansi": { + "version": "6.0.1", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + } + } + }, + "wrappy": { + "version": "1.0.2", + "dev": true + }, + "write-file-atomic": { + "version": "4.0.2", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + } + }, + "xdg-basedir": { + "version": "3.0.0", + "dev": true + }, + "xml-lexer": { + "version": "0.2.2", + "dev": true, + "requires": { + "eventemitter3": "^2.0.0" + }, + "dependencies": { + "eventemitter3": { + "version": "2.0.3", + "dev": true + } + } + }, + "xml-reader": { + "version": "2.4.3", + "dev": true, + "requires": { + "eventemitter3": "^2.0.0", + "xml-lexer": "^0.2.2" + }, + "dependencies": { + "eventemitter3": { + "version": "2.0.3", + "dev": true + } + } + }, + "xtend": { + "version": "4.0.2", + "dev": true + }, + "y18n": { + "version": "3.2.2", + "dev": true + }, + "yallist": { + "version": "3.1.1", + "dev": true + }, + "yaml": { + "version": "2.3.1", + "dev": true + }, + "yargs": { + "version": "17.7.2", + "dev": true, + "requires": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true + }, + "string-width": { + "version": "4.2.3", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "strip-ansi": { + "version": "6.0.1", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + }, + "y18n": { + "version": "5.0.8", + "dev": true + }, + "yargs-parser": { + "version": "21.1.1", + "dev": true + } + } + }, + "yargs-parser": { + "version": "20.2.9", + "dev": true + }, + 
"yn": { + "version": "3.1.1", + "dev": true + }, + "yocto-queue": { + "version": "0.1.0", + "dev": true + }, + "zwitch": { + "version": "2.0.4", + "dev": true + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 0000000..dc1f19e --- /dev/null +++ b/package.json @@ -0,0 +1,46 @@ +{ + "name": "CodeFuse-Docs", + "version": "0.0.1", + "description": "A static-site powered by dumi", + "repository": { + "type": "git", + "url": "https://code.alipay.com/LinkE/CodeFuse-Docs" + }, + "license": "MIT", + "author": "zhenjinsong.szj", + "scripts": { + "build": "dumi build", + "dev": "dumi dev", + "prepare": "husky install && dumi setup", + "start": "npm run dev" + }, + "commitlint": { + "extends": [ + "@commitlint/config-conventional" + ] + }, + "lint-staged": { + "*.{md,json}": [ + "prettier --write --no-error-on-unmatched-pattern" + ] + }, + "dependencies": { + "antd": "^5.16.5", + "react-slick": "^0.30.2", + "slick-carousel": "^1.8.1", + "styled-components": "^6.1.8" + }, + "devDependencies": { + "@commitlint/cli": "^17.1.2", + "@commitlint/config-conventional": "^17.1.0", + "@types/lodash": "^4.17.0", + "dumi": "^2.2.0", + "husky": "^8.0.1", + "lint-staged": "^13.0.3", + "prettier": "^2.7.1" + }, + "tnpm": { + "mode": "npm" + }, + "yuyanId": "180020010001264005" +} diff --git a/resources/_gen/assets/scss/scss/base.scss_7724f67189cff0c6ae476b070cf609b9.content b/resources/_gen/assets/scss/scss/base.scss_7724f67189cff0c6ae476b070cf609b9.content deleted file mode 100644 index 230c42c..0000000 --- a/resources/_gen/assets/scss/scss/base.scss_7724f67189cff0c6ae476b070cf609b9.content +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Docura (https://docura.github.io/) - * Copyright 2022-2023 Dumindu Madunuwan - * Licensed under the MIT License. 
- */*:where(:not(html, iframe, canvas, img, svg, video, audio, pre, code):not(svg *, symbol *)){all:unset;display:revert}*,*::before,*::after{box-sizing:border-box}html{-moz-text-size-adjust:none;-webkit-text-size-adjust:none;text-size-adjust:none}a,button{cursor:revert}ol,ul,menu{list-style:none}img{max-inline-size:100%;max-block-size:100%}table{border-collapse:collapse}input,textarea{-webkit-user-select:auto}textarea{white-space:revert}meter{-webkit-appearance:revert;appearance:revert}:where(pre){all:revert;box-sizing:border-box}::placeholder{color:unset}::marker{content:initial}:where([hidden]){display:none}:where([contenteditable]:not([contenteditable="false"])){-moz-user-modify:read-write;-webkit-user-modify:read-write;overflow-wrap:break-word;-webkit-line-break:after-white-space;-webkit-user-select:auto}:where([draggable="true"]){-webkit-user-drag:element}:where(dialog:modal){all:revert;box-sizing:border-box}pre,code{margin:0}:root{--site-header-height: 46px;--site-footer-height: 46px}@media (min-width: 1025px) and (max-width: 1280px),(min-width: 1024px) and (max-width: 1280px) and (orientation: portrait){:root{--site-header-height: 60px;--site-footer-height: 60px}}@media (min-width: 1281px){:root{--site-header-height: 80px;--site-footer-height: 80px}}body{font-family:var(--font-family);background:var(--background);color:var(--color);display:flex;flex-direction:column;min-height:100svh}#site-header{display:grid;grid-template-columns:2fr 1fr;grid-template-rows:repeat(3, var(--site-header-height))}#site-header-menu,#site-header-search{grid-column:1 / 3}#site-footer{display:grid;grid-template-columns:1fr 1fr;grid-template-rows:repeat(3, var(--site-footer-height))}#site-footer-copyright,#site-footer-love{grid-column:1 / 3}#site-main-content-wrapper{display:flex;flex:1}#sidebar,#toc,#article-nav,#sidebar .btn-close,#toc 
.btn-close{display:none}main{flex:1;display:flex;overflow:auto}#article{flex:1;width:100vw}#sidebar{width:85%;left:-85%}#toc{width:85%;right:-85%}@media (min-width: 768px) and (max-width: 1023px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:repeat(2, var(--site-header-height))}#site-header-brand{grid-column:1 / 6}#site-header-controls{grid-column:6 / 7}#site-header-menu{grid-column:1 / 5}#site-header-search{grid-column:5 / 7}#site-footer{grid-template-columns:repeat(4, 1fr);grid-template-rows:repeat(2, var(--site-footer-height))}#site-footer-copyright{grid-column:1 / 3}#site-footer-social{grid-column:3 / 4}#site-footer-fund{grid-column:4 / 5}#site-footer-love{grid-column:1 / 5}#sidebar{width:50%;left:-50%}#toc{width:50%;right:-50%}}@media (min-width: 1024px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:var(--site-header-height)}#site-header-brand{grid-column:1 / 2}#site-header-menu{grid-column:2 / 5;grid-row:1}#site-header-search{grid-column:5 / 6;grid-row:1}#site-header-controls{grid-column:6 / 7}#site-footer{grid-template-columns:repeat(5, 1fr);grid-template-rows:var(--site-footer-height)}#site-footer-copyright{grid-column:1 / 3}#site-footer-love{grid-column:3 / 4;grid-row:1}#site-footer-social{grid-column:4 / 5}#site-footer-fund{grid-column:5 / 6}#article-nav-toc-btn{display:none}}@media (min-width: 1024px) and (max-width: 1279px){#sidebar{width:33%;left:-33%}#article{width:75vw}#toc{width:25%;display:flex;flex-direction:column}#toc .sticky{position:fixed;right:0;width:25%}}@media (min-width: 1280px){#sidebar{width:20%;display:flex;flex-direction:column}#article{width:60vw}#toc{width:25%;display:flex;flex-direction:column}#sidebar .sticky{position:fixed;left:0;width:20%}#toc .sticky{position:fixed;right:0;width:20%}}@media (max-width: 1023px){#toc{position:fixed;top:0;height:100%;transition:.3s;z-index:300;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] 
#toc,:root[data-color="night"] #toc{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-toc-on #toc{animation:slide-in-right .3s forwards;display:flex;flex-direction:column;padding-left:16px;z-index:10;cursor:default}.offcanvas-toc-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-toc-on #toc .btn-close{display:block;position:absolute;top:10px;left:10px}#article-nav-toc-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}@media (max-width: 1279px){#sidebar{position:fixed;top:0;height:100%;transition:.3s;z-index:200;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] #sidebar,:root[data-color="night"] #sidebar{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-sidebar-on #sidebar{animation:slide-in-left .3s forwards;display:flex;flex-direction:column;z-index:10;cursor:default}.offcanvas-sidebar-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-sidebar-on #sidebar .btn-close{display:block;position:absolute;top:10px;right:10px}#article-nav{display:flex;gap:12px;overflow:auto;justify-content:space-between;height:var(--site-header-height);align-items:center;padding:0 2px}#article-nav-menu-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}body.offcanvas-sidebar-on,body.offcanvas-toc-on{cursor:pointer;overflow:hidden}.offcanvas-sidebar-on:before,.offcanvas-toc-on:before{background:rgba(255,255,255,0.1);backdrop-filter:blur(var(--blur));-webkit-backdrop-filter:blur(var(--blur))}@keyframes slide-in-left{from{transform:translateX(0)}to{transform:translateX(100%)}}@keyframes 
slide-in-right{from{transform:translateX(0)}to{transform:translateX(-100%)}}#site-header-brand{display:flex;align-items:center;font-family:var(--font-family-brand);font-size:1.4em;color:var(--color2)}#site-header-brand a{padding:12px}#site-header-menu{padding:0 12px;display:flex;align-items:center;color:var(--color3)}#site-header-menu nav{width:100%;overflow:auto}#site-header-menu ul{display:flex;height:100%;align-items:center;gap:12px}#site-header-menu a{display:flex;padding:12px 6px;gap:3px;white-space:nowrap}#site-header-menu a:focus,#site-header-menu a:hover,#site-header-menu a.active{border-bottom:3px solid}#site-header-controls{display:flex;align-items:center;padding-right:12px;justify-content:flex-end;gap:12px}#site-header-search{display:flex;align-items:flex-end}@media (min-width: 768px){#site-header-search{align-items:center}}#site-footer-social{display:flex;gap:12px;justify-content:flex-start;padding-left:12px;align-items:center}#site-footer-fund{display:flex;gap:12px;overflow:auto;justify-content:flex-end;padding-right:12px;align-items:center}#site-footer-copyright,#site-footer-love{display:flex;align-items:center;justify-content:center;color:var(--color3)}#site-footer-copyright a{display:flex;align-items:center}@media (min-width: 768px){#site-footer-copyright{justify-content:flex-start;padding-left:12px}#site-footer-social{justify-content:flex-end;padding-right:12px}}#article{padding:8px 16px}#article-header{font-size:3em;font-weight:400;margin-bottom:1em;color:var(--color2)}#article-content h1,#article-content h2,#article-content h3,#article-content h4,#article-content h5,#article-content h6{line-height:1em;font-weight:400;margin:2.6em 0 .1em;color:var(--color2)}#article-content h1{font-size:1.8em}#article-content h2{font-size:1.5em}#article-content h3{font-size:1.3em}#article-content h4{font-size:1.1em}#article-content .highlight,#article-content blockquote,#article-content dl,#article-content iframe,#article-content ol,#article-content 
p,#article-content table,#article-content ul{margin-top:1em;line-height:1.8rem;letter-spacing:-.1px}#article-content blockquote p{margin:1em 0}#article-content blockquote dl,#article-content blockquote ol,#article-content blockquote ul{margin:0 1em 1em 1em}#article-content a{color:var(--color-anchor);text-decoration:none}#article-content a:hover{color:var(--color-hover);text-decoration:underline}@media print{#article-content a{color:#355265;text-decoration:underline}#article-content a:after{content:" (" attr(href) ")";font-size:80%}}#article-content strong,#article-content b,#article-content table th{font-weight:600}#article-content em{font-style:italic}#article-content dl,#article-content ol,#article-content ul{margin-left:20px}#article-content dl dl,#article-content dl ol,#article-content dl ul,#article-content ol dl,#article-content ol ol,#article-content ol ul,#article-content ul dl,#article-content ul ol,#article-content ul ul{margin-top:0;margin-bottom:0}#article-content ul{list-style:disc}#article-content ol{list-style:decimal}#article-content dl{list-style:square}#article-content li>ul{list-style:circle}#article-content li>ol{list-style:lower-alpha}#article-content li p{margin:0}#article-content li .highlight,#article-content li blockquote,#article-content li iframe,#article-content li table{margin:1em 0}#article-content img,#article-content video{max-width:100%;border-radius:4px}#article-content blockquote{padding:8px 12px;position:relative;background:var(--background-fg);border-left:4px solid var(--border-color);border-radius:6px}#article-content blockquote footer{margin:1em 0;font-style:italic}#article-content blockquote footer cite:before{content:"—";padding:0 .3em}#article-content blockquote footer cite a{color:var(--border-color)}#article-content code,#article-content pre{font-family:var(--font-family-code)}#article-content h1 code,#article-content h2 code,#article-content h3 code,#article-content h4 code,#article-content h5 code,#article-content h6 
code,#article-content p code,#article-content blockquote code,#article-content ul code,#article-content ol code,#article-content dl code,#article-content table code{background:var(--chroma-base00);padding:4px;border-radius:4px;font-size:.9em}#article-content pre:not(.chroma){color:var(--chroma-base05);font-size:.9em;line-height:1.8;letter-spacing:-.1px;background-color:var(--chroma-base00);border-radius:6px;padding:16px 24px;overflow-x:auto;margin-top:1em}#article-content blockquote code{background:var(--background-fg2);opacity:.8}#article-content blockquote .chroma,#article-content blockquote pre:not(.chroma){background:var(--background-fg2);margin-bottom:1em}#article-content blockquote .chroma code,#article-content blockquote pre:not(.chroma) code{padding:0}#article-content table{max-width:100%;border:1px solid var(--border-color)}#article-content table td,#article-content table th{padding:5px 15px}#article-content table tr:nth-child(2n){background:var(--background-fg)}#article-footer{display:grid;grid-template-columns:1fr 1fr;padding-top:20px}#article-last-updated,#article-prev-link,#article-next-link{display:flex;align-items:center;padding:12px 0}#article-last-updated{grid-column:1 / 3;justify-content:center;color:var(--color3)}#article-prev-link,#article-next-link{color:var(--color-anchor)}#article-prev-link:hover,#article-next-link:hover{color:var(--color-hover);font-weight:600;font-size:98%}#article-next-link{justify-content:flex-end}#article-prev-link .icon{padding-right:6px}#article-next-link .icon{padding-left:6px}@media (max-width: 767px){#article-next-link[data-first-page="true"]{grid-column:2/ 3}}@media (min-width: 768px){#article{padding:16px 24px}#article-footer{display:grid;grid-template-columns:repeat(3, 1fr)}#article-prev-link{grid-column:1/ 2;grid-row:1}#article-last-updated{grid-column:2 / 3}#article-next-link{grid-column:3 / 4}}@media (min-width: 1024px){#article{padding:24px 32px}}@media (min-width: 1281px){#article{padding:32px 40px}}@media 
(min-width: 1920px){#article{padding:40px 48px}#article-content{width:90%}}@media (min-width: 2560px){#article-content{width:85%}}@media (min-width: 3840px){#article-content{width:80%}}#sidebar{padding:40px 0}#sidebar .sticky{display:flex;flex-direction:column;padding:0 20px;overflow:auto}.sidebar-section,.sidebar-link{padding:7px 0}.sidebar-section{margin-top:40px;font-weight:600;color:var(--color2)}#sidebar .sidebar-section:first-child{margin-top:0}.sidebar-link{padding-left:10px;color:var(--color3);border-left:1px solid var(--border-color);margin-left:4px}.sidebar-link::before{content:'';display:inline-block;width:6px;height:6px;background:var(--background);box-shadow:var(--box-shadow);border-radius:50%;position:relative;left:-13.5px;top:-3px}.sidebar-link:hover{color:var(--color-hover);font-weight:600;font-size:98%}.sidebar-link.current{color:var(--color-anchor);font-weight:600;font-size:98%}.sidebar-link.current::before,.sidebar-link:hover::before{background:var(--color-anchor)}#toc{padding-top:40px;padding-bottom:40px}#toc .sticky{overflow:auto}#toc strong{font-weight:600;padding:7px 10px 7px 0;display:flex;gap:3px;position:relative;left:-3px;color:var(--color2)}#toc ul{margin-left:.3em;border-left:1px solid var(--border-color)}#toc ul ul{margin-left:1em}#toc ul a{display:inline-block;padding:7px;color:var(--color3)}#toc ul a.active,#toc ul a:hover{color:var(--color-hover)}#toc ul a::before{content:'';display:inline-block;width:6px;height:6px;background:var(--background);box-shadow:var(--box-shadow);position:relative;left:-10.5px;top:-3px}#toc ul a.active::before,#toc ul a:hover::before{background:var(--color-hover)}.btn-github{display:flex;flex-direction:row;gap:2px;font-size:.7em;font-weight:700;line-height:1.8em;color:#576060;background:#f6f8fa;border:1px solid #d5d7da;border-radius:6px;padding:2px 4px}:root[data-color="dark"] .btn-github,:root[data-color="night"] .btn-github{color:#c9d1d9;background:#21262d;border:1px solid #576060}.btn-github 
.icon{transform:scale(0.8)}.btn-buymeacoffee{width:86px;height:24px;background-image:url("data:image/svg+xml,%3Csvg width='85.5' height='24' viewBox='0 0 545 153' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0 24.48C0 10.9601 10.9601 0 24.48 0H520.2C533.72 0 544.68 10.9601 544.68 24.48V128.52C544.68 142.04 533.72 153 520.2 153H24.48C10.9601 153 0 142.04 0 128.52V24.48Z' fill='%23FFDD00'/%3E%3Cpath d='M109.522 50.3178L109.455 50.2783L109.299 50.2308C109.362 50.2836 109.44 50.3142 109.522 50.3178Z' fill='%230D0C22'/%3E%3Cpath d='M110.507 57.3134L110.432 57.3344L110.507 57.3134Z' fill='%230D0C22'/%3E%3Cpath d='M109.549 50.3062C109.54 50.3051 109.532 50.3031 109.524 50.3003C109.523 50.3058 109.523 50.3113 109.524 50.3168C109.533 50.3156 109.541 50.3119 109.549 50.3062Z' fill='%230D0C22'/%3E%3Cpath d='M109.523 50.3205H109.536V50.3127L109.523 50.3205Z' fill='%230D0C22'/%3E%3Cpath d='M110.447 57.3006L110.56 57.2361L110.602 57.2123L110.64 57.1715C110.569 57.2025 110.503 57.2462 110.447 57.3006Z' fill='%230D0C22'/%3E%3Cpath d='M109.715 50.4713L109.604 50.3659L109.529 50.3251C109.57 50.3963 109.636 50.4488 109.715 50.4713Z' fill='%230D0C22'/%3E%3Cpath d='M81.8801 118.353C81.7916 118.391 81.7142 118.451 81.6548 118.527L81.7246 118.482C81.772 118.439 81.8392 118.387 81.8801 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M98.0456 115.173C98.0456 115.073 97.9968 115.091 98.0087 115.447C98.0087 115.418 98.0206 115.389 98.0258 115.361C98.0324 115.298 98.0377 115.236 98.0456 115.173Z' fill='%230D0C22'/%3E%3Cpath d='M96.3761 118.353C96.2877 118.391 96.2103 118.451 96.1509 118.527L96.2207 118.482C96.2681 118.439 96.3353 118.387 96.3761 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M70.4886 119.11C70.4215 119.052 70.3393 119.013 70.2515 118.999C70.3226 119.034 70.3937 119.068 70.4412 119.094L70.4886 119.11Z' fill='%230D0C22'/%3E%3Cpath d='M67.9304 116.657C67.92 116.553 67.8881 116.453 67.8369 116.362C67.8732 116.456 67.9035 116.553 67.9278 116.652L67.9304 116.657Z' 
fill='%230D0C22'/%3E%3Cpath d='M85.1368 72.7737C81.6195 74.2794 77.628 75.9866 72.4549 75.9866C70.2908 75.9823 68.1373 75.6854 66.0527 75.104L69.6306 111.838C69.7572 113.373 70.4567 114.805 71.59 115.848C72.7233 116.892 74.2076 117.471 75.7482 117.47C75.7482 117.47 80.8212 117.734 82.514 117.734C84.3358 117.734 89.7988 117.47 89.7988 117.47C91.3391 117.47 92.8231 116.891 93.9562 115.848C95.0892 114.804 95.7885 113.373 95.9151 111.838L99.7472 71.2456C98.0347 70.6607 96.3064 70.2721 94.358 70.2721C90.9883 70.2708 88.2733 71.4313 85.1368 72.7737Z' fill='white'/%3E%3Cpath d='M54.9844 57.1021L55.045 57.1587L55.0845 57.1824C55.0541 57.1522 55.0205 57.1252 54.9844 57.1021Z' fill='%230D0C22'/%3E%3Cpath d='M116.299 53.7119L115.761 50.9943C115.277 48.5559 114.18 46.2519 111.677 45.3706C110.875 45.0887 109.964 44.9675 109.349 44.384C108.734 43.8004 108.552 42.8941 108.41 42.0536C108.147 40.511 107.899 38.9671 107.629 37.4272C107.396 36.1033 107.211 34.616 106.604 33.4015C105.814 31.7706 104.174 30.8169 102.543 30.1859C101.707 29.8739 100.854 29.61 99.9884 29.3955C95.9139 28.3205 91.63 27.9253 87.4382 27.7001C82.407 27.4225 77.3623 27.5061 72.343 27.9504C68.6071 28.2902 64.6723 28.7013 61.1221 29.9935C59.8245 30.4665 58.4875 31.0342 57.5008 32.0367C56.2902 33.2684 55.895 35.1733 56.7789 36.7092C57.4073 37.8 58.4717 38.5706 59.6006 39.0804C61.0711 39.7373 62.6068 40.2371 64.1822 40.5716C68.5689 41.5412 73.1124 41.9219 77.5939 42.0839C82.561 42.2844 87.5362 42.1219 92.4796 41.5978C93.7021 41.4635 94.9224 41.3023 96.1405 41.1144C97.575 40.8944 98.4958 39.0185 98.073 37.7117C97.5671 36.1494 96.2077 35.5434 94.6703 35.7792C94.4438 35.8148 94.2185 35.8477 93.9919 35.8807L93.8286 35.9044C93.3078 35.9702 92.787 36.0317 92.2662 36.0888C91.1904 36.2047 90.112 36.2996 89.0309 36.3733C86.6097 36.5419 84.1818 36.6197 81.7553 36.6236C79.371 36.6236 76.9853 36.5564 74.6062 36.3997C73.5207 36.3285 72.4379 36.2381 71.3577 36.1283C70.8663 36.0769 70.3763 36.0229 69.8862 35.9623L69.4199 
35.903L69.3185 35.8886L68.835 35.8187C67.847 35.6699 66.859 35.4986 65.8816 35.2918C65.783 35.2699 65.6947 35.2151 65.6315 35.1363C65.5683 35.0575 65.5338 34.9594 65.5338 34.8584C65.5338 34.7574 65.5683 34.6594 65.6315 34.5806C65.6947 34.5018 65.783 34.4469 65.8816 34.425H65.9C66.7471 34.2445 67.6007 34.0904 68.4569 33.956C68.7424 33.9113 69.0287 33.8673 69.3158 33.8243H69.3237C69.8599 33.7887 70.3987 33.6926 70.9322 33.6293C75.574 33.1465 80.2434 32.9819 84.9077 33.1367C87.1721 33.2025 89.4353 33.3356 91.6892 33.5648C92.174 33.6149 92.6562 33.6676 93.1383 33.7268C93.3227 33.7492 93.5085 33.7756 93.6942 33.798L94.0683 33.852C95.1591 34.0144 96.2441 34.2116 97.3234 34.4435C98.9227 34.7912 100.976 34.9045 101.688 36.6566C101.914 37.2125 102.017 37.8303 102.142 38.4139L102.302 39.1581C102.306 39.1715 102.309 39.1852 102.311 39.199C102.688 40.9554 103.065 42.7118 103.442 44.4683C103.47 44.598 103.471 44.7321 103.444 44.8621C103.418 44.9921 103.365 45.1153 103.289 45.2239C103.213 45.3326 103.115 45.4244 103.002 45.4936C102.889 45.5628 102.762 45.6079 102.631 45.6262H102.62L102.39 45.6578L102.162 45.6881C101.44 45.7821 100.717 45.8699 99.9936 45.9516C98.5683 46.114 97.1408 46.2546 95.711 46.3731C92.87 46.6094 90.0233 46.7644 87.1708 46.8381C85.7174 46.8768 84.2644 46.8948 82.8118 46.8921C77.0301 46.8876 71.2534 46.5516 65.5101 45.8857C64.8883 45.8119 64.2666 45.7329 63.6448 45.6525C64.1269 45.7145 63.2944 45.6051 63.1258 45.5814C62.7306 45.5261 62.3354 45.4686 61.9402 45.4088C60.6136 45.2099 59.295 44.9649 57.9711 44.7502C56.3705 44.4867 54.8398 44.6185 53.3921 45.4088C52.2037 46.0591 51.2419 47.0564 50.6349 48.2674C50.0105 49.5584 49.8248 50.964 49.5455 52.3511C49.2662 53.7383 48.8315 55.2308 48.9962 56.6548C49.3505 59.7281 51.4991 62.2258 54.5895 62.7843C57.4968 63.3112 60.42 63.7381 63.351 64.1016C74.8648 65.5118 86.4968 65.6805 98.0466 64.6049C98.9872 64.517 99.9265 64.4213 100.864 64.3177C101.157 64.2855 101.454 64.3192 101.732 64.4165C102.01 64.5137 102.263 64.6719 
102.472 64.8795C102.681 65.0872 102.842 65.339 102.941 65.6165C103.04 65.894 103.076 66.1902 103.046 66.4834L102.753 69.3261C102.164 75.0705 101.575 80.8145 100.986 86.558C100.371 92.5896 99.7521 98.6208 99.1295 104.651C98.9538 106.35 98.7782 108.048 98.6025 109.746C98.4339 111.417 98.4102 113.142 98.0927 114.794C97.5922 117.391 95.8335 118.987 93.2674 119.57C90.9164 120.105 88.5148 120.386 86.1038 120.408C83.431 120.422 80.7594 120.304 78.0866 120.318C75.2333 120.334 71.7384 120.071 69.5358 117.947C67.6007 116.082 67.3333 113.161 67.0698 110.636C66.7185 107.293 66.3703 103.95 66.0252 100.607L64.0887 82.0212L62.8359 69.9953C62.8149 69.7964 62.7938 69.6001 62.774 69.3999C62.6239 67.9654 61.6082 66.5611 60.0077 66.6335C58.6376 66.6941 57.0806 67.8586 57.2413 69.3999L58.17 78.3155L60.0906 96.7581C60.6378 101.997 61.1836 107.236 61.7281 112.476C61.8335 113.48 61.9323 114.487 62.0429 115.49C62.6449 120.976 66.834 123.932 72.0216 124.764C75.0515 125.252 78.1551 125.352 81.2297 125.402C85.1711 125.465 89.1521 125.617 93.029 124.903C98.7738 123.849 103.084 120.013 103.699 114.062C103.875 112.345 104.051 110.626 104.226 108.908C104.81 103.224 105.393 97.5397 105.976 91.855L107.88 73.2807L108.754 64.7682C108.797 64.3461 108.976 63.9492 109.262 63.6363C109.549 63.3234 109.929 63.111 110.345 63.0307C111.988 62.7105 113.558 62.1639 114.727 60.9137C116.587 58.9232 116.957 56.3281 116.299 53.7119ZM54.5052 55.5483C54.5302 55.5364 54.4841 55.7511 54.4644 55.8513C54.4604 55.6998 54.4683 55.5654 54.5052 55.5483ZM54.6646 56.7813C54.6778 56.7721 54.7173 56.8248 54.7581 56.888C54.6962 56.83 54.6567 56.7866 54.6633 56.7813H54.6646ZM54.8214 56.9881C54.878 57.0843 54.9083 57.1449 54.8214 56.9881V56.9881ZM55.1362 57.2437H55.1441C55.1441 57.2529 55.1586 57.2621 55.1639 57.2713C55.1551 57.2612 55.1454 57.2519 55.1349 57.2437H55.1362ZM110.269 56.8616C109.679 57.4228 108.789 57.6837 107.911 57.8141C98.0572 59.2763 88.06 60.0166 78.0984 59.6899C70.9691 59.4462 63.9148 58.6545 56.8566 
57.6573C56.165 57.5598 55.4155 57.4334 54.9399 56.9236C54.0441 55.9619 54.4841 54.0254 54.7173 52.8636C54.9307 51.7992 55.3391 50.3804 56.605 50.2289C58.581 49.9971 60.8758 50.8309 62.8307 51.1273C65.1843 51.4865 67.5467 51.7741 69.9179 51.9902C80.0375 52.9123 90.3271 52.7687 100.402 51.4198C102.238 51.173 104.068 50.8863 105.891 50.5596C107.516 50.2684 109.316 49.7218 110.298 51.404C110.971 52.55 111.06 54.0834 110.956 55.3783C110.924 55.9425 110.678 56.4732 110.267 56.8616H110.269Z' fill='%230D0C22'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M170.036 84.2397C169.461 85.3378 168.67 86.2942 167.663 87.1057C166.656 87.9178 165.482 88.579 164.139 89.0881C162.797 89.5984 161.446 89.9408 160.088 90.1153C158.729 90.2905 157.41 90.2753 156.133 90.0674C154.854 89.8608 153.766 89.439 152.872 88.8014L153.88 78.3397C154.806 78.0216 155.972 77.6949 157.379 77.3604C158.785 77.0264 160.231 76.787 161.718 76.644C163.205 76.5004 164.61 76.5173 165.937 76.6919C167.263 76.867 168.31 77.2888 169.077 77.9579C169.493 78.3397 169.845 78.7537 170.132 79.1997C170.42 79.6458 170.595 80.1076 170.66 80.5852C170.819 81.9227 170.612 83.1409 170.036 84.2397ZM155.413 61.9545C156.084 61.5406 156.892 61.1739 157.834 60.8551C158.777 60.5376 159.744 60.3139 160.735 60.1867C161.725 60.06 162.692 60.043 163.636 60.1388C164.578 60.2345 165.41 60.497 166.129 60.9267C166.848 61.357 167.383 61.9782 167.735 62.7897C168.086 63.6024 168.182 64.6296 168.022 65.8714C167.895 66.8587 167.502 67.695 166.848 68.3793C166.193 69.0647 165.393 69.6374 164.451 70.0993C163.508 70.5617 162.509 70.9277 161.455 71.1974C160.399 71.4689 159.384 71.6683 158.41 71.795C157.435 71.9229 156.588 72.0029 155.869 72.0338C155.15 72.0659 154.678 72.0816 154.454 72.0816L155.413 61.9545ZM175.214 77.4798C174.703 76.3658 174.016 75.3864 173.153 74.5416C172.29 73.698 171.266 73.0853 170.084 72.7029C170.595 72.2889 171.099 71.6362 171.595 70.7441C172.09 69.8532 172.513 68.8811 172.865 67.8302C173.216 66.7787 173.457 65.7205 
173.584 64.6533C173.711 63.5866 173.663 62.6709 173.441 61.906C172.896 59.9958 172.042 58.4988 170.875 57.4158C169.708 56.3334 168.35 55.5849 166.8 55.1704C165.249 54.7577 163.54 54.6692 161.67 54.908C159.8 55.1467 157.89 55.6164 155.941 56.317C155.941 56.1582 155.957 55.991 155.989 55.8158C156.02 55.6413 156.036 55.4576 156.036 55.2661C156.036 54.7886 155.797 54.3752 155.317 54.0243C154.838 53.674 154.287 53.4674 153.664 53.4031C153.04 53.3401 152.433 53.4746 151.841 53.8092C151.25 54.1437 150.842 54.7577 150.619 55.6479C150.363 58.5146 150.107 61.4927 149.852 64.5812C149.596 67.6708 149.324 70.792 149.037 73.9453C148.749 77.0979 148.461 80.227 148.174 83.3318C147.886 86.4372 147.598 89.4226 147.311 92.2886C147.407 93.1486 147.646 93.8177 148.03 94.2953C148.413 94.7734 148.861 95.0601 149.372 95.1553C149.883 95.251 150.419 95.1625 150.978 94.8922C151.537 94.6225 152.025 94.1516 152.441 93.4832C153.719 94.1838 155.158 94.6377 156.756 94.845C158.354 95.0516 159.975 95.0516 161.623 94.845C163.268 94.6377 164.89 94.248 166.488 93.6741C168.086 93.1013 169.541 92.3844 170.851 91.525C172.162 90.665 173.264 89.685 174.16 88.5869C175.054 87.4875 175.646 86.3014 175.933 85.0281C176.221 83.7221 176.301 82.4167 176.173 81.1106C176.045 79.8052 175.725 78.5955 175.214 77.4798Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M221.989 102.702C221.814 103.753 221.565 104.86 221.246 106.023C220.926 107.184 220.551 108.244 220.12 109.2C219.688 110.155 219.209 110.926 218.682 111.516C218.154 112.105 217.586 112.352 216.979 112.257C216.5 112.192 216.196 111.89 216.069 111.349C215.94 110.807 215.94 110.138 216.069 109.343C216.196 108.546 216.443 107.646 216.811 106.643C217.179 105.64 217.627 104.644 218.154 103.658C218.682 102.67 219.281 101.723 219.952 100.815C220.623 99.9082 221.326 99.1512 222.061 98.5464C222.221 98.7373 222.293 99.2149 222.277 99.9797C222.26 100.744 222.165 101.652 221.989 102.702ZM238.243 81.9697C237.811 81.4921 237.284 81.2218 236.66 
81.1576C236.037 81.0939 235.405 81.4442 234.767 82.2085C234.351 82.9727 233.823 83.7054 233.184 84.406C232.545 85.1072 231.882 85.7436 231.195 86.3169C230.507 86.8896 229.852 87.3841 229.229 87.7975C228.606 88.212 228.118 88.5144 227.767 88.7053C227.639 87.6866 227.566 86.5878 227.551 85.409C227.534 84.2308 227.559 83.0369 227.623 81.8266C227.718 80.1067 227.918 78.3715 228.222 76.6194C228.526 74.868 228.965 73.148 229.541 71.4595C229.541 70.5686 229.332 69.8438 228.917 69.2862C228.501 68.7293 227.998 68.3784 227.407 68.2353C226.815 68.0923 226.209 68.1717 225.585 68.4741C224.962 68.7771 224.427 69.3268 223.979 70.122C223.596 71.1735 223.156 72.3516 222.661 73.6571C222.165 74.9631 221.606 76.2928 220.983 77.6461C220.359 79.0006 219.664 80.3139 218.897 81.5873C218.13 82.8618 217.291 83.9927 216.38 84.9793C215.469 85.9666 214.478 86.7393 213.408 87.2963C212.336 87.8538 211.179 88.1005 209.932 88.0369C209.356 87.8775 208.94 87.4478 208.685 86.7466C208.429 86.0466 208.277 85.1702 208.23 84.1193C208.182 83.0684 208.23 81.9139 208.373 80.6557C208.517 79.3982 208.709 78.1479 208.949 76.9061C209.188 75.6637 209.452 74.4855 209.739 73.371C210.027 72.2565 210.298 71.3165 210.554 70.5523C210.938 69.6292 210.938 68.8559 210.554 68.2353C210.171 67.6141 209.644 67.2008 208.973 66.9929C208.302 66.7863 207.598 66.7947 206.863 67.0172C206.128 67.2402 205.6 67.7335 205.281 68.4977C204.737 69.8044 204.241 71.2686 203.794 72.8928C203.347 74.5171 202.987 76.1976 202.716 77.9328C202.444 79.6691 202.291 81.3891 202.26 83.0927C202.258 83.2036 202.263 83.309 202.263 83.4193C201.566 85.2708 200.902 86.6702 200.271 87.6066C199.456 88.8174 198.536 89.3429 197.514 89.1829C197.065 88.992 196.771 88.5465 196.627 87.8453C196.482 87.1453 196.435 86.2854 196.482 85.2654C196.531 84.2472 196.651 83.0927 196.842 81.8024C197.035 80.5127 197.273 79.1752 197.561 77.7897C197.849 76.4037 198.153 75.0116 198.472 73.6098C198.792 72.2086 199.079 70.8868 199.336 69.6444C199.304 68.5299 198.976 67.6784 198.352 
67.0887C197.73 66.5002 196.858 66.2693 195.74 66.396C194.973 66.7147 194.405 67.1293 194.038 67.6384C193.67 68.1474 193.374 68.8008 193.151 69.5965C193.022 70.0111 192.831 70.8389 192.575 72.0813C192.319 73.3225 191.992 74.7486 191.592 76.3564C191.193 77.9655 190.721 79.6449 190.178 81.3963C189.635 83.1478 189.027 84.7333 188.357 86.1496C187.685 87.5666 186.95 88.7053 186.151 89.5653C185.352 90.4247 184.489 90.7756 183.562 90.6162C183.05 90.5205 182.723 89.995 182.579 89.0399C182.435 88.0841 182.412 86.9066 182.507 85.5048C182.603 84.1036 182.795 82.5666 183.082 80.8951C183.37 79.223 183.665 77.6388 183.969 76.1413C184.273 74.6449 184.553 73.3225 184.809 72.1765C185.064 71.0298 185.24 70.2656 185.336 69.8838C185.336 68.9602 185.127 68.2202 184.713 67.662C184.297 67.1056 183.794 66.7547 183.202 66.6111C182.61 66.4681 182.003 66.5475 181.381 66.8499C180.757 67.1529 180.222 67.7026 179.774 68.4977C179.614 69.3577 179.406 70.3535 179.151 71.4838C178.895 72.614 178.648 73.7765 178.408 74.971C178.168 76.1655 177.944 77.3358 177.737 78.4824C177.529 79.6291 177.377 80.6321 177.281 81.4921C177.217 82.1606 177.145 82.9812 177.066 83.9521C176.985 84.9242 176.945 85.9508 176.945 87.0332C176.945 88.1169 177.025 89.1914 177.186 90.258C177.345 91.3253 177.633 92.3047 178.048 93.1956C178.463 94.0877 179.047 94.8198 179.799 95.3931C180.549 95.9664 181.5 96.2846 182.651 96.3489C183.833 96.4119 184.864 96.3252 185.744 96.0858C186.622 95.847 187.421 95.4725 188.141 94.9628C188.86 94.4543 189.515 93.8489 190.107 93.1477C190.697 92.4477 191.281 91.6835 191.856 90.855C192.4 92.0659 193.103 93.0047 193.966 93.6737C194.829 94.3422 195.74 94.741 196.699 94.8677C197.657 94.9943 198.633 94.8604 199.624 94.4616C200.614 94.064 201.509 93.3871 202.308 92.4313C202.835 91.8453 203.331 91.1792 203.797 90.4429C203.995 90.7877 204.205 91.1204 204.442 91.4277C205.225 92.4477 206.288 93.1477 207.631 93.5301C209.069 93.9125 210.474 93.9768 211.849 93.7216C213.223 93.4671 214.534 93.0047 215.78 
92.3362C217.027 91.6671 218.185 90.8635 219.257 89.9235C220.327 88.9841 221.262 88.0053 222.061 86.9854C222.029 87.7181 222.013 88.4114 222.013 89.0635C222.013 89.7168 221.997 90.4247 221.966 91.1895C220.367 92.3047 218.857 93.6422 217.435 95.2022C216.012 96.7622 214.765 98.4264 213.695 100.194C212.624 101.961 211.785 103.753 211.179 105.568C210.571 107.384 210.275 109.08 210.291 110.657C210.307 112.233 210.682 113.61 211.418 114.788C212.152 115.967 213.351 116.81 215.013 117.32C216.74 117.862 218.257 117.877 219.569 117.368C220.879 116.858 222.021 116.014 222.996 114.836C223.971 113.658 224.77 112.233 225.394 110.561C226.017 108.889 226.512 107.145 226.88 105.33C227.247 103.515 227.479 101.73 227.575 99.9797C227.671 98.2276 227.671 96.6664 227.575 95.2974C230.324 94.1513 232.577 92.7022 234.335 90.9501C236.093 89.1999 237.547 87.352 238.698 85.409C239.049 84.9314 239.169 84.3581 239.058 83.6896C238.945 83.0206 238.674 82.4472 238.243 81.9697Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M298.724 78.9135C298.82 78.1814 298.964 77.4087 299.155 76.5966C299.347 75.7845 299.587 74.996 299.875 74.2318C300.162 73.4676 300.498 72.807 300.882 72.2494C301.265 71.6924 301.673 71.2943 302.104 71.0549C302.536 70.8167 302.974 70.8403 303.423 71.1264C303.902 71.4137 304.197 72.0185 304.31 72.9415C304.421 73.8663 304.31 74.853 303.974 75.9039C303.638 76.9554 303.039 77.942 302.176 78.8657C301.313 79.7899 300.146 80.3941 298.676 80.6808C298.612 80.236 298.628 79.6463 298.724 78.9135ZM315.336 80.8717C314.809 80.7135 314.306 80.6972 313.826 80.8244C313.347 80.9517 313.043 81.2862 312.916 81.8281C312.659 82.8468 312.251 83.8898 311.692 84.9565C311.133 86.0238 310.446 87.0346 309.632 87.9904C308.817 88.9455 307.897 89.7898 306.875 90.5219C305.851 91.2546 304.781 91.78 303.662 92.0982C302.543 92.4491 301.616 92.4885 300.882 92.2176C300.146 91.9479 299.563 91.4855 299.132 90.8328C298.7 90.1801 298.388 89.3916 298.197 88.468C298.005 87.5443 297.893 86.5892 
297.861 85.6013C299.683 85.7292 301.305 85.4032 302.728 84.622C304.149 83.8426 305.356 82.8068 306.347 81.5171C307.337 80.2275 308.089 78.7784 308.6 77.1699C309.111 75.5621 309.399 73.9615 309.463 72.3688C309.495 70.8718 309.272 69.6064 308.792 68.5713C308.313 67.5367 307.665 66.7313 306.85 66.1586C306.036 65.5853 305.1 65.2507 304.046 65.1556C302.992 65.0598 301.92 65.2034 300.833 65.5853C299.522 66.0313 298.412 66.7555 297.501 67.7592C296.59 68.7622 295.831 69.9252 295.224 71.2464C294.617 72.5682 294.137 73.993 293.786 75.5215C293.434 77.0505 293.178 78.5554 293.019 80.0366C292.875 81.3656 292.798 82.6365 292.771 83.8632C292.702 84.0189 292.636 84.1686 292.563 84.3353C292.067 85.4668 291.491 86.5734 290.837 87.6558C290.182 88.7389 289.454 89.6467 288.656 90.3788C287.857 91.1116 287.026 91.3661 286.163 91.1431C285.651 91.0164 285.372 90.4261 285.324 89.3758C285.276 88.3243 285.331 87.0189 285.491 85.4583C285.651 83.8983 285.835 82.2093 286.043 80.3941C286.25 78.579 286.354 76.8439 286.354 75.1875C286.354 73.7542 286.082 72.3773 285.539 71.0549C284.995 69.7343 284.252 68.6349 283.31 67.7592C282.367 66.8828 281.272 66.3016 280.026 66.0156C278.779 65.7283 277.437 65.9198 275.999 66.5883C274.56 67.2574 273.417 68.1967 272.571 69.407C271.723 70.6179 270.948 71.8912 270.245 73.2288C269.989 72.2094 269.614 71.2628 269.118 70.3864C268.623 69.5107 268.016 68.7464 267.297 68.0931C266.577 67.441 265.769 66.9313 264.876 66.5646C263.981 66.1992 263.037 66.0156 262.046 66.0156C261.088 66.0156 260.201 66.1992 259.386 66.5646C258.571 66.9313 257.828 67.4004 257.156 67.9737C256.485 68.5476 255.878 69.1919 255.334 69.9088C254.791 70.6252 254.311 71.3343 253.896 72.0343C253.831 71.2064 253.76 70.4822 253.681 69.8603C253.6 69.2398 253.456 68.7143 253.249 68.2846C253.041 67.8543 252.746 67.5283 252.362 67.3052C251.978 67.0828 251.435 66.9707 250.732 66.9707C250.38 66.9707 250.028 67.0422 249.677 67.1852C249.325 67.3289 249.013 67.5283 248.742 67.7828C248.47 68.0386 248.263 68.3482 
248.119 68.7143C247.975 69.0804 247.936 69.5028 247.999 69.9803C248.031 70.3312 248.119 70.7525 248.263 71.2464C248.406 71.7403 248.542 72.3858 248.67 73.1809C248.798 73.9773 248.902 74.9409 248.982 76.0712C249.062 77.2021 249.085 78.5875 249.054 80.2275C249.021 81.8681 248.902 83.7862 248.694 85.9837C248.486 88.1813 248.158 90.7291 247.711 93.6267C247.647 94.2957 247.903 94.8376 248.479 95.2515C249.054 95.6648 249.709 95.9036 250.444 95.9678C251.179 96.0315 251.875 95.9036 252.53 95.586C253.185 95.2666 253.561 94.7097 253.656 93.9139C253.752 92.417 253.936 90.8249 254.208 89.1364C254.479 87.4492 254.815 85.7771 255.215 84.1207C255.614 82.465 256.069 80.8887 256.581 79.3911C257.092 77.8942 257.66 76.573 258.283 75.4263C258.907 74.2797 259.554 73.3645 260.225 72.6797C260.896 71.9949 261.599 71.6524 262.335 71.6524C263.229 71.6524 263.924 72.0579 264.42 72.87C264.915 73.6827 265.266 74.7263 265.475 75.999C265.682 77.2736 265.778 78.6675 265.763 80.1796C265.746 81.6923 265.682 83.1492 265.571 84.5504C265.459 85.9522 265.331 87.2019 265.187 88.3007C265.043 89.3995 264.939 90.1564 264.876 90.5697C264.876 91.3025 265.155 91.8831 265.714 92.3134C266.273 92.743 266.896 92.9982 267.584 93.0776C268.272 93.1576 268.918 93.0297 269.526 92.6952C270.133 92.3606 270.485 91.7964 270.581 90.9994C270.9 88.7067 271.34 86.4062 271.899 84.0971C272.458 81.7881 273.098 79.7184 273.817 77.8869C274.536 76.0554 275.335 74.5585 276.214 73.3961C277.093 72.2343 278.028 71.6524 279.019 71.6524C279.53 71.6524 279.922 72.0033 280.193 72.7033C280.465 73.4039 280.601 74.3591 280.601 75.5694C280.601 76.4615 280.529 77.3772 280.386 78.3166C280.241 79.256 280.074 80.2275 279.882 81.2305C279.69 82.2341 279.522 83.2608 279.378 84.3117C279.235 85.3632 279.163 86.4613 279.163 87.608C279.163 88.4043 279.243 89.3279 279.403 90.3788C279.562 91.4291 279.865 92.4255 280.313 93.3642C280.761 94.3042 281.376 95.1 282.16 95.7527C282.943 96.4054 283.941 96.7321 285.155 96.7321C286.978 96.7321 288.591 96.3418 
289.998 95.5618C291.404 94.7818 292.611 93.763 293.618 92.5049C293.67 92.4388 293.718 92.3685 293.769 92.3031C293.846 92.4891 293.914 92.6861 294.001 92.863C294.688 94.2642 295.623 95.3466 296.806 96.1115C297.988 96.8757 299.379 97.2975 300.978 97.3775C302.575 97.4563 304.317 97.1618 306.204 96.4933C307.609 95.9836 308.832 95.3466 309.871 94.5824C310.909 93.8182 311.844 92.8867 312.675 91.7879C313.507 90.6891 314.265 89.4231 314.953 87.9904C315.641 86.5565 316.335 84.9171 317.038 83.0692C317.166 82.5608 317.046 82.1068 316.679 81.7081C316.311 81.3105 315.864 81.0317 315.336 80.8717Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M341.393 75.5432C341.233 76.4832 341.018 77.5189 340.746 78.6486C340.474 79.7795 340.131 80.9498 339.715 82.1601C339.3 83.3703 338.788 84.4612 338.181 85.4321C337.574 86.4042 336.878 87.1757 336.096 87.7491C335.312 88.3224 334.41 88.5612 333.387 88.4654C332.875 88.4024 332.483 88.0521 332.212 87.4145C331.94 86.7782 331.797 85.9655 331.78 84.9782C331.764 83.9915 331.852 82.9085 332.044 81.7298C332.236 80.5522 332.531 79.3971 332.932 78.2662C333.331 77.1365 333.818 76.0929 334.393 75.1371C334.969 74.182 335.632 73.4414 336.383 72.916C337.134 72.3905 337.958 72.1445 338.852 72.1754C339.747 72.2075 340.706 72.6529 341.729 73.5129C341.664 73.9275 341.553 74.6044 341.393 75.5432ZM358.437 79.1977C357.941 78.9431 357.43 78.888 356.903 79.031C356.376 79.174 356 79.6601 355.777 80.488C355.649 81.3801 355.361 82.4304 354.914 83.6406C354.466 84.8509 353.914 85.9982 353.26 87.08C352.604 88.163 351.853 89.063 351.006 89.7793C350.159 90.4963 349.256 90.823 348.298 90.7581C347.498 90.6951 346.938 90.289 346.62 89.5406C346.299 88.7921 346.132 87.8533 346.116 86.7218C346.099 85.5921 346.212 84.3182 346.451 82.9007C346.691 81.4837 346.979 80.0746 347.314 78.6722C347.65 77.2716 347.994 75.9256 348.346 74.6359C348.697 73.3463 348.984 72.2554 349.209 71.3639C349.464 70.5675 349.384 69.8912 348.969 69.333C348.553 68.7766 348.034 68.3778 
347.411 68.1391C346.787 67.9003 346.155 67.8366 345.516 67.9481C344.877 68.0597 344.462 68.4021 344.27 68.9748C342.384 67.3506 340.57 66.4748 338.829 66.3476C337.086 66.2203 335.48 66.6027 334.01 67.4942C332.539 68.3857 331.237 69.6754 330.103 71.3639C328.968 73.0523 328.049 74.8911 327.345 76.8814C326.642 78.8716 326.203 80.9025 326.027 82.9722C325.851 85.0424 325.987 86.9297 326.435 88.6333C326.883 90.3369 327.673 91.7308 328.808 92.8126C329.942 93.8956 331.485 94.4375 333.435 94.4375C334.298 94.4375 335.129 94.2623 335.928 93.912C336.726 93.5611 337.462 93.1472 338.133 92.6696C338.804 92.192 339.395 91.6902 339.908 91.1648C340.418 90.6393 340.818 90.2018 341.106 89.8509C341.329 90.9975 341.697 91.9696 342.209 92.7654C342.719 93.5611 343.303 94.215 343.958 94.7235C344.613 95.2326 345.301 95.6071 346.02 95.8465C346.739 96.0853 347.435 96.2047 348.105 96.2047C349.608 96.2047 351.013 95.695 352.325 94.6756C353.635 93.6575 354.81 92.4066 355.849 90.926C356.887 89.4448 357.743 87.8848 358.413 86.2442C359.085 84.6043 359.532 83.1473 359.756 81.8728C359.98 81.3952 359.939 80.894 359.636 80.3686C359.332 79.8431 358.933 79.4534 358.437 79.1977Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M444.738 105.571C444.467 106.653 444.043 107.57 443.467 108.318C442.892 109.066 442.173 109.456 441.31 109.489C440.767 109.52 440.351 109.233 440.063 108.629C439.776 108.023 439.576 107.243 439.464 106.288C439.352 105.332 439.304 104.265 439.32 103.087C439.336 101.909 439.384 100.746 439.464 99.5996C439.543 98.4536 439.64 97.3857 439.752 96.3991C439.863 95.4112 439.951 94.6482 440.015 94.1064C441.102 94.2336 442.006 94.7027 442.724 95.5154C443.443 96.3275 443.995 97.2906 444.378 98.4057C444.762 99.5202 444.985 100.723 445.05 102.012C445.113 103.302 445.009 104.488 444.738 105.571ZM427.382 105.571C427.111 106.653 426.687 107.57 426.112 108.318C425.537 109.066 424.817 109.456 423.954 109.489C423.411 109.52 422.996 109.233 422.708 108.629C422.42 108.023 422.22 
107.243 422.109 106.288C421.996 105.332 421.948 104.265 421.965 103.087C421.98 101.909 422.028 100.746 422.109 99.5996C422.188 98.4536 422.284 97.3857 422.396 96.3991C422.508 95.4112 422.595 94.6482 422.66 94.1064C423.746 94.2336 424.65 94.7027 425.368 95.5154C426.088 96.3275 426.639 97.2906 427.023 98.4057C427.407 99.5202 427.63 100.723 427.694 102.012C427.757 103.302 427.653 104.488 427.382 105.571ZM409.572 78.4375C409.539 79.2011 409.467 79.8781 409.355 80.4672C409.243 81.0575 409.092 81.4308 408.9 81.5902C408.548 81.3987 408.116 80.906 407.605 80.109C407.094 79.3133 406.695 78.4127 406.406 77.4096C406.119 76.4066 406.03 75.42 406.143 74.4479C406.254 73.477 406.758 72.7212 407.653 72.1788C408.004 71.9879 408.308 72.0594 408.564 72.394C408.82 72.7285 409.027 73.2139 409.188 73.8509C409.347 74.4885 409.458 75.2206 409.523 76.0485C409.587 76.8769 409.603 77.6727 409.572 78.4375ZM405.328 87.9677C404.832 88.4925 404.28 88.9464 403.674 89.3289C403.066 89.7113 402.443 89.9979 401.804 90.1889C401.164 90.3804 400.589 90.4276 400.078 90.3319C398.64 90.0458 397.537 89.424 396.77 88.4689C396.003 87.5137 395.515 86.3913 395.308 85.1017C395.1 83.8114 395.123 82.4338 395.38 80.969C395.635 79.5042 396.066 78.143 396.674 76.8848C397.281 75.6266 398.017 74.5436 398.879 73.6364C399.742 72.7285 400.685 72.1637 401.708 71.94C401.324 73.5642 401.197 75.2448 401.324 76.98C401.452 78.7157 401.868 80.3478 402.571 81.8762C403.018 82.8011 403.554 83.6441 404.177 84.4083C404.801 85.1732 405.56 85.8259 406.455 86.3671C406.199 86.9089 405.823 87.4422 405.328 87.9677ZM458.378 78.9151C458.474 78.183 458.617 77.4096 458.81 76.5975C459.001 75.786 459.241 74.9976 459.528 74.2333C459.816 73.4685 460.152 72.8079 460.536 72.2509C460.92 71.694 461.326 71.2952 461.758 71.0564C462.19 70.8176 462.629 70.8413 463.076 71.1279C463.556 71.4152 463.851 72.02 463.963 72.943C464.075 73.8673 463.963 74.8539 463.628 75.9054C463.292 76.9563 462.693 77.9436 461.83 78.8666C460.968 79.7914 459.8 80.3957 458.33 
80.6823C458.266 80.2369 458.282 79.6478 458.378 78.9151ZM477.7 78.9151C477.796 78.183 477.939 77.4096 478.131 76.5975C478.323 75.786 478.563 74.9976 478.851 74.2333C479.138 73.4685 479.473 72.8079 479.857 72.2509C480.241 71.694 480.649 71.2952 481.08 71.0564C481.512 70.8176 481.951 70.8413 482.398 71.1279C482.878 71.4152 483.173 72.02 483.285 72.943C483.397 73.8673 483.285 74.8539 482.95 75.9054C482.614 76.9563 482.015 77.9436 481.152 78.8666C480.289 79.7914 479.122 80.3957 477.652 80.6823C477.588 80.2369 477.604 79.6478 477.7 78.9151ZM495.655 81.7096C495.287 81.312 494.84 81.0332 494.313 80.8732C493.785 80.7144 493.282 80.6987 492.802 80.826C492.323 80.9532 492.018 81.2878 491.891 81.829C491.635 82.8484 491.228 83.8914 490.669 84.9574C490.109 86.0253 489.422 87.0362 488.607 87.9913C487.792 88.9464 486.873 89.7913 485.851 90.5234C484.827 91.2561 483.757 91.7816 482.639 92.0991C481.519 92.4506 480.592 92.49 479.857 92.2191C479.122 91.9488 478.539 91.487 478.107 90.8343C477.676 90.181 477.365 89.3931 477.172 88.4689C476.981 87.5459 476.868 86.5907 476.837 85.6029C478.659 85.7307 480.281 85.4047 481.703 84.6235C483.125 83.8435 484.332 82.8077 485.324 81.5181C486.314 80.229 487.065 78.7799 487.576 77.1715C488.087 75.563 488.375 73.963 488.44 72.3703C488.471 70.8734 488.247 69.6073 487.768 68.5722C487.289 67.5377 486.642 66.7328 485.827 66.1601C485.011 65.5862 484.077 65.2522 483.021 65.1565C481.967 65.0607 480.896 65.205 479.809 65.5862C478.498 66.0328 477.388 66.7571 476.478 67.7601C475.567 68.7637 474.807 69.9267 474.2 71.2473C473.592 72.5697 473.113 73.9939 472.761 75.523C472.409 77.0515 472.154 78.5569 471.995 80.0375C471.839 81.4744 471.755 82.8496 471.736 84.1659C471.615 84.4283 471.486 84.692 471.347 84.9574C470.787 86.0253 470.1 87.0362 469.285 87.9913C468.471 88.9464 467.551 89.7913 466.529 90.5234C465.506 91.2561 464.435 91.7816 463.317 92.0991C462.197 92.4506 461.271 92.49 460.536 92.2191C459.8 91.9488 459.217 91.487 458.786 90.8343C458.355 90.181 458.043 
89.3931 457.851 88.4689C457.659 87.5459 457.547 86.5907 457.515 85.6029C459.337 85.7307 460.959 85.4047 462.382 84.6235C463.803 83.8435 465.01 82.8077 466.001 81.5181C466.992 80.229 467.743 78.7799 468.254 77.1715C468.765 75.563 469.054 73.963 469.117 72.3703C469.149 70.8734 468.926 69.6073 468.447 68.5722C467.967 67.5377 467.319 66.7328 466.504 66.1601C465.689 65.5862 464.755 65.2522 463.7 65.1565C462.645 65.0607 461.574 65.205 460.488 65.5862C459.176 66.0328 458.066 66.7571 457.156 67.7601C456.245 68.7637 455.485 69.9267 454.878 71.2473C454.271 72.5697 453.792 73.9939 453.44 75.523C453.088 77.0515 452.832 78.5569 452.673 80.0375C452.582 80.8726 452.522 81.6823 452.477 82.4774C452.168 82.7393 451.867 83.0029 451.546 83.2617C450.444 84.1538 449.284 84.9574 448.07 85.6744C446.855 86.3913 445.592 86.9804 444.283 87.4422C442.971 87.904 441.629 88.1828 440.255 88.278L443.228 56.5578C443.42 55.8887 443.324 55.3003 442.94 54.7906C442.557 54.2809 442.061 53.9306 441.454 53.7397C440.847 53.5482 440.199 53.5645 439.512 53.787C438.824 54.0106 438.258 54.5203 437.81 55.3154C437.586 56.5263 437.354 58.182 437.115 60.2838C436.875 62.3856 436.635 64.6789 436.396 67.1631C436.156 69.6473 435.916 72.2109 435.677 74.8539C435.437 77.4981 435.229 79.966 435.053 82.2587C435.045 82.3605 435.039 82.4526 435.031 82.5532C434.751 82.7896 434.48 83.0277 434.19 83.2617C433.088 84.1538 431.928 84.9574 430.714 85.6744C429.499 86.3913 428.237 86.9804 426.927 87.4422C425.616 87.904 424.273 88.1828 422.899 88.278L425.872 56.5578C426.064 55.8887 425.968 55.3003 425.585 54.7906C425.201 54.2809 424.705 53.9306 424.098 53.7397C423.491 53.5482 422.843 53.5645 422.156 53.787C421.469 54.0106 420.902 54.5203 420.454 55.3154C420.23 56.5263 419.999 58.182 419.76 60.2838C419.519 62.3856 419.28 64.6789 419.04 67.1631C418.8 69.6473 418.561 72.2109 418.321 74.8539C418.082 77.4981 417.873 79.966 417.698 82.2587C417.694 82.3047 417.691 82.3465 417.687 82.3926C417.185 82.6247 416.638 82.8284 416.043 
82.9993C415.436 83.175 414.749 83.2786 413.982 83.3102C414.11 82.7362 414.213 82.0993 414.293 81.3987C414.373 80.6987 414.438 79.966 414.486 79.2011C414.534 78.4375 414.549 77.6727 414.534 76.9084C414.517 76.1436 414.477 75.4436 414.414 74.806C414.253 73.4376 413.958 72.1394 413.527 70.9128C413.095 69.6873 412.512 68.6607 411.777 67.8316C411.041 67.0037 410.123 66.4462 409.019 66.1601C407.917 65.8734 406.63 65.9686 405.161 66.4462C402.986 66.1601 401.029 66.3595 399.287 67.0437C397.545 67.7292 396.034 68.7237 394.756 70.0291C393.478 71.3358 392.431 72.8715 391.616 74.6394C390.801 76.4066 390.257 78.2224 389.986 80.0848C389.871 80.8744 389.815 81.6605 389.798 82.4447C389.303 83.4544 388.761 84.3368 388.164 85.0774C387.317 86.1283 386.438 86.9883 385.527 87.6568C384.616 88.3258 383.713 88.8355 382.819 89.1858C381.923 89.5367 381.124 89.7755 380.421 89.9022C379.59 90.0616 378.791 90.0779 378.024 89.9501C377.257 89.8234 376.553 89.4567 375.915 88.8513C375.403 88.4058 375.011 87.6889 374.74 86.7016C374.468 85.7144 374.309 84.5926 374.261 83.3338C374.213 82.0756 374.261 80.7617 374.404 79.3926C374.548 78.0236 374.795 76.7254 375.147 75.4994C375.499 74.2733 375.945 73.1746 376.49 72.2024C377.032 71.2322 377.672 70.5388 378.408 70.1249C378.822 70.1891 379.079 70.4352 379.175 70.8649C379.271 71.2952 379.294 71.8049 379.246 72.394C379.199 72.9836 379.127 73.5885 379.031 74.2091C378.935 74.8303 378.887 75.3485 378.887 75.7618C379.047 76.6218 379.358 77.2909 379.822 77.7684C380.285 78.246 380.805 78.5254 381.38 78.6042C381.955 78.6842 382.522 78.549 383.083 78.1981C383.641 77.8484 384.096 77.2909 384.449 76.526C384.48 76.5581 384.528 76.5739 384.592 76.5739L385.264 70.5073C385.455 69.6788 385.327 68.9467 384.88 68.3098C384.432 67.6728 383.841 67.3062 383.106 67.211C382.179 65.8734 380.924 65.165 379.342 65.085C377.76 65.0056 376.138 65.5231 374.476 66.6377C373.453 67.371 372.55 68.3813 371.767 69.671C370.983 70.9613 370.345 72.394 369.85 73.9703C369.353 75.5466 369.002 77.2115 
368.795 78.963C368.587 80.7144 368.547 82.4187 368.674 84.0738C368.802 85.7307 369.098 87.2913 369.562 88.7555C370.025 90.221 370.672 91.447 371.504 92.4337C372.207 93.2937 373.005 93.9233 373.9 94.3215C374.795 94.7197 375.73 94.9658 376.705 95.0615C377.68 95.1567 378.647 95.1167 379.606 94.9421C380.565 94.7676 381.476 94.5209 382.339 94.2015C383.457 93.7882 384.609 93.2621 385.791 92.6252C386.973 91.9888 388.108 91.224 389.195 90.3319C389.767 89.8628 390.317 89.3513 390.849 88.8028C391.091 89.4016 391.362 89.981 391.688 90.5234C392.551 91.9561 393.717 93.1191 395.188 94.0106C396.657 94.9021 398.464 95.3312 400.605 95.3003C402.907 95.2682 405.032 94.6876 406.982 93.5567C408.932 92.427 410.53 90.7616 411.777 88.5646C413.644 88.5646 415.481 88.258 417.287 87.6489C417.272 87.8416 417.256 88.0446 417.242 88.2307C417.115 89.9186 417.05 91.0646 417.05 91.67C417.019 92.7209 416.947 94.0185 416.835 95.5627C416.723 97.1075 416.651 98.7318 416.619 100.435C416.588 102.139 416.651 103.859 416.811 105.595C416.971 107.33 417.306 108.907 417.818 110.325C418.328 111.741 419.055 112.944 419.999 113.932C420.941 114.918 422.18 115.508 423.715 115.699C425.345 115.921 426.751 115.635 427.934 114.839C429.116 114.042 430.075 112.952 430.811 111.567C431.546 110.181 432.064 108.581 432.369 106.766C432.672 104.95 432.76 103.127 432.633 101.295C432.504 99.4639 432.168 97.7366 431.625 96.113C431.082 94.4882 430.33 93.1506 429.372 92.0991C429.948 91.9409 430.634 91.6385 431.434 91.1919C432.232 90.7464 433.055 90.2446 433.903 89.687C434.111 89.5501 434.316 89.4058 434.524 89.2652C434.446 90.3937 434.406 91.1985 434.406 91.67C434.375 92.7209 434.303 94.0185 434.19 95.5627C434.079 97.1075 434.007 98.7318 433.975 100.435C433.943 102.139 434.007 103.859 434.167 105.595C434.326 107.33 434.662 108.907 435.173 110.325C435.684 111.741 436.412 112.944 437.354 113.932C438.297 114.918 439.536 115.508 441.071 115.699C442.7 115.921 444.106 115.635 445.289 114.839C446.472 114.042 447.431 112.952 448.166 
111.567C448.901 110.181 449.42 108.581 449.724 106.766C450.028 104.95 450.115 103.127 449.988 101.295C449.86 99.4639 449.524 97.7366 448.982 96.113C448.437 94.4882 447.687 93.1506 446.727 92.0991C447.303 91.9409 447.99 91.6385 448.789 91.1919C449.588 90.7464 450.411 90.2446 451.259 89.687C451.699 89.3974 452.136 89.0986 452.573 88.7913C452.737 90.3488 453.091 91.7149 453.655 92.864C454.343 94.2658 455.277 95.3482 456.46 96.113C457.642 96.8766 459.033 97.299 460.632 97.3784C462.23 97.4572 463.971 97.1633 465.858 96.4942C467.264 95.9851 468.486 95.3482 469.525 94.5839C470.563 93.8191 471.498 92.8876 472.33 91.7894C472.378 91.7258 472.423 91.6567 472.47 91.5925C472.618 92.0385 472.782 92.467 472.977 92.864C473.665 94.2658 474.6 95.3482 475.782 96.113C476.964 96.8766 478.355 97.299 479.953 97.3784C481.551 97.4572 483.293 97.1633 485.179 96.4942C486.586 95.9851 487.808 95.3482 488.847 94.5839C489.885 93.8191 490.82 92.8876 491.652 91.7894C492.483 90.6901 493.241 89.424 493.929 87.9913C494.616 86.558 495.311 84.9186 496.015 83.0708C496.142 82.5617 496.022 82.1078 495.655 81.7096Z' fill='%230D0C23'/%3E%3C/svg%3E%0A");border-radius:6px;box-shadow:0px 2px 3px rgba(0,0,0,0.1)}:root[data-color="dark"] .btn-buymeacoffee,:root[data-color="night"] .btn-buymeacoffee{box-shadow:0px 2px 3px rgba(255,255,255,0.1)}.btn-close{background:var(--background-fg);border:1px dotted var(--border-color);border-radius:4px;cursor:pointer}.dropdown{position:relative}.dropdown-btn{display:flex;flex-direction:row;box-shadow:var(--box-shadow);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap}.dropdown-btn .icon-select{opacity:.4}.dropdown-menu{display:none;position:absolute;right:0;top:34px;min-width:100px;max-height:240px;overflow-x:auto;background:var(--background);color:var(--color3);box-shadow:var(--box-shadow2);z-index:1;border-radius:6px;padding:3px}.dropdown-menu.show{display:block}.dropdown-menu button,.dropdown-menu 
a{width:100%;display:flex;gap:2px;padding:6px;align-items:center;justify-content:center;cursor:pointer}.dropdown-menu button:hover,.dropdown-menu a:hover{background:var(--background-fg)}.chroma{font-size:.9em;color:var(--chroma-base05);background-color:var(--chroma-base00);border-radius:6px;padding:16px 24px;overflow-x:auto}.chroma .x{color:var(--chroma-base05)}.chroma .err{color:var(--chroma-base08)}.chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}.chroma .lntable{border-spacing:0;padding:0;margin:0;border:0;width:auto;overflow:auto;display:block}.chroma .hl{display:block;width:100%;background-color:var(--chroma-base02)}.chroma .lnt{margin-right:0.4em;padding:0 0.4em 0 0.4em}.chroma .ln{margin-right:0.4em;padding:0 0.4em 0 0.4em;border-right:1px solid var(--chroma-base0A)}.chroma .line{display:flex}.chroma .k{color:var(--chroma-base0E)}.chroma .kc{color:var(--chroma-base0E)}.chroma .kd{color:var(--chroma-base0E)}.chroma .kn{color:var(--chroma-base0E)}.chroma .kp{color:var(--chroma-base0D)}.chroma .kr{color:var(--chroma-base0E)}.chroma .kt{color:var(--chroma-base0E)}.chroma .n{color:var(--chroma-base05)}.chroma .na{color:var(--chroma-base05)}.chroma .nb{color:var(--chroma-base0D)}.chroma .bp{color:var(--chroma-base0D)}.chroma .nc{color:var(--chroma-base0A)}.chroma .no{color:var(--chroma-base09)}.chroma .nd{color:var(--chroma-base09)}.chroma .ni{color:var(--chroma-base0A)}.chroma .ne{color:var(--chroma-base0A)}.chroma .nf{color:var(--chroma-base05)}.chroma .fm{color:var(--chroma-base05)}.chroma .nl{color:var(--chroma-base08)}.chroma .nn{color:var(--chroma-base0A)}.chroma .nx{color:var(--chroma-base0D)}.chroma .py{color:var(--chroma-base08)}.chroma .nt{color:var(--chroma-base0D)}.chroma .nv{color:var(--chroma-base0D)}.chroma .vc{color:var(--chroma-base0D)}.chroma .vg{color:var(--chroma-base0D)}.chroma .vi{color:var(--chroma-base08)}.chroma .vm{color:var(--chroma-base0D)}.chroma .l{color:var(--chroma-base0B)}.chroma .ld{color:var(--chroma-base0B)}.chroma 
.s{color:var(--chroma-base0B)}.chroma .sa{color:var(--chroma-base0B)}.chroma .sb{color:var(--chroma-base0B)}.chroma .sc{color:var(--chroma-base0B)}.chroma .dl{color:var(--chroma-base0F)}.chroma .sd{color:var(--chroma-base03)}.chroma .s2{color:var(--chroma-base0B)}.chroma .se{color:var(--chroma-base0C)}.chroma .sh{color:var(--chroma-base0B)}.chroma .si{color:var(--chroma-base0F)}.chroma .sx{color:var(--chroma-base0B)}.chroma .sr{color:var(--chroma-base0C)}.chroma .s1{color:var(--chroma-base0B)}.chroma .ss{color:var(--chroma-base0B)}.chroma .m{color:var(--chroma-base09)}.chroma .mb{color:var(--chroma-base09)}.chroma .mf{color:var(--chroma-base09)}.chroma .mh{color:var(--chroma-base09)}.chroma .mi{color:var(--chroma-base09)}.chroma .il{color:var(--chroma-base09)}.chroma .mo{color:var(--chroma-base09)}.chroma .o{color:var(--chroma-base05)}.chroma .ow{color:var(--chroma-base05)}.chroma .p{color:var(--chroma-base05)}.chroma .c{color:var(--chroma-base03)}.chroma .ch{color:var(--chroma-base03)}.chroma .cm{color:var(--chroma-base03)}.chroma .c1{color:var(--chroma-base03)}.chroma .cs{color:var(--chroma-base03)}.chroma .cp{color:var(--chroma-base0F)}.chroma .cpf{color:var(--chroma-base0B)}.chroma .g{color:var(--chroma-base05)}.chroma .gd{color:var(--chroma-base08)}.chroma .ge{color:var(--chroma-base05);font-style:italic}.chroma .gr{color:var(--chroma-base05)}.chroma .gh{color:var(--chroma-base0D)}.chroma .gi{color:var(--chroma-base0B)}.chroma .go{color:var(--chroma-base05)}.chroma .gp{color:var(--chroma-base05)}.chroma .gs{color:var(--chroma-base05);font-weight:bold}.chroma .gu{color:var(--chroma-base0D)}.chroma .gt{color:var(--chroma-base05)}.chroma .gl{color:var(--chroma-base05);text-decoration:underline}.chroma .w{color:var(--chroma-base00)}html{font-family:var(--font-family);background:var(--background);color:var(--color);scroll-behavior:smooth;scroll-padding:2em} - -/*# sourceMappingURL=base.css.map */ \ No newline at end of file diff --git 
a/resources/_gen/assets/scss/scss/base.scss_7724f67189cff0c6ae476b070cf609b9.json b/resources/_gen/assets/scss/scss/base.scss_7724f67189cff0c6ae476b070cf609b9.json deleted file mode 100644 index 2388e9a..0000000 --- a/resources/_gen/assets/scss/scss/base.scss_7724f67189cff0c6ae476b070cf609b9.json +++ /dev/null @@ -1 +0,0 @@ -{"Target":"scss/base.css","MediaType":"text/css","Data":{}} \ No newline at end of file diff --git a/resources/_gen/assets/scss/scss/component/docsearch.scss_7724f67189cff0c6ae476b070cf609b9.content b/resources/_gen/assets/scss/scss/component/docsearch.scss_7724f67189cff0c6ae476b070cf609b9.content deleted file mode 100644 index 7b45a98..0000000 --- a/resources/_gen/assets/scss/scss/component/docsearch.scss_7724f67189cff0c6ae476b070cf609b9.content +++ /dev/null @@ -1,7 +0,0 @@ -/*! @docsearch/css 3.2.0 | MIT License | © Algolia, Inc. and contributors | https://docsearch.algolia.com | https://cdn.jsdelivr.net/npm/@docsearch/css@3 */:root{--docsearch-primary-color: #5468ff;--docsearch-spacing: 12px;--docsearch-icon-stroke-width: 1.4;--docsearch-highlight-color: var(--docsearch-primary-color);--docsearch-muted-color: #969faf;--docsearch-container-background: rgba(255, 255, 255, 0.1);--docsearch-logo-color: #5468ff;--docsearch-modal-width: 560px;--docsearch-modal-height: 600px;--docsearch-modal-shadow: inset 1px 1px 0 0 hsla(0, 0%, 100%, 0.5), 0 3px 8px 0 #555a64;--docsearch-searchbox-height: 56px;--docsearch-searchbox-focus-background: #fff;--docsearch-searchbox-shadow: inset 0 0 0 2px var(--docsearch-primary-color);--docsearch-hit-height: 56px;--docsearch-hit-color: #444950;--docsearch-hit-active-color: #fff;--docsearch-hit-background: #fff;--docsearch-hit-shadow: 0 1px 3px 0 #d4d9e1;--docsearch-footer-height: 44px;--docsearch-footer-shadow: 0 -1px 0 0 #e0e3e8, 0 -3px 6px 0 rgba(69, 98, 155, 0.12) -}:root[data-color="dark"]{--docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309;--docsearch-searchbox-focus-background: 
#000;--docsearch-hit-color: #bec3c9;--docsearch-hit-shadow: none;--docsearch-hit-background: #090a11;--docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2);--docsearch-muted-color: #7f8497 -}:root[data-color="night"]{--docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309;--docsearch-searchbox-focus-background: #000;--docsearch-hit-color: #bec3c9;--docsearch-hit-shadow: none;--docsearch-hit-background: #090a11;--docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2);--docsearch-muted-color: #7f8497 -}.DocSearch-Button{width:100%;line-height:1.6em;align-items:center;box-shadow:var(--box-shadow);border-radius:24px;color:var(--color);cursor:pointer;display:flex;justify-content:space-between;margin:0 12px;padding:3px 6px;user-select:none}.DocSearch-Button:active,.DocSearch-Button:focus,.DocSearch-Button:hover{background:var(--docsearch-searchbox-focus-background);box-shadow:var(--docsearch-searchbox-shadow);color:var(--color);outline:none}.DocSearch-Button-Container{align-items:center;display:flex}.DocSearch-Search-Icon{stroke-width:1.6}.DocSearch-Button-Placeholder{font-size:1rem;padding:0 12px 0 6px;color:var(--color3)}.DocSearch-Button-Keys{display:flex;min-width:calc(40px + .8em)}.DocSearch-Button-Key{align-items:center;border-radius:3px;color:var(--docsearch-muted-color);display:flex;height:18px;justify-content:center;margin-right:.4em;position:relative;border:1px solid var(--border-color);width:20px}@media (min-width: 1278px){.DocSearch-Button{width:80%;margin:0}}@media (min-width: 2558px){.DocSearch-Button{width:60%}}@media (min-width: 3838px){.DocSearch-Button{width:40%}}.DocSearch--active{overflow:hidden !important}.DocSearch-Container,.DocSearch-Container 
*{box-sizing:border-box}.DocSearch-Container{background-color:var(--docsearch-container-background);height:100vh;left:0;position:fixed;top:0;width:100vw;z-index:200;backdrop-filter:blur(var(--blur));-webkit-backdrop-filter:blur(var(--blur))}.DocSearch-Container a{text-decoration:none}.DocSearch-Link{appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;font:inherit;margin:0;padding:0}.DocSearch-Modal{background:var(--background);border-radius:6px;box-shadow:var(--docsearch-modal-shadow);flex-direction:column;margin:60px auto auto;max-width:var(--docsearch-modal-width);position:relative}.DocSearch-SearchBar{display:flex;padding:var(--docsearch-spacing) var(--docsearch-spacing) 0}.DocSearch-Form{align-items:center;background:var(--docsearch-searchbox-focus-background);border-radius:4px;box-shadow:var(--docsearch-searchbox-shadow);display:flex;height:var(--docsearch-searchbox-height);margin:0;padding:0 var(--docsearch-spacing);position:relative;width:100%}.DocSearch-Input{appearance:none;background:transparent;border:0;color:var(--docsearch-text-color);flex:1;font:inherit;font-size:1.2em;height:100%;outline:none;padding:0 0 0 8px;width:80%}.DocSearch-Input::placeholder{color:var(--docsearch-muted-color);opacity:1}.DocSearch-Input::-webkit-search-cancel-button,.DocSearch-Input::-webkit-search-decoration,.DocSearch-Input::-webkit-search-results-button,.DocSearch-Input::-webkit-search-results-decoration{display:none}.DocSearch-LoadingIndicator,.DocSearch-MagnifierLabel,.DocSearch-Reset{margin:0;padding:0}.DocSearch-MagnifierLabel,.DocSearch-Reset{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}.DocSearch-Container--Stalled .DocSearch-MagnifierLabel,.DocSearch-LoadingIndicator{display:none}.DocSearch-Container--Stalled .DocSearch-LoadingIndicator{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}@media screen and (prefers-reduced-motion: 
reduce){.DocSearch-Reset{animation:none;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;right:0;stroke-width:var(--docsearch-icon-stroke-width)}}.DocSearch-Reset{animation:fade-in .1s ease-in forwards;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;padding:2px;right:0;stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Reset[hidden]{display:none}.DocSearch-Reset:focus{outline:none}.DocSearch-Reset:hover{color:var(--docsearch-highlight-color)}.DocSearch-LoadingIndicator svg,.DocSearch-MagnifierLabel svg{height:24px;width:24px}.DocSearch-Cancel{display:none}.DocSearch-Dropdown{max-height:calc(var(--docsearch-modal-height) - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height));min-height:var(--docsearch-spacing);overflow-y:auto;overflow-y:overlay;padding:0 var(--docsearch-spacing);scrollbar-color:var(--docsearch-muted-color) var(--docsearch-modal-background);scrollbar-width:thin}.DocSearch-Dropdown::-webkit-scrollbar{width:12px}.DocSearch-Dropdown::-webkit-scrollbar-track{background:transparent}.DocSearch-Dropdown::-webkit-scrollbar-thumb{background-color:var(--docsearch-muted-color);border:3px solid var(--docsearch-modal-background);border-radius:20px}.DocSearch-Dropdown ul{list-style:none;margin:0;padding:0}.DocSearch-Label{font-size:.75em;line-height:1.6em}.DocSearch-Help,.DocSearch-Label{color:var(--docsearch-muted-color)}.DocSearch-Help{font-size:.9em;margin:0;user-select:none}.DocSearch-Title{font-size:1.2em}.DocSearch-Logo a{display:flex}.DocSearch-Logo svg{color:var(--docsearch-logo-color);margin-left:8px}.DocSearch-Hits:last-of-type{margin-bottom:24px}.DocSearch-Hits 
mark{background:none;color:var(--docsearch-highlight-color)}.DocSearch-HitsFooter{color:var(--docsearch-muted-color);display:flex;font-size:.85em;justify-content:center;margin-bottom:var(--docsearch-spacing);padding:var(--docsearch-spacing)}.DocSearch-HitsFooter a{border-bottom:1px solid;color:inherit}.DocSearch-Hit{border-radius:4px;display:flex;padding-bottom:4px;position:relative}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit--deleting{transition:none}}.DocSearch-Hit--deleting{opacity:0;transition:all .25s linear}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit--favoriting{transition:none}}.DocSearch-Hit--favoriting{transform:scale(0);transform-origin:top center;transition:all .25s linear;transition-delay:.25s}.DocSearch-Hit a{background:var(--docsearch-hit-background);border-radius:4px;box-shadow:var(--docsearch-hit-shadow);display:block;padding-left:var(--docsearch-spacing);width:100%}.DocSearch-Hit-source{background:var(--docsearch-modal-background);color:var(--docsearch-highlight-color);font-size:.85em;font-weight:600;line-height:32px;margin:0 -4px;padding:8px 4px 0;position:sticky;top:0;z-index:10}.DocSearch-Hit-Tree{color:var(--docsearch-muted-color);height:var(--docsearch-hit-height);opacity:.5;stroke-width:var(--docsearch-icon-stroke-width);width:24px}.DocSearch-Hit[aria-selected=true] a{background-color:var(--docsearch-highlight-color)}.DocSearch-Hit[aria-selected=true] mark{text-decoration:underline}.DocSearch-Hit-Container{align-items:center;color:var(--docsearch-hit-color);display:flex;flex-direction:row;height:var(--docsearch-hit-height);padding:0 var(--docsearch-spacing) 0 0}.DocSearch-Hit-icon{height:20px;width:20px}.DocSearch-Hit-action,.DocSearch-Hit-icon{color:var(--docsearch-muted-color);stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Hit-action{align-items:center;display:flex;height:22px;width:22px}.DocSearch-Hit-action 
svg{display:block;height:18px;width:18px}.DocSearch-Hit-action+.DocSearch-Hit-action{margin-left:6px}.DocSearch-Hit-action-button{appearance:none;background:none;border:0;border-radius:50%;color:inherit;cursor:pointer;padding:2px}svg.DocSearch-Hit-Select-Icon{display:none}.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Select-Icon{display:block}.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:rgba(0,0,0,0.2);transition:background-color .1s ease-in}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{transition:none}}.DocSearch-Hit-action-button:focus path,.DocSearch-Hit-action-button:hover path{fill:#fff}.DocSearch-Hit-content-wrapper{display:flex;flex:1 1 auto;flex-direction:column;font-weight:500;justify-content:center;line-height:1.2em;margin:0 8px;overflow-x:hidden;position:relative;text-overflow:ellipsis;white-space:nowrap;width:80%}.DocSearch-Hit-title{font-size:.9em}.DocSearch-Hit-path{color:var(--docsearch-muted-color);font-size:.75em}.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-action,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-icon,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-path,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-text,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-title,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Tree,.DocSearch-Hit[aria-selected=true] mark{color:var(--docsearch-hit-active-color) !important}@media screen and (prefers-reduced-motion: reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:rgba(0,0,0,0.2);transition:none}}.DocSearch-ErrorScreen,.DocSearch-NoResults,.DocSearch-StartScreen{font-size:.9em;margin:0 auto;padding:36px 0;text-align:center;width:80%}.DocSearch-Screen-Icon{color:var(--docsearch-muted-color);padding-bottom:12px}.DocSearch-NoResults-Prefill-List{display:inline-block;padding-bottom:24px;text-align:left}.DocSearch-NoResults-Prefill-List 
ul{display:inline-block;padding:8px 0 0}.DocSearch-NoResults-Prefill-List li{list-style-position:inside;list-style-type:"» "}.DocSearch-Prefill{appearance:none;background:none;border:0;border-radius:1em;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;font-size:1em;font-weight:700;padding:0}.DocSearch-Prefill:focus,.DocSearch-Prefill:hover{outline:none;text-decoration:underline}.DocSearch-Footer{align-items:center;border-radius:0 0 8px 8px;box-shadow:var(--docsearch-footer-shadow);display:flex;flex-direction:row-reverse;flex-shrink:0;height:var(--docsearch-footer-height);justify-content:space-between;padding:0 var(--docsearch-spacing);position:relative;user-select:none;width:100%;z-index:300}.DocSearch-Commands{color:var(--docsearch-muted-color);display:flex;list-style:none;margin:0;padding:0}.DocSearch-Commands li{align-items:center;display:flex}.DocSearch-Commands li:not(:last-of-type){margin-right:.8em}.DocSearch-Commands-Key{align-items:center;border-radius:2px;display:flex;height:18px;justify-content:center;margin-right:.4em;padding:0 0 1px;color:var(--docsearch-muted-color);border:1px solid var(--border-color);width:20px}@media (max-width: 768px){:root{--docsearch-spacing: 10px;--docsearch-footer-height: 40px - }.DocSearch-Dropdown{height:100%}.DocSearch-Container{height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh) * 100);position:absolute}.DocSearch-Footer{border-radius:0;bottom:0;position:absolute}.DocSearch-Hit-content-wrapper{display:flex;position:relative;width:80%}.DocSearch-Modal{border-radius:0;box-shadow:none;height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh) * 100);margin:0;max-width:100%;width:100%}.DocSearch-Dropdown{max-height:calc(var(--docsearch-vh, 1vh) * 100 - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - 
var(--docsearch-footer-height))}.DocSearch-Cancel{appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;flex:none;font:inherit;font-size:1em;font-weight:500;margin-left:var(--docsearch-spacing);outline:none;overflow:hidden;padding:0;user-select:none;white-space:nowrap}.DocSearch-Commands,.DocSearch-Hit-Tree{display:none}}@keyframes fade-in{0%{opacity:0}to{opacity:1}} - -/*# sourceMappingURL=docsearch.css.map */ \ No newline at end of file diff --git a/resources/_gen/assets/scss/scss/component/docsearch.scss_7724f67189cff0c6ae476b070cf609b9.json b/resources/_gen/assets/scss/scss/component/docsearch.scss_7724f67189cff0c6ae476b070cf609b9.json deleted file mode 100644 index c22d96a..0000000 --- a/resources/_gen/assets/scss/scss/component/docsearch.scss_7724f67189cff0c6ae476b070cf609b9.json +++ /dev/null @@ -1 +0,0 @@ -{"Target":"scss/component/docsearch.css","MediaType":"text/css","Data":{}} \ No newline at end of file diff --git a/resources/_gen/assets/scss/scss/home.scss_7724f67189cff0c6ae476b070cf609b9.content b/resources/_gen/assets/scss/scss/home.scss_7724f67189cff0c6ae476b070cf609b9.content deleted file mode 100644 index 8de9d5c..0000000 --- a/resources/_gen/assets/scss/scss/home.scss_7724f67189cff0c6ae476b070cf609b9.content +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Docura (https://docura.github.io/) - * Copyright 2022-2023 Dumindu Madunuwan - * Licensed under the MIT License. 
- */*:where(:not(html, iframe, canvas, img, svg, video, audio, pre, code):not(svg *, symbol *)){all:unset;display:revert}*,*::before,*::after{box-sizing:border-box}html{-moz-text-size-adjust:none;-webkit-text-size-adjust:none;text-size-adjust:none}a,button{cursor:revert}ol,ul,menu{list-style:none}img{max-inline-size:100%;max-block-size:100%}table{border-collapse:collapse}input,textarea{-webkit-user-select:auto}textarea{white-space:revert}meter{-webkit-appearance:revert;appearance:revert}:where(pre){all:revert;box-sizing:border-box}::placeholder{color:unset}::marker{content:initial}:where([hidden]){display:none}:where([contenteditable]:not([contenteditable="false"])){-moz-user-modify:read-write;-webkit-user-modify:read-write;overflow-wrap:break-word;-webkit-line-break:after-white-space;-webkit-user-select:auto}:where([draggable="true"]){-webkit-user-drag:element}:where(dialog:modal){all:revert;box-sizing:border-box}pre,code{margin:0}:root{--site-header-height: 46px;--site-footer-height: 46px}@media (min-width: 1025px) and (max-width: 1280px),(min-width: 1024px) and (max-width: 1280px) and (orientation: portrait){:root{--site-header-height: 60px;--site-footer-height: 60px}}@media (min-width: 1281px){:root{--site-header-height: 80px;--site-footer-height: 80px}}body{font-family:var(--font-family);background:var(--background);color:var(--color);display:flex;flex-direction:column;min-height:100svh}#site-header{display:grid;grid-template-columns:2fr 1fr;grid-template-rows:repeat(3, var(--site-header-height))}#site-header-menu,#site-header-search{grid-column:1 / 3}#site-footer{display:grid;grid-template-columns:1fr 1fr;grid-template-rows:repeat(3, var(--site-footer-height))}#site-footer-copyright,#site-footer-love{grid-column:1 / 3}#site-main-content-wrapper{display:flex;flex:1}#sidebar,#toc,#article-nav,#sidebar .btn-close,#toc 
.btn-close{display:none}main{flex:1;display:flex;overflow:auto}#article{flex:1;width:100vw}#sidebar{width:85%;left:-85%}#toc{width:85%;right:-85%}@media (min-width: 768px) and (max-width: 1023px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:repeat(2, var(--site-header-height))}#site-header-brand{grid-column:1 / 6}#site-header-controls{grid-column:6 / 7}#site-header-menu{grid-column:1 / 5}#site-header-search{grid-column:5 / 7}#site-footer{grid-template-columns:repeat(4, 1fr);grid-template-rows:repeat(2, var(--site-footer-height))}#site-footer-copyright{grid-column:1 / 3}#site-footer-social{grid-column:3 / 4}#site-footer-fund{grid-column:4 / 5}#site-footer-love{grid-column:1 / 5}#sidebar{width:50%;left:-50%}#toc{width:50%;right:-50%}}@media (min-width: 1024px){#site-header{grid-template-columns:repeat(6, 1fr);grid-template-rows:var(--site-header-height)}#site-header-brand{grid-column:1 / 2}#site-header-menu{grid-column:2 / 5;grid-row:1}#site-header-search{grid-column:5 / 6;grid-row:1}#site-header-controls{grid-column:6 / 7}#site-footer{grid-template-columns:repeat(5, 1fr);grid-template-rows:var(--site-footer-height)}#site-footer-copyright{grid-column:1 / 3}#site-footer-love{grid-column:3 / 4;grid-row:1}#site-footer-social{grid-column:4 / 5}#site-footer-fund{grid-column:5 / 6}#article-nav-toc-btn{display:none}}@media (min-width: 1024px) and (max-width: 1279px){#sidebar{width:33%;left:-33%}#article{width:75vw}#toc{width:25%;display:flex;flex-direction:column}#toc .sticky{position:fixed;right:0;width:25%}}@media (min-width: 1280px){#sidebar{width:20%;display:flex;flex-direction:column}#article{width:60vw}#toc{width:25%;display:flex;flex-direction:column}#sidebar .sticky{position:fixed;left:0;width:20%}#toc .sticky{position:fixed;right:0;width:20%}}@media (max-width: 1023px){#toc{position:fixed;top:0;height:100%;transition:.3s;z-index:300;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] 
#toc,:root[data-color="night"] #toc{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-toc-on #toc{animation:slide-in-right .3s forwards;display:flex;flex-direction:column;padding-left:16px;z-index:10;cursor:default}.offcanvas-toc-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-toc-on #toc .btn-close{display:block;position:absolute;top:10px;left:10px}#article-nav-toc-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}/* Below 1280px: sidebar is a fixed off-canvas drawer on the left, mirroring the TOC drawer above. */@media (max-width: 1279px){#sidebar{position:fixed;top:0;height:100%;transition:.3s;z-index:200;overflow-x:auto;background:var(--background);box-shadow:0 4px 30px rgba(0,0,0,0.1)}:root[data-color="dark"] #sidebar,:root[data-color="night"] #sidebar{box-shadow:0 4px 30px rgba(255,255,255,0.1)}.offcanvas-sidebar-on #sidebar{animation:slide-in-left .3s forwards;display:flex;flex-direction:column;z-index:10;cursor:default}.offcanvas-sidebar-on:before{content:"";position:fixed;top:0;left:0;width:100%;height:100%;z-index:5}.offcanvas-sidebar-on #sidebar .btn-close{display:block;position:absolute;top:10px;right:10px}#article-nav{display:flex;gap:12px;overflow:auto;justify-content:space-between;height:var(--site-header-height);align-items:center;padding:0 2px}#article-nav-menu-btn{display:flex;box-shadow:var(--box-shadow2);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap;gap:6px;color:var(--color2)}}/* While either drawer is open: lock page scroll and show a blurred backdrop overlay (the :before layer). */body.offcanvas-sidebar-on,body.offcanvas-toc-on{cursor:pointer;overflow:hidden}.offcanvas-sidebar-on:before,.offcanvas-toc-on:before{background:rgba(255,255,255,0.1);backdrop-filter:blur(var(--blur));-webkit-backdrop-filter:blur(var(--blur))}/* Drawer animations: panels sit off-screen (negative left/right) and translate by their own width into view. */@keyframes slide-in-left{from{transform:translateX(0)}to{transform:translateX(100%)}}@keyframes 
slide-in-right{from{transform:translateX(0)}to{transform:translateX(-100%)}}/* Site header: brand, scrollable nav menu (active/hover link gets a bottom border), and right-aligned controls. */#site-header-brand{display:flex;align-items:center;font-family:var(--font-family-brand);font-size:1.4em;color:var(--color2)}#site-header-brand a{padding:12px}#site-header-menu{padding:0 12px;display:flex;align-items:center;color:var(--color3)}#site-header-menu nav{width:100%;overflow:auto}#site-header-menu ul{display:flex;height:100%;align-items:center;gap:12px}#site-header-menu a{display:flex;padding:12px 6px;gap:3px;white-space:nowrap}#site-header-menu a:focus,#site-header-menu a:hover,#site-header-menu a.active{border-bottom:3px solid}#site-header-controls{display:flex;align-items:center;padding-right:12px;justify-content:flex-end;gap:12px}#site-header-search{display:flex;align-items:flex-end}@media (min-width: 768px){#site-header-search{align-items:center}}/* Site footer sections: social, funding, copyright and "love" rows; alignment shifts at >=768px. */#site-footer-social{display:flex;gap:12px;justify-content:flex-start;padding-left:12px;align-items:center}#site-footer-fund{display:flex;gap:12px;overflow:auto;justify-content:flex-end;padding-right:12px;align-items:center}#site-footer-copyright,#site-footer-love{display:flex;align-items:center;justify-content:center;color:var(--color3)}#site-footer-copyright a{display:flex;align-items:center}@media (min-width: 768px){#site-footer-copyright{justify-content:flex-start;padding-left:12px}#site-footer-social{justify-content:flex-end;padding-right:12px}}/* Home hero cover: centered column over --home-cover-background, with a blurred copy of the background behind it (::after, z-index:-1). */.cover{padding:40px 20px;width:100vw;flex:1;display:flex;align-items:center;justify-content:center;flex-direction:column;background:var(--home-cover-background);position:relative;color:var(--color2)}.cover::after{content:"";position:absolute;top:0;left:0;right:0;bottom:0;z-index:-1;background:inherit;filter:blur(1rem)}.cover h1{font-family:var(--font-family-brand);font-size:4em;text-align:center}.cover h2{font-family:var(--font-family-brand);font-size:2em;text-align:center}.cover h3{font-family:var(--font-family-brand);font-size:1.5em;text-align:center;padding-top:.8em}.cover 
p{font-size:1em;padding-top:.8em}/* Home-page GitHub area: button row plus a repo grid whose width and icon size scale up through the breakpoints. */.github-buttons{display:flex;gap:10px;padding-top:20px;justify-content:center}.github-repos-grid{display:flex;flex-wrap:wrap;padding-top:4em;padding-bottom:2em;gap:4em;width:100%}.github-repo-tile{width:100%}.github-repo-tile .icon{width:80px;height:80px;background-size:5em}.github-repo-tile a{display:flex;flex-direction:column;align-items:center}@media (min-width: 768px){.github-repos-grid{flex-direction:row;width:80%;padding-top:4em;gap:0}.github-repo-tile{width:50%}}@media (min-width: 1024px){.github-repos-grid{width:60%;padding-top:6em}.github-repo-tile .icon{width:100px;height:100px;background-size:6.25em}}@media (min-width: 1281px){.github-repos-grid{width:50%}.github-repo-tile .icon{width:120px;height:120px;background-size:7.5em}}@media (min-width: 1920px){.github-repos-grid{width:40%}.github-repo-tile .icon{width:160px;height:160px;background-size:10em}}/* GitHub-style pill button with dark/night theme color overrides. */.btn-github{display:flex;flex-direction:row;gap:2px;font-size:.7em;font-weight:700;line-height:1.8em;color:#576060;background:#f6f8fa;border:1px solid #d5d7da;border-radius:6px;padding:2px 4px}:root[data-color="dark"] .btn-github,:root[data-color="night"] .btn-github{color:#c9d1d9;background:#21262d;border:1px solid #576060}.btn-github .icon{transform:scale(0.8)}/* "Buy Me a Coffee" badge: background-image is a URL-encoded inline SVG; the data-URI string opens here and continues on the following lines — do not edit inside it. */.btn-buymeacoffee{width:86px;height:24px;background-image:url("data:image/svg+xml,%3Csvg width='85.5' height='24' viewBox='0 0 545 153' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0 24.48C0 10.9601 10.9601 0 24.48 0H520.2C533.72 0 544.68 10.9601 544.68 24.48V128.52C544.68 142.04 533.72 153 520.2 153H24.48C10.9601 153 0 142.04 0 128.52V24.48Z' fill='%23FFDD00'/%3E%3Cpath d='M109.522 50.3178L109.455 50.2783L109.299 50.2308C109.362 50.2836 109.44 50.3142 109.522 50.3178Z' fill='%230D0C22'/%3E%3Cpath d='M110.507 57.3134L110.432 57.3344L110.507 57.3134Z' fill='%230D0C22'/%3E%3Cpath d='M109.549 50.3062C109.54 50.3051 109.532 50.3031 109.524 50.3003C109.523 50.3058 109.523 50.3113 109.524 50.3168C109.533 
50.3156 109.541 50.3119 109.549 50.3062Z' fill='%230D0C22'/%3E%3Cpath d='M109.523 50.3205H109.536V50.3127L109.523 50.3205Z' fill='%230D0C22'/%3E%3Cpath d='M110.447 57.3006L110.56 57.2361L110.602 57.2123L110.64 57.1715C110.569 57.2025 110.503 57.2462 110.447 57.3006Z' fill='%230D0C22'/%3E%3Cpath d='M109.715 50.4713L109.604 50.3659L109.529 50.3251C109.57 50.3963 109.636 50.4488 109.715 50.4713Z' fill='%230D0C22'/%3E%3Cpath d='M81.8801 118.353C81.7916 118.391 81.7142 118.451 81.6548 118.527L81.7246 118.482C81.772 118.439 81.8392 118.387 81.8801 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M98.0456 115.173C98.0456 115.073 97.9968 115.091 98.0087 115.447C98.0087 115.418 98.0206 115.389 98.0258 115.361C98.0324 115.298 98.0377 115.236 98.0456 115.173Z' fill='%230D0C22'/%3E%3Cpath d='M96.3761 118.353C96.2877 118.391 96.2103 118.451 96.1509 118.527L96.2207 118.482C96.2681 118.439 96.3353 118.387 96.3761 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M70.4886 119.11C70.4215 119.052 70.3393 119.013 70.2515 118.999C70.3226 119.034 70.3937 119.068 70.4412 119.094L70.4886 119.11Z' fill='%230D0C22'/%3E%3Cpath d='M67.9304 116.657C67.92 116.553 67.8881 116.453 67.8369 116.362C67.8732 116.456 67.9035 116.553 67.9278 116.652L67.9304 116.657Z' fill='%230D0C22'/%3E%3Cpath d='M85.1368 72.7737C81.6195 74.2794 77.628 75.9866 72.4549 75.9866C70.2908 75.9823 68.1373 75.6854 66.0527 75.104L69.6306 111.838C69.7572 113.373 70.4567 114.805 71.59 115.848C72.7233 116.892 74.2076 117.471 75.7482 117.47C75.7482 117.47 80.8212 117.734 82.514 117.734C84.3358 117.734 89.7988 117.47 89.7988 117.47C91.3391 117.47 92.8231 116.891 93.9562 115.848C95.0892 114.804 95.7885 113.373 95.9151 111.838L99.7472 71.2456C98.0347 70.6607 96.3064 70.2721 94.358 70.2721C90.9883 70.2708 88.2733 71.4313 85.1368 72.7737Z' fill='white'/%3E%3Cpath d='M54.9844 57.1021L55.045 57.1587L55.0845 57.1824C55.0541 57.1522 55.0205 57.1252 54.9844 57.1021Z' fill='%230D0C22'/%3E%3Cpath d='M116.299 53.7119L115.761 50.9943C115.277 48.5559 114.18 
46.2519 111.677 45.3706C110.875 45.0887 109.964 44.9675 109.349 44.384C108.734 43.8004 108.552 42.8941 108.41 42.0536C108.147 40.511 107.899 38.9671 107.629 37.4272C107.396 36.1033 107.211 34.616 106.604 33.4015C105.814 31.7706 104.174 30.8169 102.543 30.1859C101.707 29.8739 100.854 29.61 99.9884 29.3955C95.9139 28.3205 91.63 27.9253 87.4382 27.7001C82.407 27.4225 77.3623 27.5061 72.343 27.9504C68.6071 28.2902 64.6723 28.7013 61.1221 29.9935C59.8245 30.4665 58.4875 31.0342 57.5008 32.0367C56.2902 33.2684 55.895 35.1733 56.7789 36.7092C57.4073 37.8 58.4717 38.5706 59.6006 39.0804C61.0711 39.7373 62.6068 40.2371 64.1822 40.5716C68.5689 41.5412 73.1124 41.9219 77.5939 42.0839C82.561 42.2844 87.5362 42.1219 92.4796 41.5978C93.7021 41.4635 94.9224 41.3023 96.1405 41.1144C97.575 40.8944 98.4958 39.0185 98.073 37.7117C97.5671 36.1494 96.2077 35.5434 94.6703 35.7792C94.4438 35.8148 94.2185 35.8477 93.9919 35.8807L93.8286 35.9044C93.3078 35.9702 92.787 36.0317 92.2662 36.0888C91.1904 36.2047 90.112 36.2996 89.0309 36.3733C86.6097 36.5419 84.1818 36.6197 81.7553 36.6236C79.371 36.6236 76.9853 36.5564 74.6062 36.3997C73.5207 36.3285 72.4379 36.2381 71.3577 36.1283C70.8663 36.0769 70.3763 36.0229 69.8862 35.9623L69.4199 35.903L69.3185 35.8886L68.835 35.8187C67.847 35.6699 66.859 35.4986 65.8816 35.2918C65.783 35.2699 65.6947 35.2151 65.6315 35.1363C65.5683 35.0575 65.5338 34.9594 65.5338 34.8584C65.5338 34.7574 65.5683 34.6594 65.6315 34.5806C65.6947 34.5018 65.783 34.4469 65.8816 34.425H65.9C66.7471 34.2445 67.6007 34.0904 68.4569 33.956C68.7424 33.9113 69.0287 33.8673 69.3158 33.8243H69.3237C69.8599 33.7887 70.3987 33.6926 70.9322 33.6293C75.574 33.1465 80.2434 32.9819 84.9077 33.1367C87.1721 33.2025 89.4353 33.3356 91.6892 33.5648C92.174 33.6149 92.6562 33.6676 93.1383 33.7268C93.3227 33.7492 93.5085 33.7756 93.6942 33.798L94.0683 33.852C95.1591 34.0144 96.2441 34.2116 97.3234 34.4435C98.9227 34.7912 100.976 34.9045 101.688 36.6566C101.914 37.2125 102.017 37.8303 102.142 
38.4139L102.302 39.1581C102.306 39.1715 102.309 39.1852 102.311 39.199C102.688 40.9554 103.065 42.7118 103.442 44.4683C103.47 44.598 103.471 44.7321 103.444 44.8621C103.418 44.9921 103.365 45.1153 103.289 45.2239C103.213 45.3326 103.115 45.4244 103.002 45.4936C102.889 45.5628 102.762 45.6079 102.631 45.6262H102.62L102.39 45.6578L102.162 45.6881C101.44 45.7821 100.717 45.8699 99.9936 45.9516C98.5683 46.114 97.1408 46.2546 95.711 46.3731C92.87 46.6094 90.0233 46.7644 87.1708 46.8381C85.7174 46.8768 84.2644 46.8948 82.8118 46.8921C77.0301 46.8876 71.2534 46.5516 65.5101 45.8857C64.8883 45.8119 64.2666 45.7329 63.6448 45.6525C64.1269 45.7145 63.2944 45.6051 63.1258 45.5814C62.7306 45.5261 62.3354 45.4686 61.9402 45.4088C60.6136 45.2099 59.295 44.9649 57.9711 44.7502C56.3705 44.4867 54.8398 44.6185 53.3921 45.4088C52.2037 46.0591 51.2419 47.0564 50.6349 48.2674C50.0105 49.5584 49.8248 50.964 49.5455 52.3511C49.2662 53.7383 48.8315 55.2308 48.9962 56.6548C49.3505 59.7281 51.4991 62.2258 54.5895 62.7843C57.4968 63.3112 60.42 63.7381 63.351 64.1016C74.8648 65.5118 86.4968 65.6805 98.0466 64.6049C98.9872 64.517 99.9265 64.4213 100.864 64.3177C101.157 64.2855 101.454 64.3192 101.732 64.4165C102.01 64.5137 102.263 64.6719 102.472 64.8795C102.681 65.0872 102.842 65.339 102.941 65.6165C103.04 65.894 103.076 66.1902 103.046 66.4834L102.753 69.3261C102.164 75.0705 101.575 80.8145 100.986 86.558C100.371 92.5896 99.7521 98.6208 99.1295 104.651C98.9538 106.35 98.7782 108.048 98.6025 109.746C98.4339 111.417 98.4102 113.142 98.0927 114.794C97.5922 117.391 95.8335 118.987 93.2674 119.57C90.9164 120.105 88.5148 120.386 86.1038 120.408C83.431 120.422 80.7594 120.304 78.0866 120.318C75.2333 120.334 71.7384 120.071 69.5358 117.947C67.6007 116.082 67.3333 113.161 67.0698 110.636C66.7185 107.293 66.3703 103.95 66.0252 100.607L64.0887 82.0212L62.8359 69.9953C62.8149 69.7964 62.7938 69.6001 62.774 69.3999C62.6239 67.9654 61.6082 66.5611 60.0077 66.6335C58.6376 66.6941 57.0806 67.8586 57.2413 
69.3999L58.17 78.3155L60.0906 96.7581C60.6378 101.997 61.1836 107.236 61.7281 112.476C61.8335 113.48 61.9323 114.487 62.0429 115.49C62.6449 120.976 66.834 123.932 72.0216 124.764C75.0515 125.252 78.1551 125.352 81.2297 125.402C85.1711 125.465 89.1521 125.617 93.029 124.903C98.7738 123.849 103.084 120.013 103.699 114.062C103.875 112.345 104.051 110.626 104.226 108.908C104.81 103.224 105.393 97.5397 105.976 91.855L107.88 73.2807L108.754 64.7682C108.797 64.3461 108.976 63.9492 109.262 63.6363C109.549 63.3234 109.929 63.111 110.345 63.0307C111.988 62.7105 113.558 62.1639 114.727 60.9137C116.587 58.9232 116.957 56.3281 116.299 53.7119ZM54.5052 55.5483C54.5302 55.5364 54.4841 55.7511 54.4644 55.8513C54.4604 55.6998 54.4683 55.5654 54.5052 55.5483ZM54.6646 56.7813C54.6778 56.7721 54.7173 56.8248 54.7581 56.888C54.6962 56.83 54.6567 56.7866 54.6633 56.7813H54.6646ZM54.8214 56.9881C54.878 57.0843 54.9083 57.1449 54.8214 56.9881V56.9881ZM55.1362 57.2437H55.1441C55.1441 57.2529 55.1586 57.2621 55.1639 57.2713C55.1551 57.2612 55.1454 57.2519 55.1349 57.2437H55.1362ZM110.269 56.8616C109.679 57.4228 108.789 57.6837 107.911 57.8141C98.0572 59.2763 88.06 60.0166 78.0984 59.6899C70.9691 59.4462 63.9148 58.6545 56.8566 57.6573C56.165 57.5598 55.4155 57.4334 54.9399 56.9236C54.0441 55.9619 54.4841 54.0254 54.7173 52.8636C54.9307 51.7992 55.3391 50.3804 56.605 50.2289C58.581 49.9971 60.8758 50.8309 62.8307 51.1273C65.1843 51.4865 67.5467 51.7741 69.9179 51.9902C80.0375 52.9123 90.3271 52.7687 100.402 51.4198C102.238 51.173 104.068 50.8863 105.891 50.5596C107.516 50.2684 109.316 49.7218 110.298 51.404C110.971 52.55 111.06 54.0834 110.956 55.3783C110.924 55.9425 110.678 56.4732 110.267 56.8616H110.269Z' fill='%230D0C22'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M170.036 84.2397C169.461 85.3378 168.67 86.2942 167.663 87.1057C166.656 87.9178 165.482 88.579 164.139 89.0881C162.797 89.5984 161.446 89.9408 160.088 90.1153C158.729 90.2905 157.41 90.2753 156.133 90.0674C154.854 
89.8608 153.766 89.439 152.872 88.8014L153.88 78.3397C154.806 78.0216 155.972 77.6949 157.379 77.3604C158.785 77.0264 160.231 76.787 161.718 76.644C163.205 76.5004 164.61 76.5173 165.937 76.6919C167.263 76.867 168.31 77.2888 169.077 77.9579C169.493 78.3397 169.845 78.7537 170.132 79.1997C170.42 79.6458 170.595 80.1076 170.66 80.5852C170.819 81.9227 170.612 83.1409 170.036 84.2397ZM155.413 61.9545C156.084 61.5406 156.892 61.1739 157.834 60.8551C158.777 60.5376 159.744 60.3139 160.735 60.1867C161.725 60.06 162.692 60.043 163.636 60.1388C164.578 60.2345 165.41 60.497 166.129 60.9267C166.848 61.357 167.383 61.9782 167.735 62.7897C168.086 63.6024 168.182 64.6296 168.022 65.8714C167.895 66.8587 167.502 67.695 166.848 68.3793C166.193 69.0647 165.393 69.6374 164.451 70.0993C163.508 70.5617 162.509 70.9277 161.455 71.1974C160.399 71.4689 159.384 71.6683 158.41 71.795C157.435 71.9229 156.588 72.0029 155.869 72.0338C155.15 72.0659 154.678 72.0816 154.454 72.0816L155.413 61.9545ZM175.214 77.4798C174.703 76.3658 174.016 75.3864 173.153 74.5416C172.29 73.698 171.266 73.0853 170.084 72.7029C170.595 72.2889 171.099 71.6362 171.595 70.7441C172.09 69.8532 172.513 68.8811 172.865 67.8302C173.216 66.7787 173.457 65.7205 173.584 64.6533C173.711 63.5866 173.663 62.6709 173.441 61.906C172.896 59.9958 172.042 58.4988 170.875 57.4158C169.708 56.3334 168.35 55.5849 166.8 55.1704C165.249 54.7577 163.54 54.6692 161.67 54.908C159.8 55.1467 157.89 55.6164 155.941 56.317C155.941 56.1582 155.957 55.991 155.989 55.8158C156.02 55.6413 156.036 55.4576 156.036 55.2661C156.036 54.7886 155.797 54.3752 155.317 54.0243C154.838 53.674 154.287 53.4674 153.664 53.4031C153.04 53.3401 152.433 53.4746 151.841 53.8092C151.25 54.1437 150.842 54.7577 150.619 55.6479C150.363 58.5146 150.107 61.4927 149.852 64.5812C149.596 67.6708 149.324 70.792 149.037 73.9453C148.749 77.0979 148.461 80.227 148.174 83.3318C147.886 86.4372 147.598 89.4226 147.311 92.2886C147.407 93.1486 147.646 93.8177 148.03 94.2953C148.413 
94.7734 148.861 95.0601 149.372 95.1553C149.883 95.251 150.419 95.1625 150.978 94.8922C151.537 94.6225 152.025 94.1516 152.441 93.4832C153.719 94.1838 155.158 94.6377 156.756 94.845C158.354 95.0516 159.975 95.0516 161.623 94.845C163.268 94.6377 164.89 94.248 166.488 93.6741C168.086 93.1013 169.541 92.3844 170.851 91.525C172.162 90.665 173.264 89.685 174.16 88.5869C175.054 87.4875 175.646 86.3014 175.933 85.0281C176.221 83.7221 176.301 82.4167 176.173 81.1106C176.045 79.8052 175.725 78.5955 175.214 77.4798Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M221.989 102.702C221.814 103.753 221.565 104.86 221.246 106.023C220.926 107.184 220.551 108.244 220.12 109.2C219.688 110.155 219.209 110.926 218.682 111.516C218.154 112.105 217.586 112.352 216.979 112.257C216.5 112.192 216.196 111.89 216.069 111.349C215.94 110.807 215.94 110.138 216.069 109.343C216.196 108.546 216.443 107.646 216.811 106.643C217.179 105.64 217.627 104.644 218.154 103.658C218.682 102.67 219.281 101.723 219.952 100.815C220.623 99.9082 221.326 99.1512 222.061 98.5464C222.221 98.7373 222.293 99.2149 222.277 99.9797C222.26 100.744 222.165 101.652 221.989 102.702ZM238.243 81.9697C237.811 81.4921 237.284 81.2218 236.66 81.1576C236.037 81.0939 235.405 81.4442 234.767 82.2085C234.351 82.9727 233.823 83.7054 233.184 84.406C232.545 85.1072 231.882 85.7436 231.195 86.3169C230.507 86.8896 229.852 87.3841 229.229 87.7975C228.606 88.212 228.118 88.5144 227.767 88.7053C227.639 87.6866 227.566 86.5878 227.551 85.409C227.534 84.2308 227.559 83.0369 227.623 81.8266C227.718 80.1067 227.918 78.3715 228.222 76.6194C228.526 74.868 228.965 73.148 229.541 71.4595C229.541 70.5686 229.332 69.8438 228.917 69.2862C228.501 68.7293 227.998 68.3784 227.407 68.2353C226.815 68.0923 226.209 68.1717 225.585 68.4741C224.962 68.7771 224.427 69.3268 223.979 70.122C223.596 71.1735 223.156 72.3516 222.661 73.6571C222.165 74.9631 221.606 76.2928 220.983 77.6461C220.359 79.0006 219.664 80.3139 218.897 81.5873C218.13 
82.8618 217.291 83.9927 216.38 84.9793C215.469 85.9666 214.478 86.7393 213.408 87.2963C212.336 87.8538 211.179 88.1005 209.932 88.0369C209.356 87.8775 208.94 87.4478 208.685 86.7466C208.429 86.0466 208.277 85.1702 208.23 84.1193C208.182 83.0684 208.23 81.9139 208.373 80.6557C208.517 79.3982 208.709 78.1479 208.949 76.9061C209.188 75.6637 209.452 74.4855 209.739 73.371C210.027 72.2565 210.298 71.3165 210.554 70.5523C210.938 69.6292 210.938 68.8559 210.554 68.2353C210.171 67.6141 209.644 67.2008 208.973 66.9929C208.302 66.7863 207.598 66.7947 206.863 67.0172C206.128 67.2402 205.6 67.7335 205.281 68.4977C204.737 69.8044 204.241 71.2686 203.794 72.8928C203.347 74.5171 202.987 76.1976 202.716 77.9328C202.444 79.6691 202.291 81.3891 202.26 83.0927C202.258 83.2036 202.263 83.309 202.263 83.4193C201.566 85.2708 200.902 86.6702 200.271 87.6066C199.456 88.8174 198.536 89.3429 197.514 89.1829C197.065 88.992 196.771 88.5465 196.627 87.8453C196.482 87.1453 196.435 86.2854 196.482 85.2654C196.531 84.2472 196.651 83.0927 196.842 81.8024C197.035 80.5127 197.273 79.1752 197.561 77.7897C197.849 76.4037 198.153 75.0116 198.472 73.6098C198.792 72.2086 199.079 70.8868 199.336 69.6444C199.304 68.5299 198.976 67.6784 198.352 67.0887C197.73 66.5002 196.858 66.2693 195.74 66.396C194.973 66.7147 194.405 67.1293 194.038 67.6384C193.67 68.1474 193.374 68.8008 193.151 69.5965C193.022 70.0111 192.831 70.8389 192.575 72.0813C192.319 73.3225 191.992 74.7486 191.592 76.3564C191.193 77.9655 190.721 79.6449 190.178 81.3963C189.635 83.1478 189.027 84.7333 188.357 86.1496C187.685 87.5666 186.95 88.7053 186.151 89.5653C185.352 90.4247 184.489 90.7756 183.562 90.6162C183.05 90.5205 182.723 89.995 182.579 89.0399C182.435 88.0841 182.412 86.9066 182.507 85.5048C182.603 84.1036 182.795 82.5666 183.082 80.8951C183.37 79.223 183.665 77.6388 183.969 76.1413C184.273 74.6449 184.553 73.3225 184.809 72.1765C185.064 71.0298 185.24 70.2656 185.336 69.8838C185.336 68.9602 185.127 68.2202 184.713 67.662C184.297 
67.1056 183.794 66.7547 183.202 66.6111C182.61 66.4681 182.003 66.5475 181.381 66.8499C180.757 67.1529 180.222 67.7026 179.774 68.4977C179.614 69.3577 179.406 70.3535 179.151 71.4838C178.895 72.614 178.648 73.7765 178.408 74.971C178.168 76.1655 177.944 77.3358 177.737 78.4824C177.529 79.6291 177.377 80.6321 177.281 81.4921C177.217 82.1606 177.145 82.9812 177.066 83.9521C176.985 84.9242 176.945 85.9508 176.945 87.0332C176.945 88.1169 177.025 89.1914 177.186 90.258C177.345 91.3253 177.633 92.3047 178.048 93.1956C178.463 94.0877 179.047 94.8198 179.799 95.3931C180.549 95.9664 181.5 96.2846 182.651 96.3489C183.833 96.4119 184.864 96.3252 185.744 96.0858C186.622 95.847 187.421 95.4725 188.141 94.9628C188.86 94.4543 189.515 93.8489 190.107 93.1477C190.697 92.4477 191.281 91.6835 191.856 90.855C192.4 92.0659 193.103 93.0047 193.966 93.6737C194.829 94.3422 195.74 94.741 196.699 94.8677C197.657 94.9943 198.633 94.8604 199.624 94.4616C200.614 94.064 201.509 93.3871 202.308 92.4313C202.835 91.8453 203.331 91.1792 203.797 90.4429C203.995 90.7877 204.205 91.1204 204.442 91.4277C205.225 92.4477 206.288 93.1477 207.631 93.5301C209.069 93.9125 210.474 93.9768 211.849 93.7216C213.223 93.4671 214.534 93.0047 215.78 92.3362C217.027 91.6671 218.185 90.8635 219.257 89.9235C220.327 88.9841 221.262 88.0053 222.061 86.9854C222.029 87.7181 222.013 88.4114 222.013 89.0635C222.013 89.7168 221.997 90.4247 221.966 91.1895C220.367 92.3047 218.857 93.6422 217.435 95.2022C216.012 96.7622 214.765 98.4264 213.695 100.194C212.624 101.961 211.785 103.753 211.179 105.568C210.571 107.384 210.275 109.08 210.291 110.657C210.307 112.233 210.682 113.61 211.418 114.788C212.152 115.967 213.351 116.81 215.013 117.32C216.74 117.862 218.257 117.877 219.569 117.368C220.879 116.858 222.021 116.014 222.996 114.836C223.971 113.658 224.77 112.233 225.394 110.561C226.017 108.889 226.512 107.145 226.88 105.33C227.247 103.515 227.479 101.73 227.575 99.9797C227.671 98.2276 227.671 96.6664 227.575 95.2974C230.324 94.1513 
232.577 92.7022 234.335 90.9501C236.093 89.1999 237.547 87.352 238.698 85.409C239.049 84.9314 239.169 84.3581 239.058 83.6896C238.945 83.0206 238.674 82.4472 238.243 81.9697Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M298.724 78.9135C298.82 78.1814 298.964 77.4087 299.155 76.5966C299.347 75.7845 299.587 74.996 299.875 74.2318C300.162 73.4676 300.498 72.807 300.882 72.2494C301.265 71.6924 301.673 71.2943 302.104 71.0549C302.536 70.8167 302.974 70.8403 303.423 71.1264C303.902 71.4137 304.197 72.0185 304.31 72.9415C304.421 73.8663 304.31 74.853 303.974 75.9039C303.638 76.9554 303.039 77.942 302.176 78.8657C301.313 79.7899 300.146 80.3941 298.676 80.6808C298.612 80.236 298.628 79.6463 298.724 78.9135ZM315.336 80.8717C314.809 80.7135 314.306 80.6972 313.826 80.8244C313.347 80.9517 313.043 81.2862 312.916 81.8281C312.659 82.8468 312.251 83.8898 311.692 84.9565C311.133 86.0238 310.446 87.0346 309.632 87.9904C308.817 88.9455 307.897 89.7898 306.875 90.5219C305.851 91.2546 304.781 91.78 303.662 92.0982C302.543 92.4491 301.616 92.4885 300.882 92.2176C300.146 91.9479 299.563 91.4855 299.132 90.8328C298.7 90.1801 298.388 89.3916 298.197 88.468C298.005 87.5443 297.893 86.5892 297.861 85.6013C299.683 85.7292 301.305 85.4032 302.728 84.622C304.149 83.8426 305.356 82.8068 306.347 81.5171C307.337 80.2275 308.089 78.7784 308.6 77.1699C309.111 75.5621 309.399 73.9615 309.463 72.3688C309.495 70.8718 309.272 69.6064 308.792 68.5713C308.313 67.5367 307.665 66.7313 306.85 66.1586C306.036 65.5853 305.1 65.2507 304.046 65.1556C302.992 65.0598 301.92 65.2034 300.833 65.5853C299.522 66.0313 298.412 66.7555 297.501 67.7592C296.59 68.7622 295.831 69.9252 295.224 71.2464C294.617 72.5682 294.137 73.993 293.786 75.5215C293.434 77.0505 293.178 78.5554 293.019 80.0366C292.875 81.3656 292.798 82.6365 292.771 83.8632C292.702 84.0189 292.636 84.1686 292.563 84.3353C292.067 85.4668 291.491 86.5734 290.837 87.6558C290.182 88.7389 289.454 89.6467 288.656 90.3788C287.857 
91.1116 287.026 91.3661 286.163 91.1431C285.651 91.0164 285.372 90.4261 285.324 89.3758C285.276 88.3243 285.331 87.0189 285.491 85.4583C285.651 83.8983 285.835 82.2093 286.043 80.3941C286.25 78.579 286.354 76.8439 286.354 75.1875C286.354 73.7542 286.082 72.3773 285.539 71.0549C284.995 69.7343 284.252 68.6349 283.31 67.7592C282.367 66.8828 281.272 66.3016 280.026 66.0156C278.779 65.7283 277.437 65.9198 275.999 66.5883C274.56 67.2574 273.417 68.1967 272.571 69.407C271.723 70.6179 270.948 71.8912 270.245 73.2288C269.989 72.2094 269.614 71.2628 269.118 70.3864C268.623 69.5107 268.016 68.7464 267.297 68.0931C266.577 67.441 265.769 66.9313 264.876 66.5646C263.981 66.1992 263.037 66.0156 262.046 66.0156C261.088 66.0156 260.201 66.1992 259.386 66.5646C258.571 66.9313 257.828 67.4004 257.156 67.9737C256.485 68.5476 255.878 69.1919 255.334 69.9088C254.791 70.6252 254.311 71.3343 253.896 72.0343C253.831 71.2064 253.76 70.4822 253.681 69.8603C253.6 69.2398 253.456 68.7143 253.249 68.2846C253.041 67.8543 252.746 67.5283 252.362 67.3052C251.978 67.0828 251.435 66.9707 250.732 66.9707C250.38 66.9707 250.028 67.0422 249.677 67.1852C249.325 67.3289 249.013 67.5283 248.742 67.7828C248.47 68.0386 248.263 68.3482 248.119 68.7143C247.975 69.0804 247.936 69.5028 247.999 69.9803C248.031 70.3312 248.119 70.7525 248.263 71.2464C248.406 71.7403 248.542 72.3858 248.67 73.1809C248.798 73.9773 248.902 74.9409 248.982 76.0712C249.062 77.2021 249.085 78.5875 249.054 80.2275C249.021 81.8681 248.902 83.7862 248.694 85.9837C248.486 88.1813 248.158 90.7291 247.711 93.6267C247.647 94.2957 247.903 94.8376 248.479 95.2515C249.054 95.6648 249.709 95.9036 250.444 95.9678C251.179 96.0315 251.875 95.9036 252.53 95.586C253.185 95.2666 253.561 94.7097 253.656 93.9139C253.752 92.417 253.936 90.8249 254.208 89.1364C254.479 87.4492 254.815 85.7771 255.215 84.1207C255.614 82.465 256.069 80.8887 256.581 79.3911C257.092 77.8942 257.66 76.573 258.283 75.4263C258.907 74.2797 259.554 73.3645 260.225 72.6797C260.896 
71.9949 261.599 71.6524 262.335 71.6524C263.229 71.6524 263.924 72.0579 264.42 72.87C264.915 73.6827 265.266 74.7263 265.475 75.999C265.682 77.2736 265.778 78.6675 265.763 80.1796C265.746 81.6923 265.682 83.1492 265.571 84.5504C265.459 85.9522 265.331 87.2019 265.187 88.3007C265.043 89.3995 264.939 90.1564 264.876 90.5697C264.876 91.3025 265.155 91.8831 265.714 92.3134C266.273 92.743 266.896 92.9982 267.584 93.0776C268.272 93.1576 268.918 93.0297 269.526 92.6952C270.133 92.3606 270.485 91.7964 270.581 90.9994C270.9 88.7067 271.34 86.4062 271.899 84.0971C272.458 81.7881 273.098 79.7184 273.817 77.8869C274.536 76.0554 275.335 74.5585 276.214 73.3961C277.093 72.2343 278.028 71.6524 279.019 71.6524C279.53 71.6524 279.922 72.0033 280.193 72.7033C280.465 73.4039 280.601 74.3591 280.601 75.5694C280.601 76.4615 280.529 77.3772 280.386 78.3166C280.241 79.256 280.074 80.2275 279.882 81.2305C279.69 82.2341 279.522 83.2608 279.378 84.3117C279.235 85.3632 279.163 86.4613 279.163 87.608C279.163 88.4043 279.243 89.3279 279.403 90.3788C279.562 91.4291 279.865 92.4255 280.313 93.3642C280.761 94.3042 281.376 95.1 282.16 95.7527C282.943 96.4054 283.941 96.7321 285.155 96.7321C286.978 96.7321 288.591 96.3418 289.998 95.5618C291.404 94.7818 292.611 93.763 293.618 92.5049C293.67 92.4388 293.718 92.3685 293.769 92.3031C293.846 92.4891 293.914 92.6861 294.001 92.863C294.688 94.2642 295.623 95.3466 296.806 96.1115C297.988 96.8757 299.379 97.2975 300.978 97.3775C302.575 97.4563 304.317 97.1618 306.204 96.4933C307.609 95.9836 308.832 95.3466 309.871 94.5824C310.909 93.8182 311.844 92.8867 312.675 91.7879C313.507 90.6891 314.265 89.4231 314.953 87.9904C315.641 86.5565 316.335 84.9171 317.038 83.0692C317.166 82.5608 317.046 82.1068 316.679 81.7081C316.311 81.3105 315.864 81.0317 315.336 80.8717Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M341.393 75.5432C341.233 76.4832 341.018 77.5189 340.746 78.6486C340.474 79.7795 340.131 80.9498 339.715 82.1601C339.3 83.3703 
338.788 84.4612 338.181 85.4321C337.574 86.4042 336.878 87.1757 336.096 87.7491C335.312 88.3224 334.41 88.5612 333.387 88.4654C332.875 88.4024 332.483 88.0521 332.212 87.4145C331.94 86.7782 331.797 85.9655 331.78 84.9782C331.764 83.9915 331.852 82.9085 332.044 81.7298C332.236 80.5522 332.531 79.3971 332.932 78.2662C333.331 77.1365 333.818 76.0929 334.393 75.1371C334.969 74.182 335.632 73.4414 336.383 72.916C337.134 72.3905 337.958 72.1445 338.852 72.1754C339.747 72.2075 340.706 72.6529 341.729 73.5129C341.664 73.9275 341.553 74.6044 341.393 75.5432ZM358.437 79.1977C357.941 78.9431 357.43 78.888 356.903 79.031C356.376 79.174 356 79.6601 355.777 80.488C355.649 81.3801 355.361 82.4304 354.914 83.6406C354.466 84.8509 353.914 85.9982 353.26 87.08C352.604 88.163 351.853 89.063 351.006 89.7793C350.159 90.4963 349.256 90.823 348.298 90.7581C347.498 90.6951 346.938 90.289 346.62 89.5406C346.299 88.7921 346.132 87.8533 346.116 86.7218C346.099 85.5921 346.212 84.3182 346.451 82.9007C346.691 81.4837 346.979 80.0746 347.314 78.6722C347.65 77.2716 347.994 75.9256 348.346 74.6359C348.697 73.3463 348.984 72.2554 349.209 71.3639C349.464 70.5675 349.384 69.8912 348.969 69.333C348.553 68.7766 348.034 68.3778 347.411 68.1391C346.787 67.9003 346.155 67.8366 345.516 67.9481C344.877 68.0597 344.462 68.4021 344.27 68.9748C342.384 67.3506 340.57 66.4748 338.829 66.3476C337.086 66.2203 335.48 66.6027 334.01 67.4942C332.539 68.3857 331.237 69.6754 330.103 71.3639C328.968 73.0523 328.049 74.8911 327.345 76.8814C326.642 78.8716 326.203 80.9025 326.027 82.9722C325.851 85.0424 325.987 86.9297 326.435 88.6333C326.883 90.3369 327.673 91.7308 328.808 92.8126C329.942 93.8956 331.485 94.4375 333.435 94.4375C334.298 94.4375 335.129 94.2623 335.928 93.912C336.726 93.5611 337.462 93.1472 338.133 92.6696C338.804 92.192 339.395 91.6902 339.908 91.1648C340.418 90.6393 340.818 90.2018 341.106 89.8509C341.329 90.9975 341.697 91.9696 342.209 92.7654C342.719 93.5611 343.303 94.215 343.958 94.7235C344.613 
95.2326 345.301 95.6071 346.02 95.8465C346.739 96.0853 347.435 96.2047 348.105 96.2047C349.608 96.2047 351.013 95.695 352.325 94.6756C353.635 93.6575 354.81 92.4066 355.849 90.926C356.887 89.4448 357.743 87.8848 358.413 86.2442C359.085 84.6043 359.532 83.1473 359.756 81.8728C359.98 81.3952 359.939 80.894 359.636 80.3686C359.332 79.8431 358.933 79.4534 358.437 79.1977Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M444.738 105.571C444.467 106.653 444.043 107.57 443.467 108.318C442.892 109.066 442.173 109.456 441.31 109.489C440.767 109.52 440.351 109.233 440.063 108.629C439.776 108.023 439.576 107.243 439.464 106.288C439.352 105.332 439.304 104.265 439.32 103.087C439.336 101.909 439.384 100.746 439.464 99.5996C439.543 98.4536 439.64 97.3857 439.752 96.3991C439.863 95.4112 439.951 94.6482 440.015 94.1064C441.102 94.2336 442.006 94.7027 442.724 95.5154C443.443 96.3275 443.995 97.2906 444.378 98.4057C444.762 99.5202 444.985 100.723 445.05 102.012C445.113 103.302 445.009 104.488 444.738 105.571ZM427.382 105.571C427.111 106.653 426.687 107.57 426.112 108.318C425.537 109.066 424.817 109.456 423.954 109.489C423.411 109.52 422.996 109.233 422.708 108.629C422.42 108.023 422.22 107.243 422.109 106.288C421.996 105.332 421.948 104.265 421.965 103.087C421.98 101.909 422.028 100.746 422.109 99.5996C422.188 98.4536 422.284 97.3857 422.396 96.3991C422.508 95.4112 422.595 94.6482 422.66 94.1064C423.746 94.2336 424.65 94.7027 425.368 95.5154C426.088 96.3275 426.639 97.2906 427.023 98.4057C427.407 99.5202 427.63 100.723 427.694 102.012C427.757 103.302 427.653 104.488 427.382 105.571ZM409.572 78.4375C409.539 79.2011 409.467 79.8781 409.355 80.4672C409.243 81.0575 409.092 81.4308 408.9 81.5902C408.548 81.3987 408.116 80.906 407.605 80.109C407.094 79.3133 406.695 78.4127 406.406 77.4096C406.119 76.4066 406.03 75.42 406.143 74.4479C406.254 73.477 406.758 72.7212 407.653 72.1788C408.004 71.9879 408.308 72.0594 408.564 72.394C408.82 72.7285 409.027 73.2139 409.188 
73.8509C409.347 74.4885 409.458 75.2206 409.523 76.0485C409.587 76.8769 409.603 77.6727 409.572 78.4375ZM405.328 87.9677C404.832 88.4925 404.28 88.9464 403.674 89.3289C403.066 89.7113 402.443 89.9979 401.804 90.1889C401.164 90.3804 400.589 90.4276 400.078 90.3319C398.64 90.0458 397.537 89.424 396.77 88.4689C396.003 87.5137 395.515 86.3913 395.308 85.1017C395.1 83.8114 395.123 82.4338 395.38 80.969C395.635 79.5042 396.066 78.143 396.674 76.8848C397.281 75.6266 398.017 74.5436 398.879 73.6364C399.742 72.7285 400.685 72.1637 401.708 71.94C401.324 73.5642 401.197 75.2448 401.324 76.98C401.452 78.7157 401.868 80.3478 402.571 81.8762C403.018 82.8011 403.554 83.6441 404.177 84.4083C404.801 85.1732 405.56 85.8259 406.455 86.3671C406.199 86.9089 405.823 87.4422 405.328 87.9677ZM458.378 78.9151C458.474 78.183 458.617 77.4096 458.81 76.5975C459.001 75.786 459.241 74.9976 459.528 74.2333C459.816 73.4685 460.152 72.8079 460.536 72.2509C460.92 71.694 461.326 71.2952 461.758 71.0564C462.19 70.8176 462.629 70.8413 463.076 71.1279C463.556 71.4152 463.851 72.02 463.963 72.943C464.075 73.8673 463.963 74.8539 463.628 75.9054C463.292 76.9563 462.693 77.9436 461.83 78.8666C460.968 79.7914 459.8 80.3957 458.33 80.6823C458.266 80.2369 458.282 79.6478 458.378 78.9151ZM477.7 78.9151C477.796 78.183 477.939 77.4096 478.131 76.5975C478.323 75.786 478.563 74.9976 478.851 74.2333C479.138 73.4685 479.473 72.8079 479.857 72.2509C480.241 71.694 480.649 71.2952 481.08 71.0564C481.512 70.8176 481.951 70.8413 482.398 71.1279C482.878 71.4152 483.173 72.02 483.285 72.943C483.397 73.8673 483.285 74.8539 482.95 75.9054C482.614 76.9563 482.015 77.9436 481.152 78.8666C480.289 79.7914 479.122 80.3957 477.652 80.6823C477.588 80.2369 477.604 79.6478 477.7 78.9151ZM495.655 81.7096C495.287 81.312 494.84 81.0332 494.313 80.8732C493.785 80.7144 493.282 80.6987 492.802 80.826C492.323 80.9532 492.018 81.2878 491.891 81.829C491.635 82.8484 491.228 83.8914 490.669 84.9574C490.109 86.0253 489.422 87.0362 488.607 
87.9913C487.792 88.9464 486.873 89.7913 485.851 90.5234C484.827 91.2561 483.757 91.7816 482.639 92.0991C481.519 92.4506 480.592 92.49 479.857 92.2191C479.122 91.9488 478.539 91.487 478.107 90.8343C477.676 90.181 477.365 89.3931 477.172 88.4689C476.981 87.5459 476.868 86.5907 476.837 85.6029C478.659 85.7307 480.281 85.4047 481.703 84.6235C483.125 83.8435 484.332 82.8077 485.324 81.5181C486.314 80.229 487.065 78.7799 487.576 77.1715C488.087 75.563 488.375 73.963 488.44 72.3703C488.471 70.8734 488.247 69.6073 487.768 68.5722C487.289 67.5377 486.642 66.7328 485.827 66.1601C485.011 65.5862 484.077 65.2522 483.021 65.1565C481.967 65.0607 480.896 65.205 479.809 65.5862C478.498 66.0328 477.388 66.7571 476.478 67.7601C475.567 68.7637 474.807 69.9267 474.2 71.2473C473.592 72.5697 473.113 73.9939 472.761 75.523C472.409 77.0515 472.154 78.5569 471.995 80.0375C471.839 81.4744 471.755 82.8496 471.736 84.1659C471.615 84.4283 471.486 84.692 471.347 84.9574C470.787 86.0253 470.1 87.0362 469.285 87.9913C468.471 88.9464 467.551 89.7913 466.529 90.5234C465.506 91.2561 464.435 91.7816 463.317 92.0991C462.197 92.4506 461.271 92.49 460.536 92.2191C459.8 91.9488 459.217 91.487 458.786 90.8343C458.355 90.181 458.043 89.3931 457.851 88.4689C457.659 87.5459 457.547 86.5907 457.515 85.6029C459.337 85.7307 460.959 85.4047 462.382 84.6235C463.803 83.8435 465.01 82.8077 466.001 81.5181C466.992 80.229 467.743 78.7799 468.254 77.1715C468.765 75.563 469.054 73.963 469.117 72.3703C469.149 70.8734 468.926 69.6073 468.447 68.5722C467.967 67.5377 467.319 66.7328 466.504 66.1601C465.689 65.5862 464.755 65.2522 463.7 65.1565C462.645 65.0607 461.574 65.205 460.488 65.5862C459.176 66.0328 458.066 66.7571 457.156 67.7601C456.245 68.7637 455.485 69.9267 454.878 71.2473C454.271 72.5697 453.792 73.9939 453.44 75.523C453.088 77.0515 452.832 78.5569 452.673 80.0375C452.582 80.8726 452.522 81.6823 452.477 82.4774C452.168 82.7393 451.867 83.0029 451.546 83.2617C450.444 84.1538 449.284 84.9574 448.07 
85.6744C446.855 86.3913 445.592 86.9804 444.283 87.4422C442.971 87.904 441.629 88.1828 440.255 88.278L443.228 56.5578C443.42 55.8887 443.324 55.3003 442.94 54.7906C442.557 54.2809 442.061 53.9306 441.454 53.7397C440.847 53.5482 440.199 53.5645 439.512 53.787C438.824 54.0106 438.258 54.5203 437.81 55.3154C437.586 56.5263 437.354 58.182 437.115 60.2838C436.875 62.3856 436.635 64.6789 436.396 67.1631C436.156 69.6473 435.916 72.2109 435.677 74.8539C435.437 77.4981 435.229 79.966 435.053 82.2587C435.045 82.3605 435.039 82.4526 435.031 82.5532C434.751 82.7896 434.48 83.0277 434.19 83.2617C433.088 84.1538 431.928 84.9574 430.714 85.6744C429.499 86.3913 428.237 86.9804 426.927 87.4422C425.616 87.904 424.273 88.1828 422.899 88.278L425.872 56.5578C426.064 55.8887 425.968 55.3003 425.585 54.7906C425.201 54.2809 424.705 53.9306 424.098 53.7397C423.491 53.5482 422.843 53.5645 422.156 53.787C421.469 54.0106 420.902 54.5203 420.454 55.3154C420.23 56.5263 419.999 58.182 419.76 60.2838C419.519 62.3856 419.28 64.6789 419.04 67.1631C418.8 69.6473 418.561 72.2109 418.321 74.8539C418.082 77.4981 417.873 79.966 417.698 82.2587C417.694 82.3047 417.691 82.3465 417.687 82.3926C417.185 82.6247 416.638 82.8284 416.043 82.9993C415.436 83.175 414.749 83.2786 413.982 83.3102C414.11 82.7362 414.213 82.0993 414.293 81.3987C414.373 80.6987 414.438 79.966 414.486 79.2011C414.534 78.4375 414.549 77.6727 414.534 76.9084C414.517 76.1436 414.477 75.4436 414.414 74.806C414.253 73.4376 413.958 72.1394 413.527 70.9128C413.095 69.6873 412.512 68.6607 411.777 67.8316C411.041 67.0037 410.123 66.4462 409.019 66.1601C407.917 65.8734 406.63 65.9686 405.161 66.4462C402.986 66.1601 401.029 66.3595 399.287 67.0437C397.545 67.7292 396.034 68.7237 394.756 70.0291C393.478 71.3358 392.431 72.8715 391.616 74.6394C390.801 76.4066 390.257 78.2224 389.986 80.0848C389.871 80.8744 389.815 81.6605 389.798 82.4447C389.303 83.4544 388.761 84.3368 388.164 85.0774C387.317 86.1283 386.438 86.9883 385.527 87.6568C384.616 88.3258 
383.713 88.8355 382.819 89.1858C381.923 89.5367 381.124 89.7755 380.421 89.9022C379.59 90.0616 378.791 90.0779 378.024 89.9501C377.257 89.8234 376.553 89.4567 375.915 88.8513C375.403 88.4058 375.011 87.6889 374.74 86.7016C374.468 85.7144 374.309 84.5926 374.261 83.3338C374.213 82.0756 374.261 80.7617 374.404 79.3926C374.548 78.0236 374.795 76.7254 375.147 75.4994C375.499 74.2733 375.945 73.1746 376.49 72.2024C377.032 71.2322 377.672 70.5388 378.408 70.1249C378.822 70.1891 379.079 70.4352 379.175 70.8649C379.271 71.2952 379.294 71.8049 379.246 72.394C379.199 72.9836 379.127 73.5885 379.031 74.2091C378.935 74.8303 378.887 75.3485 378.887 75.7618C379.047 76.6218 379.358 77.2909 379.822 77.7684C380.285 78.246 380.805 78.5254 381.38 78.6042C381.955 78.6842 382.522 78.549 383.083 78.1981C383.641 77.8484 384.096 77.2909 384.449 76.526C384.48 76.5581 384.528 76.5739 384.592 76.5739L385.264 70.5073C385.455 69.6788 385.327 68.9467 384.88 68.3098C384.432 67.6728 383.841 67.3062 383.106 67.211C382.179 65.8734 380.924 65.165 379.342 65.085C377.76 65.0056 376.138 65.5231 374.476 66.6377C373.453 67.371 372.55 68.3813 371.767 69.671C370.983 70.9613 370.345 72.394 369.85 73.9703C369.353 75.5466 369.002 77.2115 368.795 78.963C368.587 80.7144 368.547 82.4187 368.674 84.0738C368.802 85.7307 369.098 87.2913 369.562 88.7555C370.025 90.221 370.672 91.447 371.504 92.4337C372.207 93.2937 373.005 93.9233 373.9 94.3215C374.795 94.7197 375.73 94.9658 376.705 95.0615C377.68 95.1567 378.647 95.1167 379.606 94.9421C380.565 94.7676 381.476 94.5209 382.339 94.2015C383.457 93.7882 384.609 93.2621 385.791 92.6252C386.973 91.9888 388.108 91.224 389.195 90.3319C389.767 89.8628 390.317 89.3513 390.849 88.8028C391.091 89.4016 391.362 89.981 391.688 90.5234C392.551 91.9561 393.717 93.1191 395.188 94.0106C396.657 94.9021 398.464 95.3312 400.605 95.3003C402.907 95.2682 405.032 94.6876 406.982 93.5567C408.932 92.427 410.53 90.7616 411.777 88.5646C413.644 88.5646 415.481 88.258 417.287 87.6489C417.272 
87.8416 417.256 88.0446 417.242 88.2307C417.115 89.9186 417.05 91.0646 417.05 91.67C417.019 92.7209 416.947 94.0185 416.835 95.5627C416.723 97.1075 416.651 98.7318 416.619 100.435C416.588 102.139 416.651 103.859 416.811 105.595C416.971 107.33 417.306 108.907 417.818 110.325C418.328 111.741 419.055 112.944 419.999 113.932C420.941 114.918 422.18 115.508 423.715 115.699C425.345 115.921 426.751 115.635 427.934 114.839C429.116 114.042 430.075 112.952 430.811 111.567C431.546 110.181 432.064 108.581 432.369 106.766C432.672 104.95 432.76 103.127 432.633 101.295C432.504 99.4639 432.168 97.7366 431.625 96.113C431.082 94.4882 430.33 93.1506 429.372 92.0991C429.948 91.9409 430.634 91.6385 431.434 91.1919C432.232 90.7464 433.055 90.2446 433.903 89.687C434.111 89.5501 434.316 89.4058 434.524 89.2652C434.446 90.3937 434.406 91.1985 434.406 91.67C434.375 92.7209 434.303 94.0185 434.19 95.5627C434.079 97.1075 434.007 98.7318 433.975 100.435C433.943 102.139 434.007 103.859 434.167 105.595C434.326 107.33 434.662 108.907 435.173 110.325C435.684 111.741 436.412 112.944 437.354 113.932C438.297 114.918 439.536 115.508 441.071 115.699C442.7 115.921 444.106 115.635 445.289 114.839C446.472 114.042 447.431 112.952 448.166 111.567C448.901 110.181 449.42 108.581 449.724 106.766C450.028 104.95 450.115 103.127 449.988 101.295C449.86 99.4639 449.524 97.7366 448.982 96.113C448.437 94.4882 447.687 93.1506 446.727 92.0991C447.303 91.9409 447.99 91.6385 448.789 91.1919C449.588 90.7464 450.411 90.2446 451.259 89.687C451.699 89.3974 452.136 89.0986 452.573 88.7913C452.737 90.3488 453.091 91.7149 453.655 92.864C454.343 94.2658 455.277 95.3482 456.46 96.113C457.642 96.8766 459.033 97.299 460.632 97.3784C462.23 97.4572 463.971 97.1633 465.858 96.4942C467.264 95.9851 468.486 95.3482 469.525 94.5839C470.563 93.8191 471.498 92.8876 472.33 91.7894C472.378 91.7258 472.423 91.6567 472.47 91.5925C472.618 92.0385 472.782 92.467 472.977 92.864C473.665 94.2658 474.6 95.3482 475.782 96.113C476.964 96.8766 478.355 
97.299 479.953 97.3784C481.551 97.4572 483.293 97.1633 485.179 96.4942C486.586 95.9851 487.808 95.3482 488.847 94.5839C489.885 93.8191 490.82 92.8876 491.652 91.7894C492.483 90.6901 493.241 89.424 493.929 87.9913C494.616 86.558 495.311 84.9186 496.015 83.0708C496.142 82.5617 496.022 82.1078 495.655 81.7096Z' fill='%230D0C23'/%3E%3C/svg%3E%0A");border-radius:6px;box-shadow:0px 2px 3px rgba(0,0,0,0.1)}:root[data-color="dark"] .btn-buymeacoffee,:root[data-color="night"] .btn-buymeacoffee{box-shadow:0px 2px 3px rgba(255,255,255,0.1)}.btn-close{background:var(--background-fg);border:1px dotted var(--border-color);border-radius:4px;cursor:pointer}.dropdown{position:relative}.dropdown-btn{display:flex;flex-direction:row;box-shadow:var(--box-shadow);border-radius:6px;padding:6px;cursor:pointer;white-space:nowrap}.dropdown-btn .icon-select{opacity:.4}.dropdown-menu{display:none;position:absolute;right:0;top:34px;min-width:100px;max-height:240px;overflow-x:auto;background:var(--background);color:var(--color3);box-shadow:var(--box-shadow2);z-index:1;border-radius:6px;padding:3px}.dropdown-menu.show{display:block}.dropdown-menu button,.dropdown-menu a{width:100%;display:flex;gap:2px;padding:6px;align-items:center;justify-content:center;cursor:pointer}.dropdown-menu button:hover,.dropdown-menu a:hover{background:var(--background-fg)} - -/*# sourceMappingURL=home.css.map */ \ No newline at end of file diff --git a/resources/_gen/assets/scss/scss/home.scss_7724f67189cff0c6ae476b070cf609b9.json b/resources/_gen/assets/scss/scss/home.scss_7724f67189cff0c6ae476b070cf609b9.json deleted file mode 100644 index 708cd10..0000000 --- a/resources/_gen/assets/scss/scss/home.scss_7724f67189cff0c6ae476b070cf609b9.json +++ /dev/null @@ -1 +0,0 @@ -{"Target":"scss/home.css","MediaType":"text/css","Data":{}} \ No newline at end of file diff --git a/resources/_gen/assets/scss/scss/theme/default.scss_7724f67189cff0c6ae476b070cf609b9.content 
b/resources/_gen/assets/scss/scss/theme/default.scss_7724f67189cff0c6ae476b070cf609b9.content deleted file mode 100644 index 0c51d7a..0000000 --- a/resources/_gen/assets/scss/scss/theme/default.scss_7724f67189cff0c6ae476b070cf609b9.content +++ /dev/null @@ -1,3 +0,0 @@ -@font-face{font-family:'Inter';font-style:normal;font-weight:400;font-display:swap;src:url("/font/Inter-Regular.woff2?v=3.19") format("woff2"),url("/font/Inter-Regular.woff?v=3.19") format("woff")}@font-face{font-family:'Inter';font-style:italic;font-weight:400;font-display:swap;src:url("/font/Inter-Italic.woff2?v=3.19") format("woff2"),url("/font/Inter-Italic.woff?v=3.19") format("woff")}@font-face{font-family:'Inter';font-style:normal;font-weight:600;font-display:swap;src:url("/font/Inter-SemiBold.woff2?v=3.19") format("woff2"),url("/font/Inter-SemiBold.woff?v=3.19") format("woff")}@font-face{font-family:'Inter';font-style:italic;font-weight:600;font-display:swap;src:url("/font/Inter-SemiBoldItalic.woff2?v=3.19") format("woff2"),url("/font/Inter-SemiBoldItalic.woff?v=3.19") format("woff")}.icon{display:block;width:18px;height:18px}.icon-facebook{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 30 30' fill='%231877f2' %3E%3Cpath d='M30 15.091C30 6.756 23.285 0 15 0S0 6.756 0 15.091C0 22.625 5.484 28.868 12.656 30V19.454H8.848V15.09h3.808v-3.324c0-3.782 2.239-5.872 5.666-5.872 1.64 0 3.358.295 3.358.295v3.714h-1.893c-1.863 0-2.443 1.164-2.443 2.358v2.83h4.16l-.665 4.362h-3.495V30C24.516 28.868 30 22.625 30 15.091z'%3E%3C/path%3E%3C/svg%3E")}.icon-twitter{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 24 24' fill='%231d9bf0' %3E%3Cpath d='M24 4.557c-.883.392-1.832.656-2.828.775 1.017-.609 1.798-1.574 2.165-2.724-.951.564-2.005.974-3.127 1.195-.897-.957-2.178-1.555-3.594-1.555-3.179 0-5.515 2.966-4.797 6.045-4.091-.205-7.719-2.165-10.148-5.144-1.29 2.213-.669 
5.108 1.523 6.574-.806-.026-1.566-.247-2.229-.616-.054 2.281 1.581 4.415 3.949 4.89-.693.188-1.452.232-2.224.084.626 1.956 2.444 3.379 4.6 3.419-2.07 1.623-4.678 2.348-7.29 2.04 2.179 1.397 4.768 2.212 7.548 2.212 9.142 0 14.307-7.721 13.995-14.646.962-.695 1.797-1.562 2.457-2.549z'/%3E%3C/svg%3E");transform:scale(1.1)}.icon-youtube{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 24 24' fill='%23ff0000' %3E%3Cpath d='M23.498 6.186a3.016 3.016 0 0 0-2.122-2.136C19.505 3.545 12 3.545 12 3.545s-7.505 0-9.377.505A3.017 3.017 0 0 0 .502 6.186C0 8.07 0 12 0 12s0 3.93.502 5.814a3.016 3.016 0 0 0 2.122 2.136c1.871.505 9.376.505 9.376.505s7.505 0 9.377-.505a3.015 3.015 0 0 0 2.122-2.136C24 15.93 24 12 24 12s0-3.93-.502-5.814zM9.545 15.568V8.432L15.818 12l-6.273 3.568z'%3E%3C/path%3E%3C/svg%3E");transform:scale(1.1)}.icon-github{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%2324292f' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E")}:root[data-color="dark"] .icon-github,:root[data-color="night"] .icon-github{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%236e7681' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 
0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E")}.icon-menu{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3Cpath d='M4,18h11c0.55,0,1-0.45,1-1v0c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,17.55,3.45,18,4,18z M4,13h8c0.55,0,1-0.45,1-1v0 c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,12.55,3.45,13,4,13z M3,7L3,7c0,0.55,0.45,1,1,1h11c0.55,0,1-0.45,1-1v0 c0-0.55-0.45-1-1-1H4C3.45,6,3,6.45,3,7z M20.3,14.88L17.42,12l2.88-2.88c0.39-0.39,0.39-1.02,0-1.41l0,0 c-0.39-0.39-1.02-0.39-1.41,0l-3.59,3.59c-0.39,0.39-0.39,1.02,0,1.41l3.59,3.59c0.39,0.39,1.02,0.39,1.41,0l0,0 C20.68,15.91,20.69,15.27,20.3,14.88z'/%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3C/svg%3E")}.icon-toc{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' fill='%23000000'%3E%3Cpath d='M0 0h24v24H0V0zm0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M3 9h14V7H3v2zm0 4h14v-2H3v2zm0 4h14v-2H3v2zm16 0h2v-2h-2v2zm0-10v2h2V7h-2zm0 6h2v-2h-2v2z'/%3E%3C/svg%3E")}.icon-close{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41z'/%3E%3C/svg%3E")}.icon-home{background-image:url("data:image/svg+xml,%3Csvg 
xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Crect fill='none' height='24' width='24'/%3E%3Cpolygon opacity='.3' points='18,19 13,19 13,15 11,15 11,19 6,19 6,10.1 12,5.52 18,10.1'/%3E%3Cpath d='M12,3L6,7.58V6H4v3.11L1,11.4l1.21,1.59L4,11.62V21h16v-9.38l1.79,1.36L23,11.4L12,3z M18,19h-5v-4h-2v4H6v-8.9l6-4.58 l6,4.58V19z M10,1c0,1.66-1.34,3-3,3C6.45,4,6,4.45,6,5H4c0-1.66,1.34-3,3-3c0.55,0,1-0.45,1-1H10z'/%3E%3C/svg%3E")}.icon-book{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg/%3E%3Cg%3E%3Cpath d='M21,5c-1.11-0.35-2.33-0.5-3.5-0.5c-1.95,0-4.05,0.4-5.5,1.5c-1.45-1.1-3.55-1.5-5.5-1.5S2.45,4.9,1,6v14.65 c0,0.25,0.25,0.5,0.5,0.5c0.1,0,0.15-0.05,0.25-0.05C3.1,20.45,5.05,20,6.5,20c1.95,0,4.05,0.4,5.5,1.5c1.35-0.85,3.8-1.5,5.5-1.5 c1.65,0,3.35,0.3,4.75,1.05c0.1,0.05,0.15,0.05,0.25,0.05c0.25,0,0.5-0.25,0.5-0.5V6C22.4,5.55,21.75,5.25,21,5z M3,18.5V7 c1.1-0.35,2.3-0.5,3.5-0.5c1.34,0,3.13,0.41,4.5,0.99v11.5C9.63,18.41,7.84,18,6.5,18C5.3,18,4.1,18.15,3,18.5z M21,18.5 c-1.1-0.35-2.3-0.5-3.5-0.5c-1.34,0-3.13,0.41-4.5,0.99V7.49c1.37-0.59,3.16-0.99,4.5-0.99c1.2,0,2.4,0.15,3.5,0.5V18.5z'/%3E%3Cpath d='M11,7.49C9.63,6.91,7.84,6.5,6.5,6.5C5.3,6.5,4.1,6.65,3,7v11.5C4.1,18.15,5.3,18,6.5,18 c1.34,0,3.13,0.41,4.5,0.99V7.49z' opacity='.3'/%3E%3C/g%3E%3Cg%3E%3Cpath d='M17.5,10.5c0.88,0,1.73,0.09,2.5,0.26V9.24C19.21,9.09,18.36,9,17.5,9c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,10.69,16.18,10.5,17.5,10.5z'/%3E%3Cpath d='M17.5,13.16c0.88,0,1.73,0.09,2.5,0.26V11.9c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,13.36,16.18,13.16,17.5,13.16z'/%3E%3Cpath d='M17.5,15.83c0.88,0,1.73,0.09,2.5,0.26v-1.52c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 
C14.99,16.02,16.18,15.83,17.5,15.83z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E")}.icon-theme{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0z' fill='none'/%3E%3Cpath d='M12 3c-4.97 0-9 4.03-9 9s4.03 9 9 9c.83 0 1.5-.67 1.5-1.5 0-.39-.15-.74-.39-1.01-.23-.26-.38-.61-.38-.99 0-.83.67-1.5 1.5-1.5H16c2.76 0 5-2.24 5-5 0-4.42-4.03-8-9-8zm-5.5 9c-.83 0-1.5-.67-1.5-1.5S5.67 9 6.5 9 8 9.67 8 10.5 7.33 12 6.5 12zm3-4C8.67 8 8 7.33 8 6.5S8.67 5 9.5 5s1.5.67 1.5 1.5S10.33 8 9.5 8zm5 0c-.83 0-1.5-.67-1.5-1.5S13.67 5 14.5 5s1.5.67 1.5 1.5S15.33 8 14.5 8zm3 4c-.83 0-1.5-.67-1.5-1.5S16.67 9 17.5 9s1.5.67 1.5 1.5-.67 1.5-1.5 1.5z'/%3E%3C/svg%3E")}.icon-brightness{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M18 9.52V6h-3.52L12 3.52 9.52 6H6v3.52L3.52 12 6 14.48V18h3.52L12 20.48 14.48 18H18v-3.52L20.48 12 18 9.52zm-6 7.98v-11c3.03 0 5.5 2.47 5.5 5.5s-2.47 5.5-5.5 5.5z' opacity='.3'/%3E%3Cpath d='M20 8.69V4h-4.69L12 .69 8.69 4H4v4.69L.69 12 4 15.31V20h4.69L12 23.31 15.31 20H20v-4.69L23.31 12 20 8.69zm-2 5.79V18h-3.52L12 20.48 9.52 18H6v-3.52L3.52 12 6 9.52V6h3.52L12 3.52 14.48 6H18v3.52L20.48 12 18 14.48zM12 6.5v11c3.03 0 5.5-2.47 5.5-5.5S15.03 6.5 12 6.5z'/%3E%3C/svg%3E")}.icon-light-mode{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Ccircle cx='12' cy='12' opacity='.3' r='3'/%3E%3Cpath d='M12,9c1.65,0,3,1.35,3,3s-1.35,3-3,3s-3-1.35-3-3S10.35,9,12,9 M12,7c-2.76,0-5,2.24-5,5s2.24,5,5,5s5-2.24,5-5 S14.76,7,12,7L12,7z M2,13l2,0c0.55,0,1-0.45,1-1s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S1.45,13,2,13z M20,13l2,0c0.55,0,1-0.45,1-1 s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S19.45,13,20,13z 
M11,2v2c0,0.55,0.45,1,1,1s1-0.45,1-1V2c0-0.55-0.45-1-1-1S11,1.45,11,2z M11,20v2c0,0.55,0.45,1,1,1s1-0.45,1-1v-2c0-0.55-0.45-1-1-1C11.45,19,11,19.45,11,20z M5.99,4.58c-0.39-0.39-1.03-0.39-1.41,0 c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0s0.39-1.03,0-1.41L5.99,4.58z M18.36,16.95 c-0.39-0.39-1.03-0.39-1.41,0c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0c0.39-0.39,0.39-1.03,0-1.41 L18.36,16.95z M19.42,5.99c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06c-0.39,0.39-0.39,1.03,0,1.41 s1.03,0.39,1.41,0L19.42,5.99z M7.05,18.36c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06 c-0.39,0.39-0.39,1.03,0,1.41s1.03,0.39,1.41,0L7.05,18.36z'/%3E%3C/svg%3E")}.icon-dark-mode{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cpath d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27 C17.45,17.19,14.93,19,12,19c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z' opacity='.3'/%3E%3Cpath d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27C17.45,17.19,14.93,19,12,19 c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z M12,3c-4.97,0-9,4.03-9,9s4.03,9,9,9s9-4.03,9-9c0-0.46-0.04-0.92-0.1-1.36 c-0.98,1.37-2.58,2.26-4.4,2.26c-2.98,0-5.4-2.42-5.4-5.4c0-1.81,0.89-3.42,2.26-4.4C12.92,3.04,12.46,3,12,3L12,3z'/%3E%3C/svg%3E")}.icon-night-mode{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg%3E%3Cpath d='M8.1,14.15C9.77,14.63,11,16.17,11,18c0,0.68-0.19,1.31-0.48,1.87c0.48,0.09,0.97,0.14,1.48,0.14 c1.48,0,2.9-0.41,4.13-1.15c-2.62-0.92-5.23-2.82-6.8-5.86C7.74,9.94,7.78,7.09,8.29,4.9c-2.57,1.33-4.3,4.01-4.3,7.1c0,0,0,0,0,0 c0.01,0,0.01,0,0.02,0C5.66,12,7.18,12.83,8.1,14.15z' 
opacity='.3'/%3E%3Cpath d='M19.78,17.51c-2.47,0-6.57-1.33-8.68-5.43C8.77,7.57,10.6,3.6,11.63,2.01C6.27,2.2,1.98,6.59,1.98,12 c0,0.14,0.02,0.28,0.02,0.42C2.61,12.16,3.28,12,3.98,12c0,0,0,0,0,0c0-3.09,1.73-5.77,4.3-7.1C7.78,7.09,7.74,9.94,9.32,13 c1.57,3.04,4.18,4.95,6.8,5.86c-1.23,0.74-2.65,1.15-4.13,1.15c-0.5,0-1-0.05-1.48-0.14c-0.37,0.7-0.94,1.27-1.64,1.64 c0.98,0.32,2.03,0.5,3.11,0.5c3.5,0,6.58-1.8,8.37-4.52C20.18,17.5,19.98,17.51,19.78,17.51z'/%3E%3Cpath d='M7,16l-0.18,0C6.4,14.84,5.3,14,4,14c-1.66,0-3,1.34-3,3s1.34,3,3,3c0.62,0,2.49,0,3,0c1.1,0,2-0.9,2-2 C9,16.9,8.1,16,7,16z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E")}.icon-translate{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12.65 15.67c.14-.36.05-.77-.23-1.05l-2.09-2.06.03-.03c1.74-1.94 2.98-4.17 3.71-6.53h1.94c.54 0 .99-.45.99-.99v-.02c0-.54-.45-.99-.99-.99H10V3c0-.55-.45-1-1-1s-1 .45-1 1v1H1.99c-.54 0-.99.45-.99.99 0 .55.45.99.99.99h10.18C11.5 7.92 10.44 9.75 9 11.35c-.81-.89-1.49-1.86-2.06-2.88-.16-.29-.45-.47-.78-.47-.69 0-1.13.75-.79 1.35.63 1.13 1.4 2.21 2.3 3.21L3.3 16.87c-.4.39-.4 1.03 0 1.42.39.39 1.02.39 1.42 0L9 14l2.02 2.02c.51.51 1.38.32 1.63-.35zM17.5 10c-.6 0-1.14.37-1.35.94l-3.67 9.8c-.24.61.22 1.26.87 1.26.39 0 .74-.24.88-.61l.89-2.39h4.75l.9 2.39c.14.36.49.61.88.61.65 0 1.11-.65.88-1.26l-3.67-9.8c-.22-.57-.76-.94-1.36-.94zm-1.62 7l1.62-4.33L19.12 17h-3.24z'/%3E%3C/svg%3E")}.icon-search{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M15.5 14h-.79l-.28-.27c1.2-1.4 1.82-3.31 1.48-5.34-.47-2.78-2.79-5-5.59-5.34-4.23-.52-7.79 3.04-7.27 7.27.34 2.8 2.56 5.12 5.34 5.59 2.03.34 3.94-.28 5.34-1.48l.27.28v.79l4.25 4.25c.41.41 1.08.41 1.49 0 .41-.41.41-1.08 0-1.49L15.5 14zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 
14 9.5 14z'/%3E%3C/svg%3E")}.icon-select{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12 5.83L15.17 9l1.41-1.41L12 3 7.41 7.59 8.83 9 12 5.83zm0 12.34L8.83 15l-1.41 1.41L12 21l4.59-4.59L15.17 15 12 18.17z'/%3E%3C/svg%3E")}.icon-calendar{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Crect height='2' opacity='.3' width='14' x='5' y='6'/%3E%3Cpath d='M19,4h-1V2h-2v2H8V2H6v2H5C3.89,4,3.01,4.9,3.01,6L3,20c0,1.1,0.89,2,2,2h14c1.1,0,2-0.9,2-2V6C21,4.9,20.1,4,19,4z M19,20 H5V10h14V20z M19,8H5V6h14V8z M9,14H7v-2h2V14z M13,14h-2v-2h2V14z M17,14h-2v-2h2V14z M9,18H7v-2h2V18z M13,18h-2v-2h2V18z M17,18 h-2v-2h2V18z'/%3E%3C/g%3E%3C/svg%3E")}.icon-next{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M24 24H0V0h24v24z' fill='none' opacity='.87'/%3E%3Cpath d='M7.38 21.01c.49.49 1.28.49 1.77 0l8.31-8.31c.39-.39.39-1.02 0-1.41L9.15 2.98c-.49-.49-1.28-.49-1.77 0s-.49 1.28 0 1.77L14.62 12l-7.25 7.25c-.48.48-.48 1.28.01 1.76z' fill='%23328ac1'/%3E%3C/svg%3E")}.icon-prev{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cg%3E%3Cpath d='M16.88,2.88L16.88,2.88c-0.49-0.49-1.28-0.49-1.77,0l-8.41,8.41c-0.39,0.39-0.39,1.02,0,1.41l8.41,8.41 c0.49,0.49,1.28,0.49,1.77,0l0,0c0.49-0.49,0.49-1.28,0-1.77L9.54,12l7.35-7.35C17.37,4.16,17.37,3.37,16.88,2.88z' fill='%23328ac1'/%3E%3C/g%3E%3C/svg%3E")}.icon-copyright{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' 
fill='none'/%3E%3Cpath d='M10.08 10.86c.05-.33.16-.62.3-.87s.34-.46.59-.62c.24-.15.54-.22.91-.23.23.01.44.05.63.13.2.09.38.21.52.36s.25.33.34.53.13.42.14.64h1.79c-.02-.47-.11-.9-.28-1.29s-.4-.73-.7-1.01-.66-.5-1.08-.66-.88-.23-1.39-.23c-.65 0-1.22.11-1.7.34s-.88.53-1.2.92-.56.84-.71 1.36S8 11.29 8 11.87v.27c0 .58.08 1.12.23 1.64s.39.97.71 1.35.72.69 1.2.91c.48.22 1.05.34 1.7.34.47 0 .91-.08 1.32-.23s.77-.36 1.08-.63.56-.58.74-.94.29-.74.3-1.15h-1.79c-.01.21-.06.4-.15.58s-.21.33-.36.46-.32.23-.52.3c-.19.07-.39.09-.6.1-.36-.01-.66-.08-.89-.23-.25-.16-.45-.37-.59-.62s-.25-.55-.3-.88-.08-.67-.08-1v-.27c0-.35.03-.68.08-1.01zM12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8z'/%3E%3C/svg%3E")}.icon-love{background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' fill='%23ff4d4d' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M13.35 20.13c-.76.69-1.93.69-2.69-.01l-.11-.1C5.3 15.27 1.87 12.16 2 8.28c.06-1.7.93-3.33 2.34-4.29 2.64-1.8 5.9-.96 7.66 1.1 1.76-2.06 5.02-2.91 7.66-1.1 1.41.96 2.28 2.59 2.34 4.29.14 3.88-3.3 6.99-8.55 11.76l-.1.09z'/%3E%3C/svg%3E")}:root{--font-family: 'Inter', sans-serif;--font-family-brand: 'Times', serif;--font-family-code: 'Menlo', monospace;--background: #ffffff;--color: #355265;--color2: #274457;--color3: #476d86;--color-anchor: #328ac1;--color-hover: #4b9dd0;--background-fg: #f7f7f7;--background-fg2: #ebebeb;--border-color: #dddddd;--box-shadow: 0 0 1px rgba(0, 0, 0, .7);--box-shadow2: 0 0 3px rgba(0, 0, 0, .2);--blur: 10px;--home-cover-background: radial-gradient(circle, rgba(255,255,255,1) 0%, rgba(255,255,250,1) 25%, rgba(214,219,220,1) 50%, rgba(255,255,250,1) 75%, rgba(255,255,255,1) 100%);--icon-filter: invert(41%) sepia(19%) saturate(840%) hue-rotate(161deg) brightness(92%) contrast(92%);--chroma-base00: #f9f9f9;--chroma-base01: #e0e0e0;--chroma-base02: rgba(159, 218, 159, 
.2);--chroma-base03: #8e908c;--chroma-base04: #969896;--chroma-base05: #4d4d4c;--chroma-base06: #282a2e;--chroma-base07: #1d1f21;--chroma-base08: #c82829;--chroma-base09: #f5871f;--chroma-base0A: #eab700;--chroma-base0B: #718c00;--chroma-base0C: #3e999f;--chroma-base0D: #4271ae;--chroma-base0E: #8959a8;--chroma-base0F: #a3685a}:root[data-color="dark"]{--background: #121212;--color: #efefef;--color2: #ffffff;--color3: #b3b3b3;--background-fg: #333333;--background-fg2: #1f1f1f;--border-color: rgba(255, 255, 255, .4);--box-shadow: 0 0 1px rgba(255, 255, 255, 1);--box-shadow2: 0 0 3px rgba(255, 255, 255, .6);--home-cover-background: radial-gradient(circle, rgba(23,23,25,1) 0%, rgba(18,18,0,1) 25%, rgba(32,32,32,1) 50%, rgba(18,18,0,1) 75%, rgba(23,23,25,1) 100%);--icon-filter: invert(83%) sepia(0%) saturate(1582%) hue-rotate(126deg) brightness(86%) contrast(80%);--chroma-base00: #080808;--chroma-base01: #393939;--chroma-base02: rgba(159, 218, 159, .1);--chroma-base03: #999999;--chroma-base04: #b4b7b4;--chroma-base05: #cccccc;--chroma-base06: #e0e0e0;--chroma-base07: #ffffff;--chroma-base08: #f2777a;--chroma-base09: #f99157;--chroma-base0A: #ffcc66;--chroma-base0B: #99cc99;--chroma-base0C: #66cccc;--chroma-base0D: #6699cc;--chroma-base0E: #cc99cc;--chroma-base0F: #a3685a}:root[data-color="night"]{--background: #333333;--color: #cccccc;--color2: #dedede;--color3: #9d9d9d;--background-fg: #444444;--background-fg2: #303030;--border-color: rgba(255, 255, 255, 0.2);--box-shadow: 0 0 1px rgba(225, 255, 255, 1);--box-shadow2: 0 0 3px rgba(255, 255, 255, .6);--home-cover-background: radial-gradient(circle, rgba(52,52,52,1) 0%, rgba(42,42,42,1) 25%, rgba(57,57,57,1) 50%, rgba(42,42,42,1) 75%, rgba(52,52,52,1) 100%);--icon-filter: invert(60%) sepia(25%) saturate(20%) hue-rotate(343deg) brightness(98%) contrast(94%);--chroma-base00: #1e1e1e;--chroma-base01: #323537;--chroma-base02: rgba(159, 218, 159, .1);--chroma-base03: #5f5a60;--chroma-base04: #838184;--chroma-base05: 
#a7a7a7;--chroma-base06: #c3c3c3;--chroma-base07: #ffffff;--chroma-base08: #cf6a4c;--chroma-base09: #cda869;--chroma-base0A: #f9ee98;--chroma-base0B: #8f9d6a;--chroma-base0C: #afc4db;--chroma-base0D: #7587a6;--chroma-base0E: #9b859d;--chroma-base0F: #9b703f}.icon:not(.icon-colored){filter:var(--icon-filter)} - -/*# sourceMappingURL=default.css.map */ \ No newline at end of file diff --git a/resources/_gen/assets/scss/scss/theme/default.scss_7724f67189cff0c6ae476b070cf609b9.json b/resources/_gen/assets/scss/scss/theme/default.scss_7724f67189cff0c6ae476b070cf609b9.json deleted file mode 100644 index f9a48ed..0000000 --- a/resources/_gen/assets/scss/scss/theme/default.scss_7724f67189cff0c6ae476b070cf609b9.json +++ /dev/null @@ -1 +0,0 @@ -{"Target":"scss/theme/default.css","MediaType":"text/css","Data":{}} \ No newline at end of file diff --git a/static/images/LOGO.png b/static/images/LOGO.png deleted file mode 100644 index 08a2d34..0000000 Binary files a/static/images/LOGO.png and /dev/null differ diff --git a/static/images/chatbot/BaseAgent.png b/static/images/chatbot/BaseAgent.png deleted file mode 100644 index 1f022d3..0000000 Binary files a/static/images/chatbot/BaseAgent.png and /dev/null differ diff --git a/static/images/chatbot/agent-flow.png b/static/images/chatbot/agent-flow.png deleted file mode 100644 index 2358cc8..0000000 Binary files a/static/images/chatbot/agent-flow.png and /dev/null differ diff --git a/static/images/chatbot/devops-chatbot-module-v2.png b/static/images/chatbot/devops-chatbot-module-v2.png deleted file mode 100644 index b905e86..0000000 Binary files a/static/images/chatbot/devops-chatbot-module-v2.png and /dev/null differ diff --git a/static/images/chatbot/luban.png b/static/images/chatbot/luban.png deleted file mode 100644 index 9cf8576..0000000 Binary files a/static/images/chatbot/luban.png and /dev/null differ diff --git a/static/images/chatbot/objective_v4.png b/static/images/chatbot/objective_v4.png deleted file mode 100644 index 
82cacac..0000000 Binary files a/static/images/chatbot/objective_v4.png and /dev/null differ diff --git a/static/images/codefuse-evalution/EnglishIntroduction.png b/static/images/codefuse-evalution/EnglishIntroduction.png deleted file mode 100644 index 5e37155..0000000 Binary files a/static/images/codefuse-evalution/EnglishIntroduction.png and /dev/null differ diff --git "a/static/images/codefuse-evalution/\344\270\255\346\226\207\344\273\213\347\273\215.png" "b/static/images/codefuse-evalution/\344\270\255\346\226\207\344\273\213\347\273\215.png" deleted file mode 100644 index 14c1ee7..0000000 Binary files "a/static/images/codefuse-evalution/\344\270\255\346\226\207\344\273\213\347\273\215.png" and /dev/null differ diff --git a/static/images/codefuse-modelcache/modelcache_modules_20231114.png b/static/images/codefuse-modelcache/modelcache_modules_20231114.png deleted file mode 100644 index 596c1ac..0000000 Binary files a/static/images/codefuse-modelcache/modelcache_modules_20231114.png and /dev/null differ diff --git a/static/images/codefuse-query/introduction01.png b/static/images/codefuse-query/introduction01.png deleted file mode 100644 index b86708b..0000000 Binary files a/static/images/codefuse-query/introduction01.png and /dev/null differ diff --git a/static/images/codefuse-query/introduction02.png b/static/images/codefuse-query/introduction02.png deleted file mode 100644 index 5cd1949..0000000 Binary files a/static/images/codefuse-query/introduction02.png and /dev/null differ diff --git a/static/images/codefuse-query/introduction03.png b/static/images/codefuse-query/introduction03.png deleted file mode 100644 index de6f7f3..0000000 Binary files a/static/images/codefuse-query/introduction03.png and /dev/null differ diff --git a/static/images/codefuse-query/macos_cannot_open_godel.png b/static/images/codefuse-query/macos_cannot_open_godel.png deleted file mode 100644 index d417a04..0000000 Binary files a/static/images/codefuse-query/macos_cannot_open_godel.png 
and /dev/null differ diff --git a/static/images/codefuse-query/panel.jpg b/static/images/codefuse-query/panel.jpg deleted file mode 100644 index 2c05e75..0000000 Binary files a/static/images/codefuse-query/panel.jpg and /dev/null differ diff --git a/static/images/codefuse-query/security_allow_godel_run.png b/static/images/codefuse-query/security_allow_godel_run.png deleted file mode 100644 index d01d2d5..0000000 Binary files a/static/images/codefuse-query/security_allow_godel_run.png and /dev/null differ diff --git a/static/images/codefuse-query/toolchain01.png b/static/images/codefuse-query/toolchain01.png deleted file mode 100644 index b271660..0000000 Binary files a/static/images/codefuse-query/toolchain01.png and /dev/null differ diff --git a/static/images/codefuse-query/toolchain02.gif b/static/images/codefuse-query/toolchain02.gif deleted file mode 100644 index 6c2b75e..0000000 Binary files a/static/images/codefuse-query/toolchain02.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain03.gif b/static/images/codefuse-query/toolchain03.gif deleted file mode 100644 index 8471c8d..0000000 Binary files a/static/images/codefuse-query/toolchain03.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain04.gif b/static/images/codefuse-query/toolchain04.gif deleted file mode 100644 index 611c286..0000000 Binary files a/static/images/codefuse-query/toolchain04.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain05.gif b/static/images/codefuse-query/toolchain05.gif deleted file mode 100644 index e7b2905..0000000 Binary files a/static/images/codefuse-query/toolchain05.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain06.gif b/static/images/codefuse-query/toolchain06.gif deleted file mode 100644 index 49ac0fe..0000000 Binary files a/static/images/codefuse-query/toolchain06.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain07.gif 
b/static/images/codefuse-query/toolchain07.gif deleted file mode 100644 index f6d276e..0000000 Binary files a/static/images/codefuse-query/toolchain07.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain08.gif b/static/images/codefuse-query/toolchain08.gif deleted file mode 100644 index 440d7a6..0000000 Binary files a/static/images/codefuse-query/toolchain08.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain09.gif b/static/images/codefuse-query/toolchain09.gif deleted file mode 100644 index 02f6696..0000000 Binary files a/static/images/codefuse-query/toolchain09.gif and /dev/null differ diff --git a/static/images/codefuse-query/toolchain10.gif b/static/images/codefuse-query/toolchain10.gif deleted file mode 100644 index eadb3c0..0000000 Binary files a/static/images/codefuse-query/toolchain10.gif and /dev/null differ diff --git a/static/images/codefuse-query/wechat_qrcode.JPG b/static/images/codefuse-query/wechat_qrcode.JPG deleted file mode 100644 index 7c49013..0000000 Binary files a/static/images/codefuse-query/wechat_qrcode.JPG and /dev/null differ diff --git a/static/images/devops_eval/categroy_mapping.json b/static/images/devops_eval/categroy_mapping.json deleted file mode 100644 index 4ed77ff..0000000 --- a/static/images/devops_eval/categroy_mapping.json +++ /dev/null @@ -1,479 +0,0 @@ -{ - "Visualization.csv":[ - "visualization", - "可视化", - { - "dev":5, - "test":44 - }, - "Visualization.csv" - ], - "Logging.csv":[ - "logging", - "日志", - { - "dev":5, - "test":100 - }, - "Logging.csv" - ], - "Storage.csv":[ - "storage", - "存储", - { - "dev":5, - "test":36 - }, - "Storage.csv" - ], - "DataAcquisition.csv":[ - "data acquisition", - "数据采集", - { - "dev":5, - "test":36 - }, - "DataAcquisition.csv" - ], - "IntegrationTesting.csv":[ - "integration testing", - "集成测试", - { - "dev":5, - "test":31 - }, - "IntegrationTesting.csv" - ], - "UserAcceptanceTesting.csv":[ - "user acceptance testing", - "用户验收测试", - { - "dev":5, - 
"test":39 - }, - "UserAcceptanceTesting.csv" - ], - "SecurityTesting.csv":[ - "security testing", - "安全测试", - { - "dev":5, - "test":38 - }, - "SecurityTesting.csv" - ], - "UnitTesting.csv":[ - "unit testing", - "单元测试", - { - "dev":5, - "test":32 - }, - "UnitTesting.csv" - ], - "PerformanceTesting.csv":[ - "performance testing", - "性能测试", - { - "dev":5, - "test":36 - }, - "PerformanceTesting.csv" - ], - "SystemTesting.csv":[ - "system testing", - "系统测试", - { - "dev":5, - "test":52 - }, - "SystemTesting.csv" - ], - "ProgM.csv":[ - "programme management", - "进度管理", - { - "dev":5, - "test":21 - }, - "ProgM.csv" - ], - "REQM.csv":[ - "requirements management", - "需求管理", - { - "dev":5, - "test":24 - }, - "REQM.csv" - ], - "RiskMgmt.csv":[ - "risk management", - "风险管理", - { - "dev":5, - "test":21 - }, - "RiskMgmt.csv" - ], - "InfrastructureAsCode.csv":[ - "infrastructure as code", - "基础设施即代码", - { - "dev":5, - "test":34 - }, - "InfrastructureAsCode.csv" - ], - "Provisioning.csv":[ - "provisioning", - "置备", - { - "dev":5, - "test":19 - }, - "Provisioning.csv" - ], - "ConfigMgmt.csv":[ - "config management", - "配置管理", - { - "dev":5, - "test":100 - }, - "ConfigMgmt.csv" - ], - "Azure.csv":[ - "microsoft azure", - "微软云服务", - { - "dev":5, - "test":27 - }, - "Azure.csv" - ], - "GoogleCloud.csv":[ - "google cloud", - "谷歌云服务", - { - "dev":5, - "test":31 - }, - "GoogleCloud.csv" - ], - "AWS.csv":[ - "amazon web services", - "亚马逊云服务", - { - "dev":5, - "test":44 - }, - "AWS.csv" - ], - "LogDesign.csv":[ - "log design", - "日志设计", - { - "dev":5, - "test":33 - }, - "LogDesign.csv" - ], - "ServiceDesign.csv":[ - "service design", - "服务设计", - { - "dev":5, - "test":44 - }, - "ServiceDesign.csv" - ], - "CapabilityDesign.csv":[ - "capability design", - "容量设计", - { - "dev":5, - "test":33 - }, - "CapabilityDesign.csv" - ], - "CloudNativeDesign.csv":[ - "cloud native design", - "云原生设计", - { - "dev":5, - "test":44 - }, - "CloudNativeDesign.csv" - ], - "CacheDesign.csv":[ - "cache design", - 
"缓存设计", - { - "dev":5, - "test":28 - }, - "CacheDesign.csv" - ], - "DBDesign.csv":[ - "database design", - "数据库设计", - { - "dev":5, - "test":38 - }, - "DBDesign.csv" - ], - "ArtificialIntelligence.csv":[ - "artificial intelligence", - "人工智能", - { - "dev":5, - "test":45 - }, - "ArtificialIntelligence.csv" - ], - "ComputerBasics.csv":[ - "computer basics", - "计算机基础", - { - "dev":5, - "test":100 - }, - "ComputerBasics.csv" - ], - "DataBase.csv":[ - "database", - "数据库", - { - "dev":5, - "test":75 - }, - "DataBase.csv" - ], - "ComputerNetwork.csv":[ - "computer network", - "计算机网络", - { - "dev":5, - "test":88 - }, - "ComputerNetwork.csv" - ], - "OperatingSystem.csv":[ - "operating system", - "操作系统", - { - "dev":5, - "test":36 - }, - "OperatingSystem.csv" - ], - "Go.csv":[ - "go", - "go语言", - { - "dev":5, - "test":100 - }, - "Go.csv" - ], - "Java.csv":[ - "java", - "java语言", - { - "dev":5, - "test":100 - }, - "Java.csv" - ], - "C:C++.csv":[ - "c/c++", - "c/c++语言", - { - "dev":5, - "test":100 - }, - "C:C++.csv" - ], - "Python.csv":[ - "python", - "python语言", - { - "dev":5, - "test":73 - }, - "Python.csv" - ], - "BigData.csv":[ - "big data", - "大数据", - { - "dev":5, - "test":15 - }, - "BigData.csv" - ], - "Front-end.csv":[ - "front-end", - "前端", - { - "dev":5, - "test":100 - }, - "Front-end.csv" - ], - "MobileApp.csv":[ - "mobile app", - "移动应用", - { - "dev":5, - "test":100 - }, - "MobileApp.csv" - ], - "MachineLearning.csv":[ - "machine learning", - "机器学习", - { - "dev":5, - "test":69 - }, - "MachineLearning.csv" - ], - "Back-end.csv":[ - "back-end", - "后端", - { - "dev":5, - "test":100 - }, - "Back-end.csv" - ], - "ArtifactMgmt.csv":[ - "artifact management", - "产出物管理", - { - "dev":5, - "test":12 - }, - "ArtifactMgmt.csv" - ], - "CI:CD.csv":[ - "cd/cd", - "持续集成/持续部署", - { - "dev":5, - "test":100 - }, - "CI:CD.csv" - ], - "Linux.csv":[ - "linux", - "linux操作系统", - { - "dev":5, - "test":100 - }, - "Linux.csv" - ], - "ContainerOrchestration.csv":[ - "container orchestration", - 
"容器编排", - { - "dev":5, - "test":100 - }, - "ContainerOrchestration.csv" - ], - "Virtualization.csv":[ - "virtualization", - "虚拟化技术", - { - "dev":5, - "test":34 - }, - "Virtualization.csv" - ], - "TimeSeriesAnomalyDetection.csv":[ - "time series anomaly detection", - "时序异常检测", - { - "dev":5, - "test":300 - }, - "TimeSeriesAnomalyDetection.csv" - ], - "TimeSeriesClassification.csv":[ - "time series classification", - "时序分类", - { - "dev":5, - "test":200 - }, - "TimeSeriesClassification.csv" - ], - "RootCauseAnalysis.csv":[ - "root cause analysis", - "根因分析", - { - "dev":5, - "test":250 - }, - "RootCauseAnalysis.csv" - ], - "LogParser.csv":[ - "log parser", - "日志解析", - { - "dev":5, - "test":350 - }, - "LogParser.csv" - ], - "VersionControl.csv":[ - "version control", - "版本控制", - { - "dev":5, - "test":100 - }, - "VersionControl.csv" - ], - "DBMgnt.csv":[ - "database management", - "数据库管理", - { - "dev":5, - "test":19 - }, - "DBMgnt.csv" - ], - "Dependency.csv":[ - "dependency", - "依赖管理", - { - "dev":5, - "test":44 - }, - "Dependency.csv" - ], - "Compile.csv":[ - "compile", - "编译", - { - "dev":5, - "test":31 - }, - "Compile.csv" - ], - "Package.csv":[ - "package", - "包管理", - { - "dev":5, - "test":24 - }, - "Package.csv" - ] -} \ No newline at end of file diff --git a/static/images/devops_eval/data_info.png b/static/images/devops_eval/data_info.png deleted file mode 100644 index 5b42add..0000000 Binary files a/static/images/devops_eval/data_info.png and /dev/null differ diff --git a/static/images/devops_eval/devops_diagram_zh.jpg b/static/images/devops_eval/devops_diagram_zh.jpg deleted file mode 100644 index f75a1a4..0000000 Binary files a/static/images/devops_eval/devops_diagram_zh.jpg and /dev/null differ diff --git a/static/images/devops_eval/devops_eval_logo.png b/static/images/devops_eval/devops_eval_logo.png deleted file mode 100644 index 8ed3852..0000000 Binary files a/static/images/devops_eval/devops_eval_logo.png and /dev/null differ diff --git 
a/static/images/devops_eval/toolLearning_performance_metrics copy.png b/static/images/devops_eval/toolLearning_performance_metrics copy.png deleted file mode 100644 index 8ade560..0000000 Binary files a/static/images/devops_eval/toolLearning_performance_metrics copy.png and /dev/null differ diff --git a/static/images/devops_eval/toolLearning_performance_metrics.png b/static/images/devops_eval/toolLearning_performance_metrics.png deleted file mode 100644 index 8ade560..0000000 Binary files a/static/images/devops_eval/toolLearning_performance_metrics.png and /dev/null differ diff --git a/static/images/devops_model/devops-data-filter.webp b/static/images/devops_model/devops-data-filter.webp deleted file mode 100644 index 7df027c..0000000 Binary files a/static/images/devops_model/devops-data-filter.webp and /dev/null differ diff --git a/static/images/devops_model/devops-train-framework.webp b/static/images/devops_model/devops-train-framework.webp deleted file mode 100644 index e34391f..0000000 Binary files a/static/images/devops_model/devops-train-framework.webp and /dev/null differ diff --git a/static/images/devops_model/devops_data_filter.png b/static/images/devops_model/devops_data_filter.png deleted file mode 100644 index 1f89571..0000000 Binary files a/static/images/devops_model/devops_data_filter.png and /dev/null differ diff --git a/static/images/devops_model/devops_eval.webp b/static/images/devops_model/devops_eval.webp deleted file mode 100644 index 696c382..0000000 Binary files a/static/images/devops_model/devops_eval.webp and /dev/null differ diff --git a/static/images/devops_model/devops_train_framework.png b/static/images/devops_model/devops_train_framework.png deleted file mode 100644 index 9301c46..0000000 Binary files a/static/images/devops_model/devops_train_framework.png and /dev/null differ diff --git a/static/images/mft-vlm/CodeFuse-VLM-14B-performance.png b/static/images/mft-vlm/CodeFuse-VLM-14B-performance.png deleted file mode 100644 index 
e3b8f7a..0000000 Binary files a/static/images/mft-vlm/CodeFuse-VLM-14B-performance.png and /dev/null differ diff --git a/static/images/mft-vlm/CodeFuse-VLM-arch.png b/static/images/mft-vlm/CodeFuse-VLM-arch.png deleted file mode 100644 index 58e7f33..0000000 Binary files a/static/images/mft-vlm/CodeFuse-VLM-arch.png and /dev/null differ diff --git a/static/images/mft-vlm/MFT-VLM-arch.png b/static/images/mft-vlm/MFT-VLM-arch.png deleted file mode 100644 index a9487fe..0000000 Binary files a/static/images/mft-vlm/MFT-VLM-arch.png and /dev/null differ diff --git a/static/images/mftcoder/github-codefuse-logo-update.jpg b/static/images/mftcoder/github-codefuse-logo-update.jpg deleted file mode 100644 index 0cfa493..0000000 Binary files a/static/images/mftcoder/github-codefuse-logo-update.jpg and /dev/null differ diff --git a/static/images/mftcoder/img.jpg b/static/images/mftcoder/img.jpg deleted file mode 100644 index 199cc8e..0000000 Binary files a/static/images/mftcoder/img.jpg and /dev/null differ diff --git a/static/images/mftcoder/img_1.jpg b/static/images/mftcoder/img_1.jpg deleted file mode 100644 index bde7dac..0000000 Binary files a/static/images/mftcoder/img_1.jpg and /dev/null differ diff --git a/static/images/muagent/agent-flow.png b/static/images/muagent/agent-flow.png deleted file mode 100644 index 2358cc8..0000000 Binary files a/static/images/muagent/agent-flow.png and /dev/null differ diff --git a/static/images/muagent/baseagent.png b/static/images/muagent/baseagent.png deleted file mode 100644 index c9144c2..0000000 Binary files a/static/images/muagent/baseagent.png and /dev/null differ diff --git a/static/images/muagent/executoragent.png b/static/images/muagent/executoragent.png deleted file mode 100644 index 836cc59..0000000 Binary files a/static/images/muagent/executoragent.png and /dev/null differ diff --git a/static/images/muagent/memory manager.webp b/static/images/muagent/memory manager.webp deleted file mode 100644 index 0bed6d2..0000000 Binary 
files a/static/images/muagent/memory manager.webp and /dev/null differ diff --git a/static/images/muagent/muagent framework.png b/static/images/muagent/muagent framework.png deleted file mode 100644 index 1f97176..0000000 Binary files a/static/images/muagent/muagent framework.png and /dev/null differ diff --git a/static/images/muagent/reactagent.webp b/static/images/muagent/reactagent.webp deleted file mode 100644 index 12087bc..0000000 Binary files a/static/images/muagent/reactagent.webp and /dev/null differ diff --git a/static/images/muagent/selectoragent.webp b/static/images/muagent/selectoragent.webp deleted file mode 100644 index 2307746..0000000 Binary files a/static/images/muagent/selectoragent.webp and /dev/null differ diff --git a/themes/docura/.gitattributes b/themes/docura/.gitattributes deleted file mode 100644 index 94f480d..0000000 --- a/themes/docura/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text=auto eol=lf \ No newline at end of file diff --git a/themes/docura/.github/FUNDING.yml b/themes/docura/.github/FUNDING.yml deleted file mode 100644 index f87c0a3..0000000 --- a/themes/docura/.github/FUNDING.yml +++ /dev/null @@ -1,2 +0,0 @@ -github: [dumindu] -custom: https://www.buymeacoffee.com/dumindu \ No newline at end of file diff --git a/themes/docura/.gitignore b/themes/docura/.gitignore deleted file mode 100644 index a5e3ace..0000000 --- a/themes/docura/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -/.idea -/.vscode -/public -.DS_Store -/resources/ -.hugo_build.lock \ No newline at end of file diff --git a/themes/docura/LICENSE b/themes/docura/LICENSE deleted file mode 100644 index 61eeeda..0000000 --- a/themes/docura/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022-2023 Dumindu Madunuwan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/themes/docura/README.md b/themes/docura/README.md deleted file mode 100644 index b5210b8..0000000 --- a/themes/docura/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# 🦄 Docura -A modular Hugo theme to build your next documentation site - -## 🌱 Features - -- Responsive and adaptive layouts. -- Built-in dark, light and night themes. -- Customizable menu with Hugo configs. -- Customizable sidebars using Hugo data templates. -- Support for multiple documentation sets. -- Minimal reliance on external frameworks (No CSS, JS/npm, icon, font frameworks). -- SCSS/Vanilla JS based UI components and Hugo pipes based building process. - -## 🚀 Getting Started - -1. [Install Hugo extended version](https://gohugo.io/installation/). - - Use prebuilt binaries via https://github.com/gohugoio/hugo/releases/latest - - Verify the installation via `hugo version` - -2. Create a new site with the Docura theme. - ```shell - hugo new site newsite - cd newsite - git init - git submodule add https://github.com/docura/docura.git themes/docura - rm hugo.toml && cp themes/docura/hugo.yaml . 
- hugo server - ``` diff --git a/themes/docura/assets/js/component/article-nav.js b/themes/docura/assets/js/component/article-nav.js deleted file mode 100644 index eccd246..0000000 --- a/themes/docura/assets/js/component/article-nav.js +++ /dev/null @@ -1,50 +0,0 @@ -const body = document.body; - -const btnArticleNavMenu = document.querySelector("#article-nav-menu-btn") -if (btnArticleNavMenu) { - btnArticleNavMenu.addEventListener('click', function () { - body.classList.add('offcanvas-sidebar-on') - }); -} - -const btnArticleNavToc = document.querySelector("#article-nav-toc-btn") -if (btnArticleNavToc) { - btnArticleNavToc.addEventListener('click', function () { - body.classList.add('offcanvas-toc-on') - }); -} - -const btnCloseArticleNavMenu = document.querySelector("#sidebar .btn-close") -if (btnCloseArticleNavMenu) { - btnCloseArticleNavMenu.addEventListener('click', function () { - body.classList.remove('offcanvas-sidebar-on') - }); -} - -const btnCloseArticleNavToc = document.querySelector("#toc .btn-close") -if (btnCloseArticleNavToc) { - btnCloseArticleNavToc.addEventListener('click', function () { - body.classList.remove('offcanvas-toc-on') - }); - - const tocLinks = document.querySelectorAll("#toc ul a"); - tocLinks.forEach(link => { - link.addEventListener('click', function () { - body.classList.remove('offcanvas-toc-on') - }); - }); -} - -body.addEventListener('click', e => { - const isBtnArticleNavMenu = e.target.closest('#article-nav-menu-btn'); - const isSidebar = e.target.closest('#sidebar'); - if (!isBtnArticleNavMenu && !isSidebar && body.classList.contains('offcanvas-sidebar-on')) { - body.classList.remove('offcanvas-sidebar-on'); - } - - const isBtnArticleNavToc = e.target.closest('#article-nav-toc-btn'); - const toc = e.target.closest('#toc'); - if (!isBtnArticleNavToc && !toc && body.classList.contains('offcanvas-toc-on')) { - body.classList.remove('offcanvas-toc-on'); - } -}); \ No newline at end of file diff --git 
a/themes/docura/assets/js/component/color-preference.js b/themes/docura/assets/js/component/color-preference.js deleted file mode 100644 index b3c7b07..0000000 --- a/themes/docura/assets/js/component/color-preference.js +++ /dev/null @@ -1,47 +0,0 @@ -const lsKeyColorPreference = 'color-preference' -const lsKeyColorPreferenceDarkVariant = 'color-preference-dark-variant' - -const getColorPreference = () => { - let lastUsedColorPreference = localStorage.getItem(lsKeyColorPreference) - if (lastUsedColorPreference !== null) - return lastUsedColorPreference - else - return window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light' -} - -let colorPreference = getColorPreference() -document.firstElementChild.setAttribute('data-color', colorPreference) - -const getColorPreferenceDarkVariant = () => { - let lastUsedColorPreferenceDarkVariant = localStorage.getItem(lsKeyColorPreferenceDarkVariant) - return lastUsedColorPreferenceDarkVariant !== null ? lastUsedColorPreferenceDarkVariant : 'dark' -} - -let colorPreferenceDarkVariant = getColorPreferenceDarkVariant() - -let colorSchemes = document.querySelectorAll('.color-scheme') -colorSchemes.forEach(el => { - el.addEventListener('click', function () { - let newColorPreference = el.dataset.value - if (newColorPreference !== colorPreference) { - colorPreference = newColorPreference - setColorPreference() - - if (newColorPreference === 'dark' || newColorPreference === 'night') { - colorPreferenceDarkVariant = newColorPreference - localStorage.setItem(lsKeyColorPreferenceDarkVariant, colorPreferenceDarkVariant) - } - } - }) -}); - -const setColorPreference = () => { - localStorage.setItem(lsKeyColorPreference, colorPreference) - document.firstElementChild.setAttribute('data-color', colorPreference) -} - -window.matchMedia('(prefers-color-scheme: dark)') - .addEventListener('change', ({matches: isDark}) => { - colorPreference = isDark ? 
colorPreferenceDarkVariant : 'light' - setColorPreference() - }) diff --git a/themes/docura/assets/js/component/docsearch.min.js b/themes/docura/assets/js/component/docsearch.min.js deleted file mode 100644 index 9a7413d..0000000 --- a/themes/docura/assets/js/component/docsearch.min.js +++ /dev/null @@ -1,3 +0,0 @@ -/*! @docsearch/js 3.2.0 | MIT License | © Algolia, Inc. and contributors | https://docsearch.algolia.com | https://cdn.jsdelivr.net/npm/@docsearch/js@3 */ -!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e=e||self).docsearch=t()}(this,(function(){"use strict";function e(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function t(t){for(var n=1;n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function i(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=null==e?null:"undefined"!=typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null==n)return;var r,o,c=[],i=!0,a=!1;try{for(n=n.call(e);!(i=(r=n.next()).done)&&(c.push(r.value),!t||c.length!==t);i=!0);}catch(e){a=!0,o=e}finally{try{i||null==n.return||n.return()}finally{if(a)throw o}}return c}(e,t)||u(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function a(e){return function(e){if(Array.isArray(e))return l(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||u(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must 
have a [Symbol.iterator]() method.")}()}function u(e,t){if(e){if("string"==typeof e)return l(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);return"Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n?Array.from(e):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?l(e,t):void 0}}function l(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n3)for(n=[n],c=3;c0?O(m.type,m.props,m.key,null,m.__v):m)){if(m.__=n,m.__b=n.__b+1,null===(p=b[s])||p&&m.key==p.key&&m.type===p.type)b[s]=void 0;else for(f=0;f3)for(n=[n],c=3;c=n.__.length&&n.__.push({}),n.__[e]}function ne(e){return $=1,re(pe,e)}function re(e,t,n){var r=te(W++,2);return r.t=e,r.__c||(r.__=[n?n(t):pe(void 0,t),function(e){var t=r.t(r.__[0],e);r.__[0]!==t&&(r.__=[t,r.__[1]],r.__c.setState({}))}],r.__c=K),r.__}function oe(e,t){var n=te(W++,3);!s.__s&&fe(n.__H,t)&&(n.__=e,n.__H=t,K.__H.__h.push(n))}function ce(e,t){var n=te(W++,4);!s.__s&&fe(n.__H,t)&&(n.__=e,n.__H=t,K.__h.push(n))}function ie(e,t){var n=te(W++,7);return fe(n.__H,t)&&(n.__=e(),n.__H=t,n.__h=e),n.__}function ae(){Q.forEach((function(e){if(e.__P)try{e.__H.__h.forEach(le),e.__H.__h.forEach(se),e.__H.__h=[]}catch(t){e.__H.__h=[],s.__e(t,e.__v)}})),Q=[]}s.__b=function(e){K=null,Y&&Y(e)},s.__r=function(e){G&&G(e),W=0;var t=(K=e.__c).__H;t&&(t.__h.forEach(le),t.__h.forEach(se),t.__h=[])},s.diffed=function(e){Z&&Z(e);var t=e.__c;t&&t.__H&&t.__H.__h.length&&(1!==Q.push(t)&&J===s.requestAnimationFrame||((J=s.requestAnimationFrame)||function(e){var t,n=function(){clearTimeout(r),ue&&cancelAnimationFrame(t),setTimeout(e)},r=setTimeout(n,100);ue&&(t=requestAnimationFrame(n))})(ae)),K=void 0},s.__c=function(e,t){t.some((function(e){try{e.__h.forEach(le),e.__h=e.__h.filter((function(e){return!e.__||se(e)}))}catch(n){t.some((function(e){e.__h&&(e.__h=[])})),t=[],s.__e(n,e.__v)}})),X&&X(e,t)},s.unmount=function(e){ee&&ee(e);var t=e.__c;if(t&&t.__H)try{t.__H.__.forEach(le)}catch(e){s.__e(e,t.__v)}};var 
ue="function"==typeof requestAnimationFrame;function le(e){var t=K;"function"==typeof e.__c&&e.__c(),K=t}function se(e){var t=K;e.__c=e.__(),K=t}function fe(e,t){return!e||e.length!==t.length||t.some((function(t,n){return t!==e[n]}))}function pe(e,t){return"function"==typeof t?t(e):t}function me(e,t){for(var n in t)e[n]=t[n];return e}function de(e,t){for(var n in e)if("__source"!==n&&!(n in t))return!0;for(var r in t)if("__source"!==r&&e[r]!==t[r])return!0;return!1}function he(e){this.props=e}(he.prototype=new E).isPureReactComponent=!0,he.prototype.shouldComponentUpdate=function(e,t){return de(this.props,e)||de(this.state,t)};var ve=s.__b;s.__b=function(e){e.type&&e.type.__f&&e.ref&&(e.props.ref=e.ref,e.ref=null),ve&&ve(e)};var ye="undefined"!=typeof Symbol&&Symbol.for&&Symbol.for("react.forward_ref")||3911;var _e=function(e,t){return null==e?null:C(C(e).map(t))},be={map:_e,forEach:_e,count:function(e){return e?C(e).length:0},only:function(e){var t=C(e);if(1!==t.length)throw"Children.only";return t[0]},toArray:C},ge=s.__e;function Oe(){this.__u=0,this.t=null,this.__b=null}function Se(e){var t=e.__.__c;return t&&t.__e&&t.__e(e)}function Ee(){this.u=null,this.o=null}s.__e=function(e,t,n){if(e.then)for(var r,o=t;o=o.__;)if((r=o.__c)&&r.__c)return null==t.__e&&(t.__e=n.__e,t.__k=n.__k),r.__c(e,t);ge(e,t,n)},(Oe.prototype=new E).__c=function(e,t){var n=t.__c,r=this;null==r.t&&(r.t=[]),r.t.push(n);var o=Se(r.__v),c=!1,i=function(){c||(c=!0,n.componentWillUnmount=n.__c,o?o(a):a())};n.__c=n.componentWillUnmount,n.componentWillUnmount=function(){i(),n.__c&&n.__c()};var a=function(){if(!--r.__u){if(r.state.__e){var e=r.state.__e;r.__v.__k[0]=function e(t,n,r){return t&&(t.__v=null,t.__k=t.__k&&t.__k.map((function(t){return e(t,n,r)})),t.__c&&t.__c.__P===n&&(t.__e&&r.insertBefore(t.__e,t.__d),t.__c.__e=!0,t.__c.__P=r)),t}(e,e.__c.__P,e.__c.__O)}var 
t;for(r.setState({__e:r.__b=null});t=r.t.pop();)t.forceUpdate()}},u=!0===t.__h;r.__u++||u||r.setState({__e:r.__b=r.__v.__k[0]}),e.then(i,i)},Oe.prototype.componentWillUnmount=function(){this.t=[]},Oe.prototype.render=function(e,t){if(this.__b){if(this.__v.__k){var n=document.createElement("div"),r=this.__v.__k[0].__c;this.__v.__k[0]=function e(t,n,r){return t&&(t.__c&&t.__c.__H&&(t.__c.__H.__.forEach((function(e){"function"==typeof e.__c&&e.__c()})),t.__c.__H=null),null!=(t=me({},t)).__c&&(t.__c.__P===r&&(t.__c.__P=n),t.__c=null),t.__k=t.__k&&t.__k.map((function(t){return e(t,n,r)}))),t}(this.__b,n,r.__O=r.__P)}this.__b=null}var o=t.__e&&g(S,null,e.fallback);return o&&(o.__h=null),[g(S,null,t.__e?null:e.children),o]};var we=function(e,t,n){if(++n[1]===n[0]&&e.o.delete(t),e.props.revealOrder&&("t"!==e.props.revealOrder[0]||!e.o.size))for(n=e.u;n;){for(;n.length>3;)n.pop()();if(n[1]>>1,1),t.i.removeChild(e)}}),B(g(je,{context:t.context},e.__v),t.l)):t.l&&t.componentWillUnmount()}function Ie(e,t){return g(Pe,{__v:e,i:t})}(Ee.prototype=new E).__e=function(e){var t=this,n=Se(t.__v),r=t.o.get(e);return r[0]++,function(o){var c=function(){t.props.revealOrder?(r.push(o),we(t,e,r)):o()};n?n(c):c()}},Ee.prototype.render=function(e){this.u=null,this.o=new Map;var t=C(e.children);e.revealOrder&&"b"===e.revealOrder[0]&&t.reverse();for(var n=t.length;n--;)this.o.set(t[n],this.u=[1,0,this.u]);return e.children},Ee.prototype.componentDidUpdate=Ee.prototype.componentDidMount=function(){var e=this;this.o.forEach((function(t,n){we(e,n,t)}))};var ke="undefined"!=typeof Symbol&&Symbol.for&&Symbol.for("react.element")||60103,De=/^(?:accent|alignment|arabic|baseline|cap|clip(?!PathU)|color|fill|flood|font|glyph(?!R)|horiz|marker(?!H|W|U)|overline|paint|stop|strikethrough|stroke|text(?!L)|underline|unicode|units|v|vector|vert|word|writing|x(?!C))[A-Z]/,Ce=function(e){return("undefined"!=typeof Symbol&&"symbol"==n(Symbol())?/fil|che|rad/i:/fil|che|ra/i).test(e)};function Ae(e,t,n){return 
null==t.__k&&(t.textContent=""),B(e,t),"function"==typeof n&&n(),e?e.__c:null}E.prototype.isReactComponent={},["componentWillMount","componentWillReceiveProps","componentWillUpdate"].forEach((function(e){Object.defineProperty(E.prototype,e,{configurable:!0,get:function(){return this["UNSAFE_"+e]},set:function(t){Object.defineProperty(this,e,{configurable:!0,writable:!0,value:t})}})}));var xe=s.event;function Ne(){}function Re(){return this.cancelBubble}function Te(){return this.defaultPrevented}s.event=function(e){return xe&&(e=xe(e)),e.persist=Ne,e.isPropagationStopped=Re,e.isDefaultPrevented=Te,e.nativeEvent=e};var Le,qe={configurable:!0,get:function(){return this.class}},Me=s.vnode;s.vnode=function(e){var t=e.type,n=e.props,r=n;if("string"==typeof t){for(var o in r={},n){var c=n[o];"value"===o&&"defaultValue"in n&&null==c||("defaultValue"===o&&"value"in n&&null==n.value?o="value":"download"===o&&!0===c?c="":/ondoubleclick/i.test(o)?o="ondblclick":/^onchange(textarea|input)/i.test(o+t)&&!Ce(n.type)?o="oninput":/^on(Ani|Tra|Tou|BeforeInp)/.test(o)?o=o.toLowerCase():De.test(o)?o=o.replace(/[A-Z0-9]/,"-$&").toLowerCase():null===c&&(c=void 0),r[o]=c)}"select"==t&&r.multiple&&Array.isArray(r.value)&&(r.value=C(n.children).forEach((function(e){e.props.selected=-1!=r.value.indexOf(e.props.value)}))),"select"==t&&null!=r.defaultValue&&(r.value=C(n.children).forEach((function(e){e.props.selected=r.multiple?-1!=r.defaultValue.indexOf(e.props.value):r.defaultValue==e.props.value}))),e.props=r}t&&n.class!=n.className&&(qe.enumerable="className"in n,null!=n.className&&(r.class=n.className),Object.defineProperty(r,"className",qe)),e.$$typeof=ke,Me&&Me(e)};var He=s.__r;s.__r=function(e){He&&He(e),Le=e.__c};var Ue={ReactCurrentDispatcher:{current:{readContext:function(e){return Le.__n[e.__c].props.value}}}};"object"==("undefined"==typeof performance?"undefined":n(performance))&&"function"==typeof performance.now&&performance.now.bind(performance);function 
Fe(e){return!!e&&e.$$typeof===ke}var Be={useState:ne,useReducer:re,useEffect:oe,useLayoutEffect:ce,useRef:function(e){return $=5,ie((function(){return{current:e}}),[])},useImperativeHandle:function(e,t,n){$=6,ce((function(){"function"==typeof e?e(t()):e&&(e.current=t())}),null==n?n:n.concat(e))},useMemo:ie,useCallback:function(e,t){return $=8,ie((function(){return e}),t)},useContext:function(e){var t=K.context[e.__c],n=te(W++,9);return n.__c=e,t?(null==n.__&&(n.__=!0,t.sub(K)),t.props.value):e.__},useDebugValue:function(e,t){s.useDebugValue&&s.useDebugValue(t?t(e):e)},version:"16.8.0",Children:be,render:Ae,hydrate:function(e,t,n){return V(e,t),"function"==typeof n&&n(),e?e.__c:null},unmountComponentAtNode:function(e){return!!e.__k&&(B(null,e),!0)},createPortal:Ie,createElement:g,createContext:function(e,t){var n={__c:t="__cC"+d++,__:e,Consumer:function(e,t){return e.children(t)},Provider:function(e){var n,r;return this.getChildContext||(n=[],(r={})[t]=this,this.getChildContext=function(){return r},this.shouldComponentUpdate=function(e){this.props.value!==e.value&&n.some(P)},this.sub=function(e){n.push(e);var t=e.componentWillUnmount;e.componentWillUnmount=function(){n.splice(n.indexOf(e),1),t&&t.call(e)}}),e.children}};return n.Provider.__=n.Consumer.contextType=n},createFactory:function(e){return g.bind(null,e)},cloneElement:function(e){return Fe(e)?z.apply(null,arguments):e},createRef:function(){return{current:null}},Fragment:S,isValidElement:Fe,findDOMNode:function(e){return e&&(e.base||1===e.nodeType&&e)||null},Component:E,PureComponent:he,memo:function(e,t){function n(e){var n=this.props.ref,r=n==e.ref;return!r&&n&&(n.call?n(null):n.current=null),t?!t(this.props,e)||!r:de(this.props,e)}function r(t){return this.shouldComponentUpdate=n,g(e,t)}return r.displayName="Memo("+(e.displayName||e.name)+")",r.prototype.isReactComponent=!0,r.__f=!0,r},forwardRef:function(e){function t(t,r){var o=me({},t);return delete o.ref,e(o,(r=t.ref||r)&&("object"!=n(r)||"current"in 
r)?r:null)}return t.$$typeof=ye,t.render=t,t.prototype.isReactComponent=t.__f=!0,t.displayName="ForwardRef("+(e.displayName||e.name)+")",t},unstable_batchedUpdates:function(e,t){return e(t)},StrictMode:S,Suspense:Oe,SuspenseList:Ee,lazy:function(e){var t,n,r;function o(o){if(t||(t=e()).then((function(e){n=e.default||e}),(function(e){r=e})),r)throw r;if(!n)throw t;return g(n,o)}return o.displayName="Lazy",o.__f=!0,o},__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:Ue};function Ve(){return Be.createElement("svg",{width:"15",height:"15",className:"DocSearch-Control-Key-Icon"},Be.createElement("path",{d:"M4.505 4.496h2M5.505 5.496v5M8.216 4.496l.055 5.993M10 7.5c.333.333.5.667.5 1v2M12.326 4.5v5.996M8.384 4.496c1.674 0 2.116 0 2.116 1.5s-.442 1.5-2.116 1.5M3.205 9.303c-.09.448-.277 1.21-1.241 1.203C1 10.5.5 9.513.5 8V7c0-1.57.5-2.5 1.464-2.494.964.006 1.134.598 1.24 1.342M12.553 10.5h1.953",strokeWidth:"1.2",stroke:"currentColor",fill:"none",strokeLinecap:"square"}))}function ze(){return Be.createElement("i",{className:"icon icon-search DocSearch-Search-Icon"})}var We=["translations"];function Ke(){return Ke=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var Ye="Ctrl";var Ge=Be.forwardRef((function(e,t){var n=e.translations,r=void 0===n?{}:n,o=Qe(e,We),c=r.buttonText,i=void 0===c?"Search":c,a=r.buttonAriaLabel,u=void 0===a?"Search":a,l=Je(ne(null),2),s=l[0],f=l[1];return oe((function(){"undefined"!=typeof navigator&&(/(Mac|iPhone|iPod|iPad)/i.test(navigator.platform)?f("⌘"):f(Ye))}),[]),Be.createElement("button",Ke({type:"button",className:"DocSearch 
DocSearch-Button","aria-label":u},o,{ref:t}),Be.createElement("span",{className:"DocSearch-Button-Container"},Be.createElement(ze,null),Be.createElement("span",{className:"DocSearch-Button-Placeholder"},i)),Be.createElement("span",{className:"DocSearch-Button-Keys"},null!==s&&Be.createElement(Be.Fragment,null,Be.createElement("kbd",{className:"DocSearch-Button-Key"},s===Ye?Be.createElement(Ve,null):s),Be.createElement("kbd",{className:"DocSearch-Button-Key"},"K"))))}));function Ze(e){return e.reduce((function(e,t){return e.concat(t)}),[])}var Xe=0;function et(e){return 0===e.collections.length?0:e.collections.reduce((function(e,t){return e+t.items.length}),0)}var tt=function(){},nt=[{segment:"autocomplete-core",version:"1.7.1"}];function rt(e,t){var n=t;return{then:function(t,r){return rt(e.then(ct(t,n,e),ct(r,n,e)),n)},catch:function(t){return rt(e.catch(ct(t,n,e)),n)},finally:function(t){return t&&n.onCancelList.push(t),rt(e.finally(ct(t&&function(){return n.onCancelList=[],t()},n,e)),n)},cancel:function(){n.isCanceled=!0;var e=n.onCancelList;n.onCancelList=[],e.forEach((function(e){e()}))},isCanceled:function(){return!0===n.isCanceled}}}function ot(e){return rt(e,{isCanceled:!1,onCancelList:[]})}function ct(e,t,n){return e?function(n){return t.isCanceled?n:e(n)}:n}function it(e,t,n,r){if(!n)return null;if(e<0&&(null===t||null!==r&&0===t))return n+e;var o=(null===t?-1:t)+e;return o<=-1||o>=n?null===r?null:0:o}function at(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function ut(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function lt(e,t){var n=[];return Promise.resolve(e(t)).then((function(e){return Promise.all(e.filter((function(e){return Boolean(e)})).map((function(e){if(e.sourceId,n.includes(e.sourceId))throw new Error("[Autocomplete] 
The `sourceId` ".concat(JSON.stringify(e.sourceId)," is not unique."));n.push(e.sourceId);var t=function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var Vt,zt,Wt,Kt=null,Jt=(Vt=-1,zt=-1,Wt=void 0,function(e){var t=++Vt;return Promise.resolve(e).then((function(e){return Wt&&t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var en=["props","refresh","store"],tn=["inputElement","formElement","panelElement"],nn=["inputElement"],rn=["inputElement","maxLength"],on=["item","source"];function cn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function an(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function sn(e){var t=e.props,n=e.refresh,r=e.store,o=ln(e,en);return{getEnvironmentProps:function(e){var n=e.inputElement,o=e.formElement,c=e.panelElement;function i(e){!r.getState().isOpen&&r.pendingRequests.isEmpty()||e.target===n||!1===[o,c].some((function(t){return n=t,r=e.target,n===r||n.contains(r);var n,r}))&&(r.dispatch("blur",null),t.debug||r.pendingRequests.cancelAll())}return an({onTouchStart:i,onMouseDown:i,onTouchMove:function(e){!1!==r.getState().isOpen&&n===t.environment.document.activeElement&&e.target!==n&&n.blur()}},ln(e,tn))},getRootProps:function(e){return 
an({role:"combobox","aria-expanded":r.getState().isOpen,"aria-haspopup":"listbox","aria-owns":r.getState().isOpen?"".concat(t.id,"-list"):void 0,"aria-labelledby":"".concat(t.id,"-label")},e)},getFormProps:function(e){e.inputElement;return an({action:"",noValidate:!0,role:"search",onSubmit:function(c){var i;c.preventDefault(),t.onSubmit(an({event:c,refresh:n,state:r.getState()},o)),r.dispatch("submit",null),null===(i=e.inputElement)||void 0===i||i.blur()},onReset:function(c){var i;c.preventDefault(),t.onReset(an({event:c,refresh:n,state:r.getState()},o)),r.dispatch("reset",null),null===(i=e.inputElement)||void 0===i||i.focus()}},ln(e,nn))},getLabelProps:function(e){return an({htmlFor:"".concat(t.id,"-input"),id:"".concat(t.id,"-label")},e)},getInputProps:function(e){var c;function i(e){(t.openOnFocus||Boolean(r.getState().query))&&$t(an({event:e,props:t,query:r.getState().completion||r.getState().query,refresh:n,store:r},o)),r.dispatch("focus",null)}var a=e||{},u=(a.inputElement,a.maxLength),l=void 0===u?512:u,s=ln(a,rn),f=st(r.getState()),p=function(e){return Boolean(e&&e.match(ft))}((null===(c=t.environment.navigator)||void 0===c?void 0:c.userAgent)||""),m=null!=f&&f.itemUrl&&!p?"go":"search";return an({"aria-autocomplete":"both","aria-activedescendant":r.getState().isOpen&&null!==r.getState().activeItemId?"".concat(t.id,"-item-").concat(r.getState().activeItemId):void 0,"aria-controls":r.getState().isOpen?"".concat(t.id,"-list"):void 0,"aria-labelledby":"".concat(t.id,"-label"),value:r.getState().completion||r.getState().query,id:"".concat(t.id,"-input"),autoComplete:"off",autoCorrect:"off",autoCapitalize:"off",enterKeyHint:m,spellCheck:"false",autoFocus:t.autoFocus,placeholder:t.placeholder,maxLength:l,type:"search",onChange:function(e){$t(an({event:e,props:t,query:e.currentTarget.value.slice(0,l),refresh:n,store:r},o))},onKeyDown:function(e){!function(e){var t=e.event,n=e.props,r=e.refresh,o=e.store,c=Xt(e,Qt);if("ArrowUp"===t.key||"ArrowDown"===t.key){var 
i=function(){var e=n.environment.document.getElementById("".concat(n.id,"-item-").concat(o.getState().activeItemId));e&&(e.scrollIntoViewIfNeeded?e.scrollIntoViewIfNeeded(!1):e.scrollIntoView(!1))},a=function(){var e=st(o.getState());if(null!==o.getState().activeItemId&&e){var n=e.item,i=e.itemInputValue,a=e.itemUrl,u=e.source;u.onActive(Gt({event:t,item:n,itemInputValue:i,itemUrl:a,refresh:r,source:u,state:o.getState()},c))}};t.preventDefault(),!1===o.getState().isOpen&&(n.openOnFocus||Boolean(o.getState().query))?$t(Gt({event:t,props:n,query:o.getState().query,refresh:r,store:o},c)).then((function(){o.dispatch(t.key,{nextActiveItemId:n.defaultActiveItemId}),a(),setTimeout(i,0)})):(o.dispatch(t.key,{}),a(),i())}else if("Escape"===t.key)t.preventDefault(),o.dispatch(t.key,null),o.pendingRequests.cancelAll();else if("Tab"===t.key)o.dispatch("blur",null),o.pendingRequests.cancelAll();else if("Enter"===t.key){if(null===o.getState().activeItemId||o.getState().collections.every((function(e){return 0===e.items.length})))return void(n.debug||o.pendingRequests.cancelAll());t.preventDefault();var u=st(o.getState()),l=u.item,s=u.itemInputValue,f=u.itemUrl,p=u.source;if(t.metaKey||t.ctrlKey)void 0!==f&&(p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c)),n.navigator.navigateNewTab({itemUrl:f,item:l,state:o.getState()}));else if(t.shiftKey)void 0!==f&&(p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c)),n.navigator.navigateNewWindow({itemUrl:f,item:l,state:o.getState()}));else if(t.altKey);else{if(void 0!==f)return p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c)),void 
n.navigator.navigate({itemUrl:f,item:l,state:o.getState()});$t(Gt({event:t,nextState:{isOpen:!1},props:n,query:s,refresh:r,store:o},c)).then((function(){p.onSelect(Gt({event:t,item:l,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},c))}))}}}(an({event:e,props:t,refresh:n,store:r},o))},onFocus:i,onBlur:tt,onClick:function(n){e.inputElement!==t.environment.document.activeElement||r.getState().isOpen||i(n)}},s)},getPanelProps:function(e){return an({onMouseDown:function(e){e.preventDefault()},onMouseLeave:function(){r.dispatch("mouseleave",null)}},e)},getListProps:function(e){return an({role:"listbox","aria-labelledby":"".concat(t.id,"-label"),id:"".concat(t.id,"-list")},e)},getItemProps:function(e){var c=e.item,i=e.source,a=ln(e,on);return an({id:"".concat(t.id,"-item-").concat(c.__autocomplete_id),role:"option","aria-selected":r.getState().activeItemId===c.__autocomplete_id,onMouseMove:function(e){if(c.__autocomplete_id!==r.getState().activeItemId){r.dispatch("mousemove",c.__autocomplete_id);var t=st(r.getState());if(null!==r.getState().activeItemId&&t){var i=t.item,a=t.itemInputValue,u=t.itemUrl,l=t.source;l.onActive(an({event:e,item:i,itemInputValue:a,itemUrl:u,refresh:n,source:l,state:r.getState()},o))}}},onMouseDown:function(e){e.preventDefault()},onClick:function(e){var a=i.getItemInputValue({item:c,state:r.getState()}),u=i.getItemUrl({item:c,state:r.getState()});(u?Promise.resolve():$t(an({event:e,nextState:{isOpen:!1},props:t,query:a,refresh:n,store:r},o))).then((function(){i.onSelect(an({event:e,item:c,itemInputValue:a,itemUrl:u,refresh:n,source:i,state:r.getState()},o))}))}},a)}}}function fn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function pn(e){for(var t=1;t0},reshape:function(e){return e.sources}},e),{},{id:null!==(n=e.id)&&void 
0!==n?n:"autocomplete-".concat(Xe++),plugins:o,initialState:wt({activeItemId:null,query:"",completion:null,collections:[],isOpen:!1,status:"idle",context:{}},e.initialState),onStateChange:function(t){var n;null===(n=e.onStateChange)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onStateChange)||void 0===n?void 0:n.call(e,t)}))},onSubmit:function(t){var n;null===(n=e.onSubmit)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onSubmit)||void 0===n?void 0:n.call(e,t)}))},onReset:function(t){var n;null===(n=e.onReset)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onReset)||void 0===n?void 0:n.call(e,t)}))},getSources:function(n){return Promise.all([].concat(Ot(o.map((function(e){return e.getSources}))),[e.getSources]).filter(Boolean).map((function(e){return lt(e,n)}))).then((function(e){return Ze(e)})).then((function(e){return e.map((function(e){return wt(wt({},e),{},{onSelect:function(n){e.onSelect(n),t.forEach((function(e){var t;return null===(t=e.onSelect)||void 0===t?void 0:t.call(e,n)}))},onActive:function(n){e.onActive(n),t.forEach((function(e){var t;return null===(t=e.onActive)||void 0===t?void 0:t.call(e,n)}))}})}))}))},navigator:wt({navigate:function(e){var t=e.itemUrl;r.location.assign(t)},navigateNewTab:function(e){var t=e.itemUrl,n=r.open(t,"_blank","noopener");null==n||n.focus()},navigateNewWindow:function(e){var t=e.itemUrl;r.open(t,"_blank","noopener")}},e.navigator)})}(e,t),r=yt(bn,n,(function(e){var t=e.prevState,r=e.state;n.onStateChange(On({prevState:t,state:r,refresh:i},o))})),o=function(e){var t=e.store;return{setActiveItemId:function(e){t.dispatch("setActiveItemId",e)},setQuery:function(e){t.dispatch("setQuery",e)},setCollections:function(e){var n=0,r=e.map((function(e){return bt(bt({},e),{},{items:Ze(e.items).map((function(e){return 
bt(bt({},e),{},{__autocomplete_id:n++})}))})}));t.dispatch("setCollections",r)},setIsOpen:function(e){t.dispatch("setIsOpen",e)},setStatus:function(e){t.dispatch("setStatus",e)},setContext:function(e){t.dispatch("setContext",e)}}}({store:r}),c=sn(On({props:n,refresh:i,store:r},o));function i(){return $t(On({event:new Event("input"),nextState:{isOpen:r.getState().isOpen},props:n,query:r.getState().query,refresh:i,store:r},o))}return n.plugins.forEach((function(e){var n;return null===(n=e.subscribe)||void 0===n?void 0:n.call(e,On(On({},o),{},{refresh:i,onSelect:function(e){t.push({onSelect:e})},onActive:function(e){t.push({onActive:e})}}))})),function(e){var t,n,r=e.metadata,o=e.environment;if(null===(t=o.navigator)||void 0===t||null===(n=t.userAgent)||void 0===n?void 0:n.includes("Algolia Crawler")){var c=o.document.createElement("meta"),i=o.document.querySelector("head");c.name="algolia:metadata",setTimeout((function(){c.content=JSON.stringify(r),i.appendChild(c)}),0)}}({metadata:dn({plugins:n.plugins,options:e}),environment:n.environment}),On(On({refresh:i},c),o)}function wn(e){var t=e.translations,n=(void 0===t?{}:t).searchByText,r=void 0===n?"Search by":n;return Be.createElement("a",{href:"https://www.algolia.com/ref/docsearch/?utm_source=".concat(window.location.hostname,"&utm_medium=referral&utm_content=powered_by&utm_campaign=docsearch"),target:"_blank",rel:"noopener noreferrer"},Be.createElement("span",{className:"DocSearch-Label"},r),Be.createElement("svg",{width:"77",height:"19","aria-label":"Algolia",role:"img"},Be.createElement("path",{d:"M2.5067 0h14.0245c1.384.001 2.5058 1.1205 2.5068 2.5017V16.5c-.0014 1.3808-1.1232 2.4995-2.5068 2.5H2.5067C1.1232 18.9995.0014 17.8808 0 16.5V2.4958A2.495 2.495 0 01.735.7294 2.505 2.505 0 012.5068 0zM37.95 15.0695c-3.7068.0168-3.7068-2.986-3.7068-3.4634L34.2372.3576 36.498 0v11.1794c0 .2715 0 1.9889 1.452 1.994v1.8961zm-9.1666-1.8388c.694 0 1.2086-.0397 1.5678-.1088v-2.2934a5.3639 5.3639 0 00-1.3303-.1679 4.8283 4.8283 
0 00-.758.0582 2.2845 2.2845 0 00-.688.2024c-.2029.0979-.371.2362-.4919.4142-.1268.1788-.185.2826-.185.5533 0 .5297.185.8359.5205 1.0375.3355.2016.7928.3053 1.365.3053v-.0008zm-.1969-8.1817c.7463 0 1.3768.092 1.8856.2767.5088.1838.9195.4428 1.2204.7717.3068.334.5147.7777.6423 1.251.1327.4723.196.991.196 1.5603v5.798c-.5235.1036-1.05.192-1.5787.2649-.7048.1037-1.4976.156-2.3774.156-.5832 0-1.1215-.0582-1.6016-.167a3.385 3.385 0 01-1.2432-.5364 2.6034 2.6034 0 01-.8037-.9565c-.191-.3922-.29-.9447-.29-1.5208 0-.5533.11-.905.3246-1.2863a2.7351 2.7351 0 01.8849-.9329c.376-.242.8029-.415 1.2948-.5187a7.4517 7.4517 0 011.5381-.156 7.1162 7.1162 0 011.6667.2024V8.886c0-.259-.0296-.5061-.093-.7372a1.5847 1.5847 0 00-.3245-.6158 1.5079 1.5079 0 00-.6119-.4158 2.6788 2.6788 0 00-.966-.173c-.5206 0-.9948.0634-1.4283.1384a6.5481 6.5481 0 00-1.065.259l-.2712-1.849c.2831-.0986.7048-.1964 1.2491-.2943a9.2979 9.2979 0 011.752-.1501v.0008zm44.6597 8.1193c.6947 0 1.2086-.0405 1.567-.1097v-2.2942a5.3743 5.3743 0 00-1.3303-.1679c-.2485 0-.503.0177-.7573.0582a2.2853 2.2853 0 00-.688.2024 1.2333 1.2333 0 00-.4918.4142c-.1268.1788-.1843.2826-.1843.5533 0 .5297.1843.8359.5198 1.0375.3414.2066.7927.3053 1.365.3053v.0009zm-.191-8.1767c.7463 0 1.3768.0912 1.8856.2759.5087.1847.9195.4436 1.2204.7717.3.329.5147.7786.6414 1.251a5.7248 5.7248 0 01.197 1.562v5.7972c-.3466.0742-.874.1602-1.5788.2648-.7049.1038-1.4976.1552-2.3774.1552-.5832 0-1.1215-.0573-1.6016-.167a3.385 3.385 0 01-1.2432-.5356 2.6034 2.6034 0 01-.8038-.9565c-.191-.3922-.2898-.9447-.2898-1.5216 0-.5533.1098-.905.3245-1.2854a2.7373 2.7373 0 01.8849-.9338c.376-.2412.8029-.4141 1.2947-.5178a7.4545 7.4545 0 012.325-.1097c.2781.0287.5672.081.879.156v-.3686a2.7781 2.7781 0 00-.092-.738 1.5788 1.5788 0 00-.3246-.6166 1.5079 1.5079 0 00-.612-.415 2.6797 2.6797 0 00-.966-.1729c-.5205 0-.9947.0633-1.4282.1384a6.5608 6.5608 0 00-1.065.259l-.2712-1.8498c.283-.0979.7048-.1957 1.2491-.2935a9.8597 9.8597 0 
011.752-.1494zm-6.79-1.072c-.7576.001-1.373-.6103-1.3759-1.3664 0-.755.6128-1.3664 1.376-1.3664.764 0 1.3775.6115 1.3775 1.3664s-.6195 1.3664-1.3776 1.3664zm1.1393 11.1507h-2.2726V5.3409l2.2734-.3568v10.0845l-.0008.0017zm-3.984 0c-3.707.0168-3.707-2.986-3.707-3.4642L59.7069.3576 61.9685 0v11.1794c0 .2715 0 1.9889 1.452 1.994V15.0703zm-7.3512-4.979c0-.975-.2138-1.7873-.6305-2.3516-.4167-.571-.9998-.852-1.747-.852-.7454 0-1.3302.281-1.7452.852-.4166.5702-.6195 1.3765-.6195 2.3516 0 .9851.208 1.6473.6254 2.2183.4158.576.9998.8587 1.7461.8587.7454 0 1.3303-.2885 1.747-.8595.4158-.5761.6237-1.2315.6237-2.2184v.0009zm2.3132-.006c0 .7609-.1099 1.3361-.3356 1.9654a4.654 4.654 0 01-.9533 1.6076A4.214 4.214 0 0155.613 14.69c-.579.2412-1.4697.3795-1.9143.3795-.4462-.005-1.3303-.1324-1.9033-.3795a4.307 4.307 0 01-1.474-1.0316c-.4115-.4445-.7293-.9801-.9609-1.6076a5.3423 5.3423 0 01-.3465-1.9653c0-.7608.104-1.493.3356-2.1155a4.683 4.683 0 01.9719-1.5958 4.3383 4.3383 0 011.479-1.0257c.5739-.242 1.2043-.3567 1.8864-.3567.6829 0 1.3125.1197 1.8906.3567a4.1245 4.1245 0 011.4816 1.0257 4.7587 4.7587 0 01.9592 1.5958c.2426.6225.3643 1.3547.3643 2.1155zm-17.0198 0c0 .9448.208 1.9932.6238 2.431.4166.4386.955.6579 1.6142.6579.3584 0 .6998-.0523 1.0176-.1502.3186-.0978.5721-.2134.775-.3517V7.0784a8.8706 8.8706 0 00-1.4926-.1906c-.8206-.0236-1.4452.312-1.8847.8468-.4335.5365-.6533 1.476-.6533 2.3516v-.0008zm6.2863 4.4485c0 1.5385-.3938 2.662-1.1866 3.3773-.791.7136-2.0005 1.0712-3.6308 1.0712-.5958 0-1.834-.1156-2.8228-.334l.3643-1.7865c.8282.173 1.9202.2193 2.4932.2193.9077 0 1.555-.1847 1.943-.5533.388-.3686.578-.916.578-1.643v-.3687a6.8289 6.8289 0 01-.8848.3349c-.3634.1096-.786.167-1.261.167-.6246 0-1.1917-.0979-1.7055-.2944a3.5554 3.5554 0 01-1.3244-.8645c-.3642-.3796-.6541-.8579-.8561-1.4289-.2028-.571-.3068-1.59-.3068-2.339 0-.7034.1099-1.5856.3245-2.1735.2198-.5871.5316-1.0949.9542-1.515.4167-.42.9255-.743 1.5213-.98a5.5923 5.5923 0 012.052-.3855c.7353 0 1.4114.092 
2.0707.2024.6592.1088 1.2204.2236 1.6776.35v8.945-.0008zM11.5026 4.2418v-.6511c-.0005-.4553-.3704-.8241-.8266-.8241H8.749c-.4561 0-.826.3688-.8265.824v.669c0 .0742.0693.1264.1445.1096a6.0346 6.0346 0 011.6768-.2362 6.125 6.125 0 011.6202.2185.1116.1116 0 00.1386-.1097zm-5.2806.852l-.3296-.3282a.8266.8266 0 00-1.168 0l-.393.3922a.8199.8199 0 000 1.164l.3237.323c.0524.0515.1268.0397.1733-.0117.191-.259.3989-.507.6305-.7372.2374-.2362.48-.4437.7462-.6335.0575-.0354.0634-.1155.017-.1687zm3.5159 2.069v2.818c0 .081.0879.1392.1622.0987l2.5102-1.2964c.0574-.0287.0752-.0987.0464-.1552a3.1237 3.1237 0 00-2.603-1.574c-.0575 0-.115.0456-.115.1097l-.0008-.0009zm.0008 6.789c-2.0933.0005-3.7915-1.6912-3.7947-3.7804C5.9468 8.0821 7.6452 6.39 9.7387 6.391c2.0932-.0005 3.7911 1.6914 3.794 3.7804a3.7783 3.7783 0 01-1.1124 2.675 3.7936 3.7936 0 01-2.6824 1.1054h.0008zM9.738 4.8002c-1.9218 0-3.6975 1.0232-4.6584 2.6841a5.359 5.359 0 000 5.3683c.9609 1.661 2.7366 2.6841 4.6584 2.6841a5.3891 5.3891 0 003.8073-1.5725 5.3675 5.3675 0 001.578-3.7987 5.3574 5.3574 0 00-1.5771-3.797A5.379 5.379 0 009.7387 4.801l-.0008-.0008z",fill:"currentColor",fillRule:"evenodd"})))}function jn(e){return Be.createElement("svg",{width:"15",height:"15","aria-label":e.ariaLabel,role:"img"},Be.createElement("g",{fill:"none",stroke:"currentColor",strokeLinecap:"round",strokeLinejoin:"round",strokeWidth:"1.2"},e.children))}function Pn(e){var t=e.translations,n=void 0===t?{}:t,r=n.selectText,o=void 0===r?"to select":r,c=n.selectKeyAriaLabel,i=void 0===c?"Enter key":c,a=n.navigateText,u=void 0===a?"to navigate":a,l=n.navigateUpKeyAriaLabel,s=void 0===l?"Arrow up":l,f=n.navigateDownKeyAriaLabel,p=void 0===f?"Arrow down":f,m=n.closeText,d=void 0===m?"to close":m,h=n.closeKeyAriaLabel,v=void 0===h?"Escape key":h,y=n.searchByText,_=void 0===y?"Search by":y;return 
Be.createElement(Be.Fragment,null,Be.createElement("div",{className:"DocSearch-Logo"},Be.createElement(wn,{translations:{searchByText:_}})),Be.createElement("ul",{className:"DocSearch-Commands"},Be.createElement("li",null,Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:i},Be.createElement("path",{d:"M12 3.53088v3c0 1-1 2-2 2H4M7 11.53088l-3-3 3-3"}))),Be.createElement("span",{className:"DocSearch-Label"},o)),Be.createElement("li",null,Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:p},Be.createElement("path",{d:"M7.5 3.5v8M10.5 8.5l-3 3-3-3"}))),Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:s},Be.createElement("path",{d:"M7.5 11.5v-8M10.5 6.5l-3-3-3 3"}))),Be.createElement("span",{className:"DocSearch-Label"},u)),Be.createElement("li",null,Be.createElement("kbd",{className:"DocSearch-Commands-Key"},Be.createElement(jn,{ariaLabel:v},Be.createElement("path",{d:"M13.6167 8.936c-.1065.3583-.6883.962-1.4875.962-.7993 0-1.653-.9165-1.653-2.1258v-.5678c0-1.2548.7896-2.1016 1.653-2.1016.8634 0 1.3601.4778 1.4875 1.0724M9 6c-.1352-.4735-.7506-.9219-1.46-.8972-.7092.0246-1.344.57-1.344 1.2166s.4198.8812 1.3445.9805C8.465 7.3992 8.968 7.9337 9 8.5c.032.5663-.454 1.398-1.4595 1.398C6.6593 9.898 6 9 5.963 8.4851m-1.4748.5368c-.2635.5941-.8099.876-1.5443.876s-1.7073-.6248-1.7073-2.204v-.4603c0-1.0416.721-2.131 1.7073-2.131.9864 0 1.6425 1.031 1.5443 2.2492h-2.956"}))),Be.createElement("span",{className:"DocSearch-Label"},d))))}function In(e){var t=e.hit,n=e.children;return Be.createElement("a",{href:t.url},n)}function kn(){return Be.createElement("svg",{viewBox:"0 0 38 38",stroke:"currentColor",strokeOpacity:".5"},Be.createElement("g",{fill:"none",fillRule:"evenodd"},Be.createElement("g",{transform:"translate(1 1)",strokeWidth:"2"},Be.createElement("circle",{strokeOpacity:".3",cx:"18",cy:"18",r:"18"}),Be.createElement("path",{d:"M36 
18c0-9.94-8.06-18-18-18"},Be.createElement("animateTransform",{attributeName:"transform",type:"rotate",from:"0 18 18",to:"360 18 18",dur:"1s",repeatCount:"indefinite"})))))}function Dn(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("g",{stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M3.18 6.6a8.23 8.23 0 1112.93 9.94h0a8.23 8.23 0 01-11.63 0"}),Be.createElement("path",{d:"M6.44 7.25H2.55V3.36M10.45 6v5.6M10.45 11.6L13 13"})))}function Cn(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M10 10l5.09-5.09L10 10l5.09 5.09L10 10zm0 0L4.91 4.91 10 10l-5.09 5.09L10 10z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"}))}function An(){return Be.createElement("svg",{className:"DocSearch-Hit-Select-Icon",width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("g",{stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M18 3v4c0 2-2 4-4 4H2"}),Be.createElement("path",{d:"M8 17l-6-6 6-6"})))}var xn=function(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M17 6v12c0 .52-.2 1-1 1H4c-.7 0-1-.33-1-1V2c0-.55.42-1 1-1h8l5 5zM14 8h-3.13c-.51 0-.87-.34-.87-.87V4",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinejoin:"round"}))};function Nn(e){switch(e.type){case"lvl1":return Be.createElement(xn,null);case"content":return Be.createElement(Tn,null);default:return Be.createElement(Rn,null)}}function Rn(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M13 13h4-4V8H7v5h6v4-4H7V8H3h4V3v5h6V3v5h4-4v5zm-6 0v4-4H3h4z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"}))}function Tn(){return 
Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M17 5H3h14zm0 5H3h14zm0 5H3h14z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinejoin:"round"}))}function Ln(){return Be.createElement("svg",{width:"20",height:"20",viewBox:"0 0 20 20"},Be.createElement("path",{d:"M10 14.2L5 17l1-5.6-4-4 5.5-.7 2.5-5 2.5 5 5.6.8-4 4 .9 5.5z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinejoin:"round"}))}function qn(){return Be.createElement("svg",{width:"40",height:"40",viewBox:"0 0 20 20",fill:"none",fillRule:"evenodd",stroke:"currentColor",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M19 4.8a16 16 0 00-2-1.2m-3.3-1.2A16 16 0 001.1 4.7M16.7 8a12 12 0 00-2.8-1.4M10 6a12 12 0 00-6.7 2M12.3 14.7a4 4 0 00-4.5 0M14.5 11.4A8 8 0 0010 10M3 16L18 2M10 18h0"}))}function Mn(){return Be.createElement("svg",{width:"40",height:"40",viewBox:"0 0 20 20",fill:"none",fillRule:"evenodd",stroke:"currentColor",strokeLinecap:"round",strokeLinejoin:"round"},Be.createElement("path",{d:"M15.5 4.8c2 3 1.7 7-1 9.7h0l4.3 4.3-4.3-4.3a7.8 7.8 0 01-9.8 1m-2.2-2.2A7.8 7.8 0 0113.2 2.4M2 18L18 2"}))}function Hn(e){var t=e.translations,n=void 0===t?{}:t,r=n.titleText,o=void 0===r?"Unable to fetch results":r,c=n.helpText,i=void 0===c?"You might want to check your network connection.":c;return Be.createElement("div",{className:"DocSearch-ErrorScreen"},Be.createElement("div",{className:"DocSearch-Screen-Icon"},Be.createElement(qn,null)),Be.createElement("p",{className:"DocSearch-Title"},o),Be.createElement("p",{className:"DocSearch-Help"},i))}var Un=["translations"];function Fn(e){return function(e){if(Array.isArray(e))return Bn(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"==typeof e)return Bn(e,t);var 
n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return Bn(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function Bn(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function zn(e){var t=e.translations,n=void 0===t?{}:t,r=Vn(e,Un),o=n.noResultsText,c=void 0===o?"No results for":o,i=n.suggestedQueryText,a=void 0===i?"Try searching for":i,u=n.reportMissingResultsText,l=void 0===u?"Believe this query should return results?":u,s=n.reportMissingResultsLinkText,f=void 0===s?"Let us know.":s,p=r.state.context.searchSuggestions;return Be.createElement("div",{className:"DocSearch-NoResults"},Be.createElement("div",{className:"DocSearch-Screen-Icon"},Be.createElement(Mn,null)),Be.createElement("p",{className:"DocSearch-Title"},c,' "',Be.createElement("strong",null,r.state.query),'"'),p&&p.length>0&&Be.createElement("div",{className:"DocSearch-NoResults-Prefill-List"},Be.createElement("p",{className:"DocSearch-Help"},a,":"),Be.createElement("ul",null,p.slice(0,3).reduce((function(e,t){return[].concat(Fn(e),[Be.createElement("li",{key:t},Be.createElement("button",{className:"DocSearch-Prefill",key:t,type:"button",onClick:function(){r.setQuery(t.toLowerCase()+" "),r.refresh(),r.inputRef.current.focus()}},t))])}),[]))),r.getMissingResultsUrl&&Be.createElement("p",{className:"DocSearch-Help"},"".concat(l," "),Be.createElement("a",{href:r.getMissingResultsUrl({query:r.state.query}),target:"_blank",rel:"noopener noreferrer"},f)))}var Wn=["hit","attribute","tagName"];function 
Kn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Jn(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function Yn(e,t){return t.split(".").reduce((function(e,t){return null!=e&&e[t]?e[t]:null}),e)}function Gn(e){var t=e.hit,n=e.attribute,r=e.tagName;return g(void 0===r?"span":r,Jn(Jn({},Qn(e,Wn)),{},{dangerouslySetInnerHTML:{__html:Yn(t,"_snippetResult.".concat(n,".value"))||Yn(t,n)}}))}function Zn(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=null==e?null:"undefined"!=typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null==n)return;var r,o,c=[],i=!0,a=!1;try{for(n=n.call(e);!(i=(r=n.next()).done)&&(c.push(r.value),!t||c.length!==t);i=!0);}catch(e){a=!0,o=e}finally{try{i||null==n.return||n.return()}finally{if(a)throw o}}return c}(e,t)||function(e,t){if(!e)return;if("string"==typeof e)return Xn(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return Xn(e,t)}(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function Xn(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n|<\/mark>)/g,ar=RegExp(ir.source);function ur(e){var t,n,r,o,c,i=e;if(!i.__docsearch_parent&&!e._highlightResult)return e.hierarchy.lvl0;var a=((i.__docsearch_parent?null===(t=i.__docsearch_parent)||void 0===t||null===(n=t._highlightResult)||void 0===n||null===(r=n.hierarchy)||void 0===r?void 
0:r.lvl0:null===(o=e._highlightResult)||void 0===o||null===(c=o.hierarchy)||void 0===c?void 0:c.lvl0)||{}).value;return a&&ar.test(a)?a.replace(ir,""):a}function lr(){return lr=Object.assign||function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function dr(e){var t=e.translations,n=void 0===t?{}:t,r=mr(e,fr),o=n.recentSearchesTitle,c=void 0===o?"Recent":o,i=n.noRecentSearchesText,a=void 0===i?"No recent searches":i,u=n.saveRecentSearchButtonTitle,l=void 0===u?"Save this search":u,s=n.removeRecentSearchButtonTitle,f=void 0===s?"Remove this search from history":s,p=n.favoriteSearchesTitle,m=void 0===p?"Favorite":p,d=n.removeFavoriteSearchButtonTitle,h=void 0===d?"Remove this search from favorites":d;return"idle"===r.state.status&&!1===r.hasCollections?r.disableUserPersonalization?null:Be.createElement("div",{className:"DocSearch-StartScreen"},Be.createElement("p",{className:"DocSearch-Help"},a)):!1===r.hasCollections?null:Be.createElement("div",{className:"DocSearch-Dropdown-Container"},Be.createElement(tr,pr({},r,{title:c,collection:r.state.collections[0],renderIcon:function(){return Be.createElement("div",{className:"DocSearch-Hit-icon"},Be.createElement(Dn,null))},renderAction:function(e){var t=e.item,n=e.runFavoriteTransition,o=e.runDeleteTransition;return 
Be.createElement(Be.Fragment,null,Be.createElement("div",{className:"DocSearch-Hit-action"},Be.createElement("button",{className:"DocSearch-Hit-action-button",title:l,type:"submit",onClick:function(e){e.preventDefault(),e.stopPropagation(),n((function(){r.favoriteSearches.add(t),r.recentSearches.remove(t),r.refresh()}))}},Be.createElement(Ln,null))),Be.createElement("div",{className:"DocSearch-Hit-action"},Be.createElement("button",{className:"DocSearch-Hit-action-button",title:f,type:"submit",onClick:function(e){e.preventDefault(),e.stopPropagation(),o((function(){r.recentSearches.remove(t),r.refresh()}))}},Be.createElement(Cn,null))))}})),Be.createElement(tr,pr({},r,{title:m,collection:r.state.collections[1],renderIcon:function(){return Be.createElement("div",{className:"DocSearch-Hit-icon"},Be.createElement(Ln,null))},renderAction:function(e){var t=e.item,n=e.runDeleteTransition;return Be.createElement("div",{className:"DocSearch-Hit-action"},Be.createElement("button",{className:"DocSearch-Hit-action-button",title:h,type:"submit",onClick:function(e){e.preventDefault(),e.stopPropagation(),n((function(){r.favoriteSearches.remove(t),r.refresh()}))}},Be.createElement(Cn,null)))}})))}var hr=["translations"];function vr(){return vr=Object.assign||function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var _r=Be.memo((function(e){var t=e.translations,n=void 0===t?{}:t,r=yr(e,hr);if("error"===r.state.status)return Be.createElement(Hn,{translations:null==n?void 0:n.errorScreen});var o=r.state.collections.some((function(e){return e.items.length>0}));return r.state.query?!1===o?Be.createElement(zn,vr({},r,{translations:null==n?void 0:n.noResultsScreen})):Be.createElement(sr,r):Be.createElement(dr,vr({},r,{hasCollections:o,translations:null==n?void 
0:n.startScreen}))}),(function(e,t){return"loading"===t.state.status||"stalled"===t.state.status})),br=["translations"];function gr(){return gr=Object.assign||function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function Sr(e){var t=e.translations,n=void 0===t?{}:t,r=Or(e,br),o=n.resetButtonTitle,c=void 0===o?"Clear the query":o,i=n.resetButtonAriaLabel,a=void 0===i?"Clear the query":i,u=n.cancelButtonText,l=void 0===u?"Cancel":u,s=n.cancelButtonAriaLabel,f=void 0===s?"Cancel":s,p=r.getFormProps({inputElement:r.inputRef.current}).onReset;return Be.useEffect((function(){r.autoFocus&&r.inputRef.current&&r.inputRef.current.focus()}),[r.autoFocus,r.inputRef]),Be.useEffect((function(){r.isFromSelection&&r.inputRef.current&&r.inputRef.current.select()}),[r.isFromSelection,r.inputRef]),Be.createElement(Be.Fragment,null,Be.createElement("form",{className:"DocSearch-Form",onSubmit:function(e){e.preventDefault()},onReset:p},Be.createElement("label",gr({className:"DocSearch-MagnifierLabel"},r.getLabelProps()),Be.createElement(ze,null)),Be.createElement("div",{className:"DocSearch-LoadingIndicator"},Be.createElement(kn,null)),Be.createElement("input",gr({className:"DocSearch-Input",ref:r.inputRef},r.getInputProps({inputElement:r.inputRef.current,autoFocus:r.autoFocus,maxLength:64}))),Be.createElement("button",{type:"reset",title:c,className:"DocSearch-Reset","aria-label":a,hidden:!r.state.query},Be.createElement(Cn,null))),Be.createElement("button",{className:"DocSearch-Cancel",type:"reset","aria-label":f,onClick:r.onClose},l))}var Er=["_highlightResult","_snippetResult"];function wr(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},c=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var 
c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function jr(e){return!1===function(){var e="__TEST_KEY__";try{return localStorage.setItem(e,""),localStorage.removeItem(e),!0}catch(e){return!1}}()?{setItem:function(){},getItem:function(){return[]}}:{setItem:function(t){return window.localStorage.setItem(e,JSON.stringify(t))},getItem:function(){var t=window.localStorage.getItem(e);return t?JSON.parse(t):[]}}}function Pr(e){var t=e.key,n=e.limit,r=void 0===n?5:n,o=jr(t),c=o.getItem().slice(0,r);return{add:function(e){var t=e,n=(t._highlightResult,t._snippetResult,wr(t,Er)),i=c.findIndex((function(e){return e.objectID===n.objectID}));i>-1&&c.splice(i,1),c.unshift(n),c=c.slice(0,r),o.setItem(c)},remove:function(e){c=c.filter((function(t){return t.objectID!==e.objectID})),o.setItem(c)},getAll:function(){return c}}}var Ir=["facetName","facetQuery"];function kr(e){var t,n="algoliasearch-client-js-".concat(e.key),r=function(){return void 0===t&&(t=e.localStorage||window.localStorage),t},o=function(){return JSON.parse(r().getItem(n)||"{}")};return{get:function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}};return Promise.resolve().then((function(){var n=JSON.stringify(e),r=o()[n];return Promise.all([r||t(),void 0!==r])})).then((function(e){var t=i(e,2),r=t[0],o=t[1];return Promise.all([r,o||n.miss(r)])})).then((function(e){return i(e,1)[0]}))},set:function(e,t){return Promise.resolve().then((function(){var c=o();return c[JSON.stringify(e)]=t,r().setItem(n,JSON.stringify(c)),t}))},delete:function(e){return Promise.resolve().then((function(){var t=o();delete t[JSON.stringify(e)],r().setItem(n,JSON.stringify(t))}))},clear:function(){return Promise.resolve().then((function(){r().removeItem(n)}))}}}function Dr(e){var t=a(e.caches),n=t.shift();return void 0===n?{get:function(e,t){var n=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}};return t().then((function(e){return Promise.all([e,n.miss(e)])})).then((function(e){return i(e,1)[0]}))},set:function(e,t){return Promise.resolve(t)},delete:function(e){return Promise.resolve()},clear:function(){return Promise.resolve()}}:{get:function(e,r){var o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}};return n.get(e,r,o).catch((function(){return Dr({caches:t}).get(e,r,o)}))},set:function(e,r){return n.set(e,r).catch((function(){return Dr({caches:t}).set(e,r)}))},delete:function(e){return n.delete(e).catch((function(){return Dr({caches:t}).delete(e)}))},clear:function(){return n.clear().catch((function(){return Dr({caches:t}).clear()}))}}}function Cr(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{serializable:!0},t={};return{get:function(n,r){var o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{miss:function(){return Promise.resolve()}},c=JSON.stringify(n);if(c in t)return Promise.resolve(e.serializable?JSON.parse(t[c]):t[c]);var i=r(),a=o&&o.miss||function(){return Promise.resolve()};return i.then((function(e){return a(e)})).then((function(){return i}))},set:function(n,r){return t[JSON.stringify(n)]=e.serializable?JSON.stringify(r):r,Promise.resolve(r)},delete:function(e){return delete t[JSON.stringify(e)],Promise.resolve()},clear:function(){return t={},Promise.resolve()}}}function Ar(e){for(var t=e.length-1;t>0;t--){var n=Math.floor(Math.random()*(t+1)),r=e[t];e[t]=e[n],e[n]=r}return e}function xr(e,t){return t?(Object.keys(t).forEach((function(n){e[n]=t[n](e)})),e):e}function Nr(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r0?r:void 0,timeout:n.timeout||t,headers:n.headers||{},queryParameters:n.queryParameters||{},cacheable:n.cacheable}}var qr={Read:1,Write:2,Any:3},Mr=1,Hr=2,Ur=3,Fr=12e4;function Br(e){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:Mr;return 
t(t({},e),{},{status:n,lastUpdate:Date.now()})}function Vr(e){return"string"==typeof e?{protocol:"https",url:e,accept:qr.Any}:{protocol:e.protocol||"https",url:e.url,accept:e.accept||qr.Any}}var zr="GET",Wr="POST";function Kr(e,t){return Promise.all(t.map((function(t){return e.get(t,(function(){return Promise.resolve(Br(t))}))}))).then((function(e){var n=e.filter((function(e){return function(e){return e.status===Mr||Date.now()-e.lastUpdate>Fr}(e)})),r=e.filter((function(e){return function(e){return e.status===Ur&&Date.now()-e.lastUpdate<=Fr}(e)})),o=[].concat(a(n),a(r));return{getTimeout:function(e,t){return(0===r.length&&0===e?1:r.length+3+e)*t},statelessHosts:o.length>0?o.map((function(e){return Vr(e)})):t}}))}function Jr(e,n,r,o){var c=[],i=function(e,n){if(e.method===zr||void 0===e.data&&void 0===n.data)return;var r=Array.isArray(e.data)?e.data:t(t({},e.data),n.data);return JSON.stringify(r)}(r,o),u=function(e,n){var r=t(t({},e.headers),n.headers),o={};return Object.keys(r).forEach((function(e){var t=r[e];o[e.toLowerCase()]=t})),o}(e,o),l=r.method,s=r.method!==zr?{}:t(t({},r.data),o.data),f=t(t(t({"x-algolia-agent":e.userAgent.value},e.queryParameters),s),o.queryParameters),p=0,m=function t(n,a){var s=n.pop();if(void 0===s)throw{name:"RetryError",message:"Unreachable hosts - your application id may be incorrect. 
If the error persists, contact support@algolia.com.",transporterStackTrace:Gr(c)};var m={data:i,headers:u,method:l,url:Qr(s,r.path,f),connectTimeout:a(p,e.timeouts.connect),responseTimeout:a(p,o.timeout)},d=function(e){var t={request:m,response:e,host:s,triesLeft:n.length};return c.push(t),t},h={onSucess:function(e){return function(e){try{return JSON.parse(e.content)}catch(t){throw function(e,t){return{name:"DeserializationError",message:e,response:t}}(t.message,e)}}(e)},onRetry:function(r){var o=d(r);return r.isTimedOut&&p++,Promise.all([e.logger.info("Retryable failure",Zr(o)),e.hostsCache.set(s,Br(s,r.isTimedOut?Ur:Hr))]).then((function(){return t(n,a)}))},onFail:function(e){throw d(e),function(e,t){var n=e.content,r=e.status,o=n;try{o=JSON.parse(n).message}catch(e){}return function(e,t,n){return{name:"ApiError",message:e,status:t,transporterStackTrace:n}}(o,r,t)}(e,Gr(c))}};return e.requester.send(m).then((function(e){return function(e,t){return function(e){var t=e.status;return e.isTimedOut||function(e){var t=e.isTimedOut,n=e.status;return!t&&0==~~n}(e)||2!=~~(t/100)&&4!=~~(t/100)}(e)?t.onRetry(e):2==~~(e.status/100)?t.onSucess(e):t.onFail(e)}(e,h)}))};return Kr(e.hostsCache,n).then((function(e){return m(a(e.statelessHosts).reverse(),e.getTimeout)}))}function $r(e){var t={value:"Algolia for JavaScript (".concat(e,")"),add:function(e){var n="; ".concat(e.segment).concat(void 0!==e.version?" 
(".concat(e.version,")"):"");return-1===t.value.indexOf(n)&&(t.value="".concat(t.value).concat(n)),t}};return t}function Qr(e,t,n){var r=Yr(n),o="".concat(e.protocol,"://").concat(e.url,"/").concat("/"===t.charAt(0)?t.substr(1):t);return r.length&&(o+="?".concat(r)),o}function Yr(e){return Object.keys(e).map((function(t){return Nr("%s=%s",t,(n=e[t],"[object Object]"===Object.prototype.toString.call(n)||"[object Array]"===Object.prototype.toString.call(n)?JSON.stringify(e[t]):e[t]));var n})).join("&")}function Gr(e){return e.map((function(e){return Zr(e)}))}function Zr(e){var n=e.request.headers["x-algolia-api-key"]?{"x-algolia-api-key":"*****"}:{};return t(t({},e),{},{request:t(t({},e.request),{},{headers:t(t({},e.request.headers),n)})})}var Xr=function(e){var n=e.appId,r=function(e,t,n){var r={"x-algolia-api-key":n,"x-algolia-application-id":t};return{headers:function(){return e===Tr.WithinHeaders?r:{}},queryParameters:function(){return e===Tr.WithinQueryParameters?r:{}}}}(void 0!==e.authMode?e.authMode:Tr.WithinHeaders,n,e.apiKey),o=function(e){var t=e.hostsCache,n=e.logger,r=e.requester,o=e.requestsCache,c=e.responsesCache,a=e.timeouts,u=e.userAgent,l=e.hosts,s=e.queryParameters,f={hostsCache:t,logger:n,requester:r,requestsCache:o,responsesCache:c,timeouts:a,userAgent:u,headers:e.headers,queryParameters:s,hosts:l.map((function(e){return Vr(e)})),read:function(e,t){var n=Lr(t,f.timeouts.read),r=function(){return Jr(f,f.hosts.filter((function(e){return 0!=(e.accept&qr.Read)})),e,n)};if(!0!==(void 0!==n.cacheable?n.cacheable:e.cacheable))return r();var o={request:e,mappedRequestOptions:n,transporter:{queryParameters:f.queryParameters,headers:f.headers}};return f.responsesCache.get(o,(function(){return f.requestsCache.get(o,(function(){return f.requestsCache.set(o,r()).then((function(e){return Promise.all([f.requestsCache.delete(o),e])}),(function(e){return Promise.all([f.requestsCache.delete(o),Promise.reject(e)])})).then((function(e){var t=i(e,2);return 
t[0],t[1]}))}))}),{miss:function(e){return f.responsesCache.set(o,e)}})},write:function(e,t){return Jr(f,f.hosts.filter((function(e){return 0!=(e.accept&qr.Write)})),e,Lr(t,f.timeouts.write))}};return f}(t(t({hosts:[{url:"".concat(n,"-dsn.algolia.net"),accept:qr.Read},{url:"".concat(n,".algolia.net"),accept:qr.Write}].concat(Ar([{url:"".concat(n,"-1.algolianet.com")},{url:"".concat(n,"-2.algolianet.com")},{url:"".concat(n,"-3.algolianet.com")}]))},e),{},{headers:t(t(t({},r.headers()),{"content-type":"application/x-www-form-urlencoded"}),e.headers),queryParameters:t(t({},r.queryParameters()),e.queryParameters)})),c={transporter:o,appId:n,addAlgoliaAgent:function(e,t){o.userAgent.add({segment:e,version:t})},clearCache:function(){return Promise.all([o.requestsCache.clear(),o.responsesCache.clear()]).then((function(){}))}};return xr(c,e.methods)},eo=function(e){return function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r={transporter:e.transporter,appId:e.appId,indexName:t};return xr(r,n.methods)}},to=function(e){return function(n,r){var o=n.map((function(e){return t(t({},e),{},{params:Yr(e.params||{})})}));return e.transporter.read({method:Wr,path:"1/indexes/*/queries",data:{requests:o},cacheable:!0},r)}},no=function(e){return function(n,r){return Promise.all(n.map((function(n){var o=n.params,i=o.facetName,a=o.facetQuery,u=c(o,Ir);return eo(e)(n.indexName,{methods:{searchForFacetValues:co}}).searchForFacetValues(i,a,t(t({},r),u))})))}},ro=function(e){return function(t,n,r){return e.transporter.read({method:Wr,path:Nr("1/answers/%s/prediction",e.indexName),data:{query:t,queryLanguages:n},cacheable:!0},r)}},oo=function(e){return function(t,n){return e.transporter.read({method:Wr,path:Nr("1/indexes/%s/query",e.indexName),data:{query:t},cacheable:!0},n)}},co=function(e){return function(t,n,r){return e.transporter.read({method:Wr,path:Nr("1/indexes/%s/facets/%s/query",e.indexName,t),data:{facetQuery:n},cacheable:!0},r)}},io=1,ao=2,uo=3;function 
lo(e,n,r){var o,c={appId:e,apiKey:n,timeouts:{connect:1,read:2,write:30},requester:{send:function(e){return new Promise((function(t){var n=new XMLHttpRequest;n.open(e.method,e.url,!0),Object.keys(e.headers).forEach((function(t){return n.setRequestHeader(t,e.headers[t])}));var r,o=function(e,r){return setTimeout((function(){n.abort(),t({status:0,content:r,isTimedOut:!0})}),1e3*e)},c=o(e.connectTimeout,"Connection timeout");n.onreadystatechange=function(){n.readyState>n.OPENED&&void 0===r&&(clearTimeout(c),r=o(e.responseTimeout,"Socket timeout"))},n.onerror=function(){0===n.status&&(clearTimeout(c),clearTimeout(r),t({content:n.responseText||"Network request failed",status:n.status,isTimedOut:!1}))},n.onload=function(){clearTimeout(c),clearTimeout(r),t({content:n.responseText,status:n.status,isTimedOut:!1})},n.send(e.data)}))}},logger:(o=uo,{debug:function(e,t){return io>=o&&console.debug(e,t),Promise.resolve()},info:function(e,t){return ao>=o&&console.info(e,t),Promise.resolve()},error:function(e,t){return console.error(e,t),Promise.resolve()}}),responsesCache:Cr(),requestsCache:Cr({serializable:!1}),hostsCache:Dr({caches:[kr({key:"".concat(Rr,"-").concat(e)}),Cr()]}),userAgent:$r(Rr).add({segment:"Browser",version:"lite"}),authMode:Tr.WithinQueryParameters};return Xr(t(t(t({},c),r),{},{methods:{search:to,searchForFacetValues:no,multipleQueries:to,multipleSearchForFacetValues:no,initIndex:function(e){return function(t){return eo(e)(t,{methods:{search:oo,searchForFacetValues:co,findAnswers:ro}})}}}}))}lo.version=Rr;var so="3.2.0";var fo=["footer","searchBox"];function po(){return po=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var c=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function go(e){var t=e.appId,n=e.apiKey,r=e.indexName,o=e.placeholder,c=void 0===o?"Search 
docs":o,i=e.searchParameters,a=e.onClose,u=void 0===a?cr:a,l=e.transformItems,s=void 0===l?or:l,f=e.hitComponent,p=void 0===f?In:f,m=e.resultsFooterComponent,d=void 0===m?function(){return null}:m,h=e.navigator,v=e.initialScrollY,y=void 0===v?0:v,_=e.transformSearchClient,b=void 0===_?or:_,g=e.disableUserPersonalization,O=void 0!==g&&g,S=e.initialQuery,E=void 0===S?"":S,w=e.translations,j=void 0===w?{}:w,P=e.getMissingResultsUrl,I=j.footer,k=j.searchBox,D=bo(j,fo),C=yo(Be.useState({query:"",collections:[],completion:null,context:{},isOpen:!1,activeItemId:null,status:"idle"}),2),A=C[0],x=C[1],N=Be.useRef(null),R=Be.useRef(null),T=Be.useRef(null),L=Be.useRef(null),q=Be.useRef(null),M=Be.useRef(10),H=Be.useRef("undefined"!=typeof window?window.getSelection().toString().slice(0,64):"").current,U=Be.useRef(E||H).current,F=function(e,t,n){return Be.useMemo((function(){var r=lo(e,t);return r.addAlgoliaAgent("docsearch",so),!1===/docsearch.js \(.*\)/.test(r.transporter.userAgent.value)&&r.addAlgoliaAgent("docsearch-react",so),n(r)}),[e,t,n])}(t,n,b),B=Be.useRef(Pr({key:"__DOCSEARCH_FAVORITE_SEARCHES__".concat(r),limit:10})).current,V=Be.useRef(Pr({key:"__DOCSEARCH_RECENT_SEARCHES__".concat(r),limit:0===B.getAll().length?7:4})).current,z=Be.useCallback((function(e){if(!O){var t="content"===e.type?e.__docsearch_parent:e;t&&-1===B.getAll().findIndex((function(e){return e.objectID===t.objectID}))&&V.add(t)}}),[B,V,O]),W=Be.useMemo((function(){return En({id:"docsearch",defaultActiveItemId:0,placeholder:c,openOnFocus:!0,initialState:{query:U,context:{searchSuggestions:[]}},navigator:h,onStateChange:function(e){x(e.state)},getSources:function(e){var t=e.query,n=e.state,o=e.setContext,c=e.setStatus;return 
t?F.search([{query:t,indexName:r,params:ho({attributesToRetrieve:["hierarchy.lvl0","hierarchy.lvl1","hierarchy.lvl2","hierarchy.lvl3","hierarchy.lvl4","hierarchy.lvl5","hierarchy.lvl6","content","type","url"],attributesToSnippet:["hierarchy.lvl1:".concat(M.current),"hierarchy.lvl2:".concat(M.current),"hierarchy.lvl3:".concat(M.current),"hierarchy.lvl4:".concat(M.current),"hierarchy.lvl5:".concat(M.current),"hierarchy.lvl6:".concat(M.current),"content:".concat(M.current)],snippetEllipsisText:"…",highlightPreTag:"",highlightPostTag:"",hitsPerPage:20},i)}]).catch((function(e){throw"RetryError"===e.name&&c("error"),e})).then((function(e){var t=e.results[0],r=t.hits,c=t.nbHits,i=rr(r,(function(e){return ur(e)}));return n.context.searchSuggestions.length0&&($(),q.current&&q.current.focus())}),[U,$]),Be.useEffect((function(){function e(){if(R.current){var e=.01*window.innerHeight;R.current.style.setProperty("--docsearch-vh","".concat(e,"px"))}}return e(),window.addEventListener("resize",e),function(){window.removeEventListener("resize",e)}}),[]),Be.createElement("div",po({ref:N},J({"aria-expanded":!0}),{className:["DocSearch","DocSearch-Container","stalled"===A.status&&"DocSearch-Container--Stalled","error"===A.status&&"DocSearch-Container--Errored"].filter(Boolean).join(" 
"),role:"button",tabIndex:0,onMouseDown:function(e){e.target===e.currentTarget&&u()}}),Be.createElement("div",{className:"DocSearch-Modal",ref:R},Be.createElement("header",{className:"DocSearch-SearchBar",ref:T},Be.createElement(Sr,po({},W,{state:A,autoFocus:0===U.length,inputRef:q,isFromSelection:Boolean(U)&&U===H,translations:k,onClose:u}))),Be.createElement("div",{className:"DocSearch-Dropdown",ref:L},Be.createElement(_r,po({},W,{indexName:r,state:A,hitComponent:p,resultsFooterComponent:d,disableUserPersonalization:O,recentSearches:V,favoriteSearches:B,inputRef:q,translations:D,getMissingResultsUrl:P,onItemClick:function(e){z(e),u()}}))),Be.createElement("footer",{className:"DocSearch-Footer"},Be.createElement(Pn,{translations:I}))))}function Oo(){return Oo=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n1&&void 0!==arguments[1]?arguments[1]:window;return"string"==typeof e?t.document.querySelector(e):e}(e.container,e.environment))}})); -//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/themes/docura/assets/js/component/dropdown.js b/themes/docura/assets/js/component/dropdown.js deleted file mode 100644 index eaffb02..0000000 --- a/themes/docura/assets/js/component/dropdown.js +++ /dev/null @@ -1,21 +0,0 @@ -const dropdowns = document.querySelectorAll('.dropdown') -const dropdownOpenSelector = '.dropdown-menu.show'; - -dropdowns.forEach(el => { - el.addEventListener('click', function (e) { - const alreadyShown = el.querySelector('.dropdown-menu.show'); - - document.querySelectorAll(dropdownOpenSelector).forEach(openDropdownEl => openDropdownEl.classList.remove('show')); - - if (!alreadyShown) { - el.querySelector('.dropdown-menu').classList.toggle('show') - } - }) -}); - -document.body.addEventListener('click', function (e) { - const isDropdownMenu = e.target.closest('.dropdown'); - if (!isDropdownMenu) { - document.querySelectorAll(dropdownOpenSelector).forEach(el => 
el.classList.remove('show')); - } -}); diff --git a/themes/docura/assets/js/component/sidebar.js b/themes/docura/assets/js/component/sidebar.js deleted file mode 100644 index 007e049..0000000 --- a/themes/docura/assets/js/component/sidebar.js +++ /dev/null @@ -1,13 +0,0 @@ -const fromDesktop = window.matchMedia('(min-width: 1280px)'); -const sidebarSticky = document.querySelector("#sidebar .sticky") -if (fromDesktop && sidebarSticky) { - window.addEventListener("scroll", function () { - if (document.body.scrollTop > 80 || document.documentElement.scrollTop > 80) { - sidebarSticky.style.top = "20px"; - sidebarSticky.style.bottom = "65px"; - } else { - sidebarSticky.style.top = null; - sidebarSticky.style.bottom = null; - } - }); -} \ No newline at end of file diff --git a/themes/docura/assets/js/component/toc.js b/themes/docura/assets/js/component/toc.js deleted file mode 100644 index f0e7af9..0000000 --- a/themes/docura/assets/js/component/toc.js +++ /dev/null @@ -1,47 +0,0 @@ -const fromLargeTablet = window.matchMedia('(min-width: 1024px)'); -const tocSticky = document.querySelector("#toc .sticky") -if (fromLargeTablet && tocSticky) { - window.addEventListener("scroll", function () { - if (document.body.scrollTop > 80 || document.documentElement.scrollTop > 80) { - tocSticky.style.top = "20px"; - tocSticky.style.bottom = "65px"; - } else { - tocSticky.style.top = null; - tocSticky.style.bottom = null; - } - }); -} - -if ('IntersectionObserver' in window) { - document.addEventListener('DOMContentLoaded', function () { - const links = document.querySelectorAll('#TableOfContents a'); - let activeLink = null; - const linksById = {}; - - const observer = new IntersectionObserver(entries => { - entries.forEach(entry => { - if (entry.isIntersecting) { - if (activeLink) { - activeLink.classList.remove('active'); - } - - activeLink = linksById[entry.target.id]; - if (activeLink) { - activeLink.classList.add('active'); - } - } - }); - }, {rootMargin: `0% 0% -80% 0%`}); - - 
links.forEach(link => { - const id = link.getAttribute('href') ? link.getAttribute('href').slice(1) : null; // Checking if href exists before slicing # - if (id) { - const target = document.getElementById(id); - if (target) { - linksById[id] = link; - observer.observe(target); - } - } - }); - }); -} diff --git a/themes/docura/assets/scss/base.scss b/themes/docura/assets/scss/base.scss deleted file mode 100644 index 6617bae..0000000 --- a/themes/docura/assets/scss/base.scss +++ /dev/null @@ -1,27 +0,0 @@ -/*! - * Docura (https://docura.github.io/) - * Copyright 2022-2023 Dumindu Madunuwan - * Licensed under the MIT License. - */ - -@import "reset"; -@import "variables"; -@import "layout"; - -@import "component/site-header"; -@import "component/site-footer"; -@import "component/article"; -@import "component/sidebar"; -@import "component/toc"; - -@import "component/button"; -@import "component/dropdown"; -@import "component/chroma"; - -html { - font-family: var(--font-family); - background: var(--background); - color: var(--color); - scroll-behavior: smooth; - scroll-padding: 2em; -} diff --git a/themes/docura/assets/scss/component/_button.scss b/themes/docura/assets/scss/component/_button.scss deleted file mode 100644 index 60bbf35..0000000 --- a/themes/docura/assets/scss/component/_button.scss +++ /dev/null @@ -1,42 +0,0 @@ -.btn-github { - display: flex; - flex-direction: row; - gap: 2px; - font-size: .7em; /*11 px*/ - font-weight: 700; - line-height: 1.8em; - color: #576060; - background: #f6f8fa; - border: 1px solid #d5d7da; - border-radius: 6px; - padding: 2px 4px; -} - -:root[data-color="dark"] .btn-github, :root[data-color="night"] .btn-github { - color: #c9d1d9; - background: #21262d; - border: 1px solid #576060; -} - -.btn-github .icon { - transform: scale(.8); /* 18px */ -} - -.btn-buymeacoffee { - width: 86px; - height: 24px; - background-image: url("data:image/svg+xml,%3Csvg width='85.5' height='24' viewBox='0 0 545 153' fill='none' 
xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0 24.48C0 10.9601 10.9601 0 24.48 0H520.2C533.72 0 544.68 10.9601 544.68 24.48V128.52C544.68 142.04 533.72 153 520.2 153H24.48C10.9601 153 0 142.04 0 128.52V24.48Z' fill='%23FFDD00'/%3E%3Cpath d='M109.522 50.3178L109.455 50.2783L109.299 50.2308C109.362 50.2836 109.44 50.3142 109.522 50.3178Z' fill='%230D0C22'/%3E%3Cpath d='M110.507 57.3134L110.432 57.3344L110.507 57.3134Z' fill='%230D0C22'/%3E%3Cpath d='M109.549 50.3062C109.54 50.3051 109.532 50.3031 109.524 50.3003C109.523 50.3058 109.523 50.3113 109.524 50.3168C109.533 50.3156 109.541 50.3119 109.549 50.3062Z' fill='%230D0C22'/%3E%3Cpath d='M109.523 50.3205H109.536V50.3127L109.523 50.3205Z' fill='%230D0C22'/%3E%3Cpath d='M110.447 57.3006L110.56 57.2361L110.602 57.2123L110.64 57.1715C110.569 57.2025 110.503 57.2462 110.447 57.3006Z' fill='%230D0C22'/%3E%3Cpath d='M109.715 50.4713L109.604 50.3659L109.529 50.3251C109.57 50.3963 109.636 50.4488 109.715 50.4713Z' fill='%230D0C22'/%3E%3Cpath d='M81.8801 118.353C81.7916 118.391 81.7142 118.451 81.6548 118.527L81.7246 118.482C81.772 118.439 81.8392 118.387 81.8801 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M98.0456 115.173C98.0456 115.073 97.9968 115.091 98.0087 115.447C98.0087 115.418 98.0206 115.389 98.0258 115.361C98.0324 115.298 98.0377 115.236 98.0456 115.173Z' fill='%230D0C22'/%3E%3Cpath d='M96.3761 118.353C96.2877 118.391 96.2103 118.451 96.1509 118.527L96.2207 118.482C96.2681 118.439 96.3353 118.387 96.3761 118.353Z' fill='%230D0C22'/%3E%3Cpath d='M70.4886 119.11C70.4215 119.052 70.3393 119.013 70.2515 118.999C70.3226 119.034 70.3937 119.068 70.4412 119.094L70.4886 119.11Z' fill='%230D0C22'/%3E%3Cpath d='M67.9304 116.657C67.92 116.553 67.8881 116.453 67.8369 116.362C67.8732 116.456 67.9035 116.553 67.9278 116.652L67.9304 116.657Z' fill='%230D0C22'/%3E%3Cpath d='M85.1368 72.7737C81.6195 74.2794 77.628 75.9866 72.4549 75.9866C70.2908 75.9823 68.1373 75.6854 66.0527 75.104L69.6306 111.838C69.7572 113.373 70.4567 114.805 
71.59 115.848C72.7233 116.892 74.2076 117.471 75.7482 117.47C75.7482 117.47 80.8212 117.734 82.514 117.734C84.3358 117.734 89.7988 117.47 89.7988 117.47C91.3391 117.47 92.8231 116.891 93.9562 115.848C95.0892 114.804 95.7885 113.373 95.9151 111.838L99.7472 71.2456C98.0347 70.6607 96.3064 70.2721 94.358 70.2721C90.9883 70.2708 88.2733 71.4313 85.1368 72.7737Z' fill='white'/%3E%3Cpath d='M54.9844 57.1021L55.045 57.1587L55.0845 57.1824C55.0541 57.1522 55.0205 57.1252 54.9844 57.1021Z' fill='%230D0C22'/%3E%3Cpath d='M116.299 53.7119L115.761 50.9943C115.277 48.5559 114.18 46.2519 111.677 45.3706C110.875 45.0887 109.964 44.9675 109.349 44.384C108.734 43.8004 108.552 42.8941 108.41 42.0536C108.147 40.511 107.899 38.9671 107.629 37.4272C107.396 36.1033 107.211 34.616 106.604 33.4015C105.814 31.7706 104.174 30.8169 102.543 30.1859C101.707 29.8739 100.854 29.61 99.9884 29.3955C95.9139 28.3205 91.63 27.9253 87.4382 27.7001C82.407 27.4225 77.3623 27.5061 72.343 27.9504C68.6071 28.2902 64.6723 28.7013 61.1221 29.9935C59.8245 30.4665 58.4875 31.0342 57.5008 32.0367C56.2902 33.2684 55.895 35.1733 56.7789 36.7092C57.4073 37.8 58.4717 38.5706 59.6006 39.0804C61.0711 39.7373 62.6068 40.2371 64.1822 40.5716C68.5689 41.5412 73.1124 41.9219 77.5939 42.0839C82.561 42.2844 87.5362 42.1219 92.4796 41.5978C93.7021 41.4635 94.9224 41.3023 96.1405 41.1144C97.575 40.8944 98.4958 39.0185 98.073 37.7117C97.5671 36.1494 96.2077 35.5434 94.6703 35.7792C94.4438 35.8148 94.2185 35.8477 93.9919 35.8807L93.8286 35.9044C93.3078 35.9702 92.787 36.0317 92.2662 36.0888C91.1904 36.2047 90.112 36.2996 89.0309 36.3733C86.6097 36.5419 84.1818 36.6197 81.7553 36.6236C79.371 36.6236 76.9853 36.5564 74.6062 36.3997C73.5207 36.3285 72.4379 36.2381 71.3577 36.1283C70.8663 36.0769 70.3763 36.0229 69.8862 35.9623L69.4199 35.903L69.3185 35.8886L68.835 35.8187C67.847 35.6699 66.859 35.4986 65.8816 35.2918C65.783 35.2699 65.6947 35.2151 65.6315 35.1363C65.5683 35.0575 65.5338 34.9594 65.5338 34.8584C65.5338 34.7574 
65.5683 34.6594 65.6315 34.5806C65.6947 34.5018 65.783 34.4469 65.8816 34.425H65.9C66.7471 34.2445 67.6007 34.0904 68.4569 33.956C68.7424 33.9113 69.0287 33.8673 69.3158 33.8243H69.3237C69.8599 33.7887 70.3987 33.6926 70.9322 33.6293C75.574 33.1465 80.2434 32.9819 84.9077 33.1367C87.1721 33.2025 89.4353 33.3356 91.6892 33.5648C92.174 33.6149 92.6562 33.6676 93.1383 33.7268C93.3227 33.7492 93.5085 33.7756 93.6942 33.798L94.0683 33.852C95.1591 34.0144 96.2441 34.2116 97.3234 34.4435C98.9227 34.7912 100.976 34.9045 101.688 36.6566C101.914 37.2125 102.017 37.8303 102.142 38.4139L102.302 39.1581C102.306 39.1715 102.309 39.1852 102.311 39.199C102.688 40.9554 103.065 42.7118 103.442 44.4683C103.47 44.598 103.471 44.7321 103.444 44.8621C103.418 44.9921 103.365 45.1153 103.289 45.2239C103.213 45.3326 103.115 45.4244 103.002 45.4936C102.889 45.5628 102.762 45.6079 102.631 45.6262H102.62L102.39 45.6578L102.162 45.6881C101.44 45.7821 100.717 45.8699 99.9936 45.9516C98.5683 46.114 97.1408 46.2546 95.711 46.3731C92.87 46.6094 90.0233 46.7644 87.1708 46.8381C85.7174 46.8768 84.2644 46.8948 82.8118 46.8921C77.0301 46.8876 71.2534 46.5516 65.5101 45.8857C64.8883 45.8119 64.2666 45.7329 63.6448 45.6525C64.1269 45.7145 63.2944 45.6051 63.1258 45.5814C62.7306 45.5261 62.3354 45.4686 61.9402 45.4088C60.6136 45.2099 59.295 44.9649 57.9711 44.7502C56.3705 44.4867 54.8398 44.6185 53.3921 45.4088C52.2037 46.0591 51.2419 47.0564 50.6349 48.2674C50.0105 49.5584 49.8248 50.964 49.5455 52.3511C49.2662 53.7383 48.8315 55.2308 48.9962 56.6548C49.3505 59.7281 51.4991 62.2258 54.5895 62.7843C57.4968 63.3112 60.42 63.7381 63.351 64.1016C74.8648 65.5118 86.4968 65.6805 98.0466 64.6049C98.9872 64.517 99.9265 64.4213 100.864 64.3177C101.157 64.2855 101.454 64.3192 101.732 64.4165C102.01 64.5137 102.263 64.6719 102.472 64.8795C102.681 65.0872 102.842 65.339 102.941 65.6165C103.04 65.894 103.076 66.1902 103.046 66.4834L102.753 69.3261C102.164 75.0705 101.575 80.8145 100.986 86.558C100.371 92.5896 
99.7521 98.6208 99.1295 104.651C98.9538 106.35 98.7782 108.048 98.6025 109.746C98.4339 111.417 98.4102 113.142 98.0927 114.794C97.5922 117.391 95.8335 118.987 93.2674 119.57C90.9164 120.105 88.5148 120.386 86.1038 120.408C83.431 120.422 80.7594 120.304 78.0866 120.318C75.2333 120.334 71.7384 120.071 69.5358 117.947C67.6007 116.082 67.3333 113.161 67.0698 110.636C66.7185 107.293 66.3703 103.95 66.0252 100.607L64.0887 82.0212L62.8359 69.9953C62.8149 69.7964 62.7938 69.6001 62.774 69.3999C62.6239 67.9654 61.6082 66.5611 60.0077 66.6335C58.6376 66.6941 57.0806 67.8586 57.2413 69.3999L58.17 78.3155L60.0906 96.7581C60.6378 101.997 61.1836 107.236 61.7281 112.476C61.8335 113.48 61.9323 114.487 62.0429 115.49C62.6449 120.976 66.834 123.932 72.0216 124.764C75.0515 125.252 78.1551 125.352 81.2297 125.402C85.1711 125.465 89.1521 125.617 93.029 124.903C98.7738 123.849 103.084 120.013 103.699 114.062C103.875 112.345 104.051 110.626 104.226 108.908C104.81 103.224 105.393 97.5397 105.976 91.855L107.88 73.2807L108.754 64.7682C108.797 64.3461 108.976 63.9492 109.262 63.6363C109.549 63.3234 109.929 63.111 110.345 63.0307C111.988 62.7105 113.558 62.1639 114.727 60.9137C116.587 58.9232 116.957 56.3281 116.299 53.7119ZM54.5052 55.5483C54.5302 55.5364 54.4841 55.7511 54.4644 55.8513C54.4604 55.6998 54.4683 55.5654 54.5052 55.5483ZM54.6646 56.7813C54.6778 56.7721 54.7173 56.8248 54.7581 56.888C54.6962 56.83 54.6567 56.7866 54.6633 56.7813H54.6646ZM54.8214 56.9881C54.878 57.0843 54.9083 57.1449 54.8214 56.9881V56.9881ZM55.1362 57.2437H55.1441C55.1441 57.2529 55.1586 57.2621 55.1639 57.2713C55.1551 57.2612 55.1454 57.2519 55.1349 57.2437H55.1362ZM110.269 56.8616C109.679 57.4228 108.789 57.6837 107.911 57.8141C98.0572 59.2763 88.06 60.0166 78.0984 59.6899C70.9691 59.4462 63.9148 58.6545 56.8566 57.6573C56.165 57.5598 55.4155 57.4334 54.9399 56.9236C54.0441 55.9619 54.4841 54.0254 54.7173 52.8636C54.9307 51.7992 55.3391 50.3804 56.605 50.2289C58.581 49.9971 60.8758 50.8309 62.8307 
51.1273C65.1843 51.4865 67.5467 51.7741 69.9179 51.9902C80.0375 52.9123 90.3271 52.7687 100.402 51.4198C102.238 51.173 104.068 50.8863 105.891 50.5596C107.516 50.2684 109.316 49.7218 110.298 51.404C110.971 52.55 111.06 54.0834 110.956 55.3783C110.924 55.9425 110.678 56.4732 110.267 56.8616H110.269Z' fill='%230D0C22'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M170.036 84.2397C169.461 85.3378 168.67 86.2942 167.663 87.1057C166.656 87.9178 165.482 88.579 164.139 89.0881C162.797 89.5984 161.446 89.9408 160.088 90.1153C158.729 90.2905 157.41 90.2753 156.133 90.0674C154.854 89.8608 153.766 89.439 152.872 88.8014L153.88 78.3397C154.806 78.0216 155.972 77.6949 157.379 77.3604C158.785 77.0264 160.231 76.787 161.718 76.644C163.205 76.5004 164.61 76.5173 165.937 76.6919C167.263 76.867 168.31 77.2888 169.077 77.9579C169.493 78.3397 169.845 78.7537 170.132 79.1997C170.42 79.6458 170.595 80.1076 170.66 80.5852C170.819 81.9227 170.612 83.1409 170.036 84.2397ZM155.413 61.9545C156.084 61.5406 156.892 61.1739 157.834 60.8551C158.777 60.5376 159.744 60.3139 160.735 60.1867C161.725 60.06 162.692 60.043 163.636 60.1388C164.578 60.2345 165.41 60.497 166.129 60.9267C166.848 61.357 167.383 61.9782 167.735 62.7897C168.086 63.6024 168.182 64.6296 168.022 65.8714C167.895 66.8587 167.502 67.695 166.848 68.3793C166.193 69.0647 165.393 69.6374 164.451 70.0993C163.508 70.5617 162.509 70.9277 161.455 71.1974C160.399 71.4689 159.384 71.6683 158.41 71.795C157.435 71.9229 156.588 72.0029 155.869 72.0338C155.15 72.0659 154.678 72.0816 154.454 72.0816L155.413 61.9545ZM175.214 77.4798C174.703 76.3658 174.016 75.3864 173.153 74.5416C172.29 73.698 171.266 73.0853 170.084 72.7029C170.595 72.2889 171.099 71.6362 171.595 70.7441C172.09 69.8532 172.513 68.8811 172.865 67.8302C173.216 66.7787 173.457 65.7205 173.584 64.6533C173.711 63.5866 173.663 62.6709 173.441 61.906C172.896 59.9958 172.042 58.4988 170.875 57.4158C169.708 56.3334 168.35 55.5849 166.8 55.1704C165.249 54.7577 163.54 54.6692 161.67 
54.908C159.8 55.1467 157.89 55.6164 155.941 56.317C155.941 56.1582 155.957 55.991 155.989 55.8158C156.02 55.6413 156.036 55.4576 156.036 55.2661C156.036 54.7886 155.797 54.3752 155.317 54.0243C154.838 53.674 154.287 53.4674 153.664 53.4031C153.04 53.3401 152.433 53.4746 151.841 53.8092C151.25 54.1437 150.842 54.7577 150.619 55.6479C150.363 58.5146 150.107 61.4927 149.852 64.5812C149.596 67.6708 149.324 70.792 149.037 73.9453C148.749 77.0979 148.461 80.227 148.174 83.3318C147.886 86.4372 147.598 89.4226 147.311 92.2886C147.407 93.1486 147.646 93.8177 148.03 94.2953C148.413 94.7734 148.861 95.0601 149.372 95.1553C149.883 95.251 150.419 95.1625 150.978 94.8922C151.537 94.6225 152.025 94.1516 152.441 93.4832C153.719 94.1838 155.158 94.6377 156.756 94.845C158.354 95.0516 159.975 95.0516 161.623 94.845C163.268 94.6377 164.89 94.248 166.488 93.6741C168.086 93.1013 169.541 92.3844 170.851 91.525C172.162 90.665 173.264 89.685 174.16 88.5869C175.054 87.4875 175.646 86.3014 175.933 85.0281C176.221 83.7221 176.301 82.4167 176.173 81.1106C176.045 79.8052 175.725 78.5955 175.214 77.4798Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M221.989 102.702C221.814 103.753 221.565 104.86 221.246 106.023C220.926 107.184 220.551 108.244 220.12 109.2C219.688 110.155 219.209 110.926 218.682 111.516C218.154 112.105 217.586 112.352 216.979 112.257C216.5 112.192 216.196 111.89 216.069 111.349C215.94 110.807 215.94 110.138 216.069 109.343C216.196 108.546 216.443 107.646 216.811 106.643C217.179 105.64 217.627 104.644 218.154 103.658C218.682 102.67 219.281 101.723 219.952 100.815C220.623 99.9082 221.326 99.1512 222.061 98.5464C222.221 98.7373 222.293 99.2149 222.277 99.9797C222.26 100.744 222.165 101.652 221.989 102.702ZM238.243 81.9697C237.811 81.4921 237.284 81.2218 236.66 81.1576C236.037 81.0939 235.405 81.4442 234.767 82.2085C234.351 82.9727 233.823 83.7054 233.184 84.406C232.545 85.1072 231.882 85.7436 231.195 86.3169C230.507 86.8896 229.852 87.3841 229.229 
87.7975C228.606 88.212 228.118 88.5144 227.767 88.7053C227.639 87.6866 227.566 86.5878 227.551 85.409C227.534 84.2308 227.559 83.0369 227.623 81.8266C227.718 80.1067 227.918 78.3715 228.222 76.6194C228.526 74.868 228.965 73.148 229.541 71.4595C229.541 70.5686 229.332 69.8438 228.917 69.2862C228.501 68.7293 227.998 68.3784 227.407 68.2353C226.815 68.0923 226.209 68.1717 225.585 68.4741C224.962 68.7771 224.427 69.3268 223.979 70.122C223.596 71.1735 223.156 72.3516 222.661 73.6571C222.165 74.9631 221.606 76.2928 220.983 77.6461C220.359 79.0006 219.664 80.3139 218.897 81.5873C218.13 82.8618 217.291 83.9927 216.38 84.9793C215.469 85.9666 214.478 86.7393 213.408 87.2963C212.336 87.8538 211.179 88.1005 209.932 88.0369C209.356 87.8775 208.94 87.4478 208.685 86.7466C208.429 86.0466 208.277 85.1702 208.23 84.1193C208.182 83.0684 208.23 81.9139 208.373 80.6557C208.517 79.3982 208.709 78.1479 208.949 76.9061C209.188 75.6637 209.452 74.4855 209.739 73.371C210.027 72.2565 210.298 71.3165 210.554 70.5523C210.938 69.6292 210.938 68.8559 210.554 68.2353C210.171 67.6141 209.644 67.2008 208.973 66.9929C208.302 66.7863 207.598 66.7947 206.863 67.0172C206.128 67.2402 205.6 67.7335 205.281 68.4977C204.737 69.8044 204.241 71.2686 203.794 72.8928C203.347 74.5171 202.987 76.1976 202.716 77.9328C202.444 79.6691 202.291 81.3891 202.26 83.0927C202.258 83.2036 202.263 83.309 202.263 83.4193C201.566 85.2708 200.902 86.6702 200.271 87.6066C199.456 88.8174 198.536 89.3429 197.514 89.1829C197.065 88.992 196.771 88.5465 196.627 87.8453C196.482 87.1453 196.435 86.2854 196.482 85.2654C196.531 84.2472 196.651 83.0927 196.842 81.8024C197.035 80.5127 197.273 79.1752 197.561 77.7897C197.849 76.4037 198.153 75.0116 198.472 73.6098C198.792 72.2086 199.079 70.8868 199.336 69.6444C199.304 68.5299 198.976 67.6784 198.352 67.0887C197.73 66.5002 196.858 66.2693 195.74 66.396C194.973 66.7147 194.405 67.1293 194.038 67.6384C193.67 68.1474 193.374 68.8008 193.151 69.5965C193.022 70.0111 192.831 70.8389 192.575 
72.0813C192.319 73.3225 191.992 74.7486 191.592 76.3564C191.193 77.9655 190.721 79.6449 190.178 81.3963C189.635 83.1478 189.027 84.7333 188.357 86.1496C187.685 87.5666 186.95 88.7053 186.151 89.5653C185.352 90.4247 184.489 90.7756 183.562 90.6162C183.05 90.5205 182.723 89.995 182.579 89.0399C182.435 88.0841 182.412 86.9066 182.507 85.5048C182.603 84.1036 182.795 82.5666 183.082 80.8951C183.37 79.223 183.665 77.6388 183.969 76.1413C184.273 74.6449 184.553 73.3225 184.809 72.1765C185.064 71.0298 185.24 70.2656 185.336 69.8838C185.336 68.9602 185.127 68.2202 184.713 67.662C184.297 67.1056 183.794 66.7547 183.202 66.6111C182.61 66.4681 182.003 66.5475 181.381 66.8499C180.757 67.1529 180.222 67.7026 179.774 68.4977C179.614 69.3577 179.406 70.3535 179.151 71.4838C178.895 72.614 178.648 73.7765 178.408 74.971C178.168 76.1655 177.944 77.3358 177.737 78.4824C177.529 79.6291 177.377 80.6321 177.281 81.4921C177.217 82.1606 177.145 82.9812 177.066 83.9521C176.985 84.9242 176.945 85.9508 176.945 87.0332C176.945 88.1169 177.025 89.1914 177.186 90.258C177.345 91.3253 177.633 92.3047 178.048 93.1956C178.463 94.0877 179.047 94.8198 179.799 95.3931C180.549 95.9664 181.5 96.2846 182.651 96.3489C183.833 96.4119 184.864 96.3252 185.744 96.0858C186.622 95.847 187.421 95.4725 188.141 94.9628C188.86 94.4543 189.515 93.8489 190.107 93.1477C190.697 92.4477 191.281 91.6835 191.856 90.855C192.4 92.0659 193.103 93.0047 193.966 93.6737C194.829 94.3422 195.74 94.741 196.699 94.8677C197.657 94.9943 198.633 94.8604 199.624 94.4616C200.614 94.064 201.509 93.3871 202.308 92.4313C202.835 91.8453 203.331 91.1792 203.797 90.4429C203.995 90.7877 204.205 91.1204 204.442 91.4277C205.225 92.4477 206.288 93.1477 207.631 93.5301C209.069 93.9125 210.474 93.9768 211.849 93.7216C213.223 93.4671 214.534 93.0047 215.78 92.3362C217.027 91.6671 218.185 90.8635 219.257 89.9235C220.327 88.9841 221.262 88.0053 222.061 86.9854C222.029 87.7181 222.013 88.4114 222.013 89.0635C222.013 89.7168 221.997 90.4247 221.966 
91.1895C220.367 92.3047 218.857 93.6422 217.435 95.2022C216.012 96.7622 214.765 98.4264 213.695 100.194C212.624 101.961 211.785 103.753 211.179 105.568C210.571 107.384 210.275 109.08 210.291 110.657C210.307 112.233 210.682 113.61 211.418 114.788C212.152 115.967 213.351 116.81 215.013 117.32C216.74 117.862 218.257 117.877 219.569 117.368C220.879 116.858 222.021 116.014 222.996 114.836C223.971 113.658 224.77 112.233 225.394 110.561C226.017 108.889 226.512 107.145 226.88 105.33C227.247 103.515 227.479 101.73 227.575 99.9797C227.671 98.2276 227.671 96.6664 227.575 95.2974C230.324 94.1513 232.577 92.7022 234.335 90.9501C236.093 89.1999 237.547 87.352 238.698 85.409C239.049 84.9314 239.169 84.3581 239.058 83.6896C238.945 83.0206 238.674 82.4472 238.243 81.9697Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M298.724 78.9135C298.82 78.1814 298.964 77.4087 299.155 76.5966C299.347 75.7845 299.587 74.996 299.875 74.2318C300.162 73.4676 300.498 72.807 300.882 72.2494C301.265 71.6924 301.673 71.2943 302.104 71.0549C302.536 70.8167 302.974 70.8403 303.423 71.1264C303.902 71.4137 304.197 72.0185 304.31 72.9415C304.421 73.8663 304.31 74.853 303.974 75.9039C303.638 76.9554 303.039 77.942 302.176 78.8657C301.313 79.7899 300.146 80.3941 298.676 80.6808C298.612 80.236 298.628 79.6463 298.724 78.9135ZM315.336 80.8717C314.809 80.7135 314.306 80.6972 313.826 80.8244C313.347 80.9517 313.043 81.2862 312.916 81.8281C312.659 82.8468 312.251 83.8898 311.692 84.9565C311.133 86.0238 310.446 87.0346 309.632 87.9904C308.817 88.9455 307.897 89.7898 306.875 90.5219C305.851 91.2546 304.781 91.78 303.662 92.0982C302.543 92.4491 301.616 92.4885 300.882 92.2176C300.146 91.9479 299.563 91.4855 299.132 90.8328C298.7 90.1801 298.388 89.3916 298.197 88.468C298.005 87.5443 297.893 86.5892 297.861 85.6013C299.683 85.7292 301.305 85.4032 302.728 84.622C304.149 83.8426 305.356 82.8068 306.347 81.5171C307.337 80.2275 308.089 78.7784 308.6 77.1699C309.111 75.5621 309.399 73.9615 309.463 
72.3688C309.495 70.8718 309.272 69.6064 308.792 68.5713C308.313 67.5367 307.665 66.7313 306.85 66.1586C306.036 65.5853 305.1 65.2507 304.046 65.1556C302.992 65.0598 301.92 65.2034 300.833 65.5853C299.522 66.0313 298.412 66.7555 297.501 67.7592C296.59 68.7622 295.831 69.9252 295.224 71.2464C294.617 72.5682 294.137 73.993 293.786 75.5215C293.434 77.0505 293.178 78.5554 293.019 80.0366C292.875 81.3656 292.798 82.6365 292.771 83.8632C292.702 84.0189 292.636 84.1686 292.563 84.3353C292.067 85.4668 291.491 86.5734 290.837 87.6558C290.182 88.7389 289.454 89.6467 288.656 90.3788C287.857 91.1116 287.026 91.3661 286.163 91.1431C285.651 91.0164 285.372 90.4261 285.324 89.3758C285.276 88.3243 285.331 87.0189 285.491 85.4583C285.651 83.8983 285.835 82.2093 286.043 80.3941C286.25 78.579 286.354 76.8439 286.354 75.1875C286.354 73.7542 286.082 72.3773 285.539 71.0549C284.995 69.7343 284.252 68.6349 283.31 67.7592C282.367 66.8828 281.272 66.3016 280.026 66.0156C278.779 65.7283 277.437 65.9198 275.999 66.5883C274.56 67.2574 273.417 68.1967 272.571 69.407C271.723 70.6179 270.948 71.8912 270.245 73.2288C269.989 72.2094 269.614 71.2628 269.118 70.3864C268.623 69.5107 268.016 68.7464 267.297 68.0931C266.577 67.441 265.769 66.9313 264.876 66.5646C263.981 66.1992 263.037 66.0156 262.046 66.0156C261.088 66.0156 260.201 66.1992 259.386 66.5646C258.571 66.9313 257.828 67.4004 257.156 67.9737C256.485 68.5476 255.878 69.1919 255.334 69.9088C254.791 70.6252 254.311 71.3343 253.896 72.0343C253.831 71.2064 253.76 70.4822 253.681 69.8603C253.6 69.2398 253.456 68.7143 253.249 68.2846C253.041 67.8543 252.746 67.5283 252.362 67.3052C251.978 67.0828 251.435 66.9707 250.732 66.9707C250.38 66.9707 250.028 67.0422 249.677 67.1852C249.325 67.3289 249.013 67.5283 248.742 67.7828C248.47 68.0386 248.263 68.3482 248.119 68.7143C247.975 69.0804 247.936 69.5028 247.999 69.9803C248.031 70.3312 248.119 70.7525 248.263 71.2464C248.406 71.7403 248.542 72.3858 248.67 73.1809C248.798 73.9773 248.902 74.9409 248.982 
76.0712C249.062 77.2021 249.085 78.5875 249.054 80.2275C249.021 81.8681 248.902 83.7862 248.694 85.9837C248.486 88.1813 248.158 90.7291 247.711 93.6267C247.647 94.2957 247.903 94.8376 248.479 95.2515C249.054 95.6648 249.709 95.9036 250.444 95.9678C251.179 96.0315 251.875 95.9036 252.53 95.586C253.185 95.2666 253.561 94.7097 253.656 93.9139C253.752 92.417 253.936 90.8249 254.208 89.1364C254.479 87.4492 254.815 85.7771 255.215 84.1207C255.614 82.465 256.069 80.8887 256.581 79.3911C257.092 77.8942 257.66 76.573 258.283 75.4263C258.907 74.2797 259.554 73.3645 260.225 72.6797C260.896 71.9949 261.599 71.6524 262.335 71.6524C263.229 71.6524 263.924 72.0579 264.42 72.87C264.915 73.6827 265.266 74.7263 265.475 75.999C265.682 77.2736 265.778 78.6675 265.763 80.1796C265.746 81.6923 265.682 83.1492 265.571 84.5504C265.459 85.9522 265.331 87.2019 265.187 88.3007C265.043 89.3995 264.939 90.1564 264.876 90.5697C264.876 91.3025 265.155 91.8831 265.714 92.3134C266.273 92.743 266.896 92.9982 267.584 93.0776C268.272 93.1576 268.918 93.0297 269.526 92.6952C270.133 92.3606 270.485 91.7964 270.581 90.9994C270.9 88.7067 271.34 86.4062 271.899 84.0971C272.458 81.7881 273.098 79.7184 273.817 77.8869C274.536 76.0554 275.335 74.5585 276.214 73.3961C277.093 72.2343 278.028 71.6524 279.019 71.6524C279.53 71.6524 279.922 72.0033 280.193 72.7033C280.465 73.4039 280.601 74.3591 280.601 75.5694C280.601 76.4615 280.529 77.3772 280.386 78.3166C280.241 79.256 280.074 80.2275 279.882 81.2305C279.69 82.2341 279.522 83.2608 279.378 84.3117C279.235 85.3632 279.163 86.4613 279.163 87.608C279.163 88.4043 279.243 89.3279 279.403 90.3788C279.562 91.4291 279.865 92.4255 280.313 93.3642C280.761 94.3042 281.376 95.1 282.16 95.7527C282.943 96.4054 283.941 96.7321 285.155 96.7321C286.978 96.7321 288.591 96.3418 289.998 95.5618C291.404 94.7818 292.611 93.763 293.618 92.5049C293.67 92.4388 293.718 92.3685 293.769 92.3031C293.846 92.4891 293.914 92.6861 294.001 92.863C294.688 94.2642 295.623 95.3466 296.806 
96.1115C297.988 96.8757 299.379 97.2975 300.978 97.3775C302.575 97.4563 304.317 97.1618 306.204 96.4933C307.609 95.9836 308.832 95.3466 309.871 94.5824C310.909 93.8182 311.844 92.8867 312.675 91.7879C313.507 90.6891 314.265 89.4231 314.953 87.9904C315.641 86.5565 316.335 84.9171 317.038 83.0692C317.166 82.5608 317.046 82.1068 316.679 81.7081C316.311 81.3105 315.864 81.0317 315.336 80.8717Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M341.393 75.5432C341.233 76.4832 341.018 77.5189 340.746 78.6486C340.474 79.7795 340.131 80.9498 339.715 82.1601C339.3 83.3703 338.788 84.4612 338.181 85.4321C337.574 86.4042 336.878 87.1757 336.096 87.7491C335.312 88.3224 334.41 88.5612 333.387 88.4654C332.875 88.4024 332.483 88.0521 332.212 87.4145C331.94 86.7782 331.797 85.9655 331.78 84.9782C331.764 83.9915 331.852 82.9085 332.044 81.7298C332.236 80.5522 332.531 79.3971 332.932 78.2662C333.331 77.1365 333.818 76.0929 334.393 75.1371C334.969 74.182 335.632 73.4414 336.383 72.916C337.134 72.3905 337.958 72.1445 338.852 72.1754C339.747 72.2075 340.706 72.6529 341.729 73.5129C341.664 73.9275 341.553 74.6044 341.393 75.5432ZM358.437 79.1977C357.941 78.9431 357.43 78.888 356.903 79.031C356.376 79.174 356 79.6601 355.777 80.488C355.649 81.3801 355.361 82.4304 354.914 83.6406C354.466 84.8509 353.914 85.9982 353.26 87.08C352.604 88.163 351.853 89.063 351.006 89.7793C350.159 90.4963 349.256 90.823 348.298 90.7581C347.498 90.6951 346.938 90.289 346.62 89.5406C346.299 88.7921 346.132 87.8533 346.116 86.7218C346.099 85.5921 346.212 84.3182 346.451 82.9007C346.691 81.4837 346.979 80.0746 347.314 78.6722C347.65 77.2716 347.994 75.9256 348.346 74.6359C348.697 73.3463 348.984 72.2554 349.209 71.3639C349.464 70.5675 349.384 69.8912 348.969 69.333C348.553 68.7766 348.034 68.3778 347.411 68.1391C346.787 67.9003 346.155 67.8366 345.516 67.9481C344.877 68.0597 344.462 68.4021 344.27 68.9748C342.384 67.3506 340.57 66.4748 338.829 66.3476C337.086 66.2203 335.48 66.6027 334.01 
67.4942C332.539 68.3857 331.237 69.6754 330.103 71.3639C328.968 73.0523 328.049 74.8911 327.345 76.8814C326.642 78.8716 326.203 80.9025 326.027 82.9722C325.851 85.0424 325.987 86.9297 326.435 88.6333C326.883 90.3369 327.673 91.7308 328.808 92.8126C329.942 93.8956 331.485 94.4375 333.435 94.4375C334.298 94.4375 335.129 94.2623 335.928 93.912C336.726 93.5611 337.462 93.1472 338.133 92.6696C338.804 92.192 339.395 91.6902 339.908 91.1648C340.418 90.6393 340.818 90.2018 341.106 89.8509C341.329 90.9975 341.697 91.9696 342.209 92.7654C342.719 93.5611 343.303 94.215 343.958 94.7235C344.613 95.2326 345.301 95.6071 346.02 95.8465C346.739 96.0853 347.435 96.2047 348.105 96.2047C349.608 96.2047 351.013 95.695 352.325 94.6756C353.635 93.6575 354.81 92.4066 355.849 90.926C356.887 89.4448 357.743 87.8848 358.413 86.2442C359.085 84.6043 359.532 83.1473 359.756 81.8728C359.98 81.3952 359.939 80.894 359.636 80.3686C359.332 79.8431 358.933 79.4534 358.437 79.1977Z' fill='%230D0C23'/%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M444.738 105.571C444.467 106.653 444.043 107.57 443.467 108.318C442.892 109.066 442.173 109.456 441.31 109.489C440.767 109.52 440.351 109.233 440.063 108.629C439.776 108.023 439.576 107.243 439.464 106.288C439.352 105.332 439.304 104.265 439.32 103.087C439.336 101.909 439.384 100.746 439.464 99.5996C439.543 98.4536 439.64 97.3857 439.752 96.3991C439.863 95.4112 439.951 94.6482 440.015 94.1064C441.102 94.2336 442.006 94.7027 442.724 95.5154C443.443 96.3275 443.995 97.2906 444.378 98.4057C444.762 99.5202 444.985 100.723 445.05 102.012C445.113 103.302 445.009 104.488 444.738 105.571ZM427.382 105.571C427.111 106.653 426.687 107.57 426.112 108.318C425.537 109.066 424.817 109.456 423.954 109.489C423.411 109.52 422.996 109.233 422.708 108.629C422.42 108.023 422.22 107.243 422.109 106.288C421.996 105.332 421.948 104.265 421.965 103.087C421.98 101.909 422.028 100.746 422.109 99.5996C422.188 98.4536 422.284 97.3857 422.396 96.3991C422.508 95.4112 422.595 94.6482 
422.66 94.1064C423.746 94.2336 424.65 94.7027 425.368 95.5154C426.088 96.3275 426.639 97.2906 427.023 98.4057C427.407 99.5202 427.63 100.723 427.694 102.012C427.757 103.302 427.653 104.488 427.382 105.571ZM409.572 78.4375C409.539 79.2011 409.467 79.8781 409.355 80.4672C409.243 81.0575 409.092 81.4308 408.9 81.5902C408.548 81.3987 408.116 80.906 407.605 80.109C407.094 79.3133 406.695 78.4127 406.406 77.4096C406.119 76.4066 406.03 75.42 406.143 74.4479C406.254 73.477 406.758 72.7212 407.653 72.1788C408.004 71.9879 408.308 72.0594 408.564 72.394C408.82 72.7285 409.027 73.2139 409.188 73.8509C409.347 74.4885 409.458 75.2206 409.523 76.0485C409.587 76.8769 409.603 77.6727 409.572 78.4375ZM405.328 87.9677C404.832 88.4925 404.28 88.9464 403.674 89.3289C403.066 89.7113 402.443 89.9979 401.804 90.1889C401.164 90.3804 400.589 90.4276 400.078 90.3319C398.64 90.0458 397.537 89.424 396.77 88.4689C396.003 87.5137 395.515 86.3913 395.308 85.1017C395.1 83.8114 395.123 82.4338 395.38 80.969C395.635 79.5042 396.066 78.143 396.674 76.8848C397.281 75.6266 398.017 74.5436 398.879 73.6364C399.742 72.7285 400.685 72.1637 401.708 71.94C401.324 73.5642 401.197 75.2448 401.324 76.98C401.452 78.7157 401.868 80.3478 402.571 81.8762C403.018 82.8011 403.554 83.6441 404.177 84.4083C404.801 85.1732 405.56 85.8259 406.455 86.3671C406.199 86.9089 405.823 87.4422 405.328 87.9677ZM458.378 78.9151C458.474 78.183 458.617 77.4096 458.81 76.5975C459.001 75.786 459.241 74.9976 459.528 74.2333C459.816 73.4685 460.152 72.8079 460.536 72.2509C460.92 71.694 461.326 71.2952 461.758 71.0564C462.19 70.8176 462.629 70.8413 463.076 71.1279C463.556 71.4152 463.851 72.02 463.963 72.943C464.075 73.8673 463.963 74.8539 463.628 75.9054C463.292 76.9563 462.693 77.9436 461.83 78.8666C460.968 79.7914 459.8 80.3957 458.33 80.6823C458.266 80.2369 458.282 79.6478 458.378 78.9151ZM477.7 78.9151C477.796 78.183 477.939 77.4096 478.131 76.5975C478.323 75.786 478.563 74.9976 478.851 74.2333C479.138 73.4685 479.473 72.8079 479.857 
72.2509C480.241 71.694 480.649 71.2952 481.08 71.0564C481.512 70.8176 481.951 70.8413 482.398 71.1279C482.878 71.4152 483.173 72.02 483.285 72.943C483.397 73.8673 483.285 74.8539 482.95 75.9054C482.614 76.9563 482.015 77.9436 481.152 78.8666C480.289 79.7914 479.122 80.3957 477.652 80.6823C477.588 80.2369 477.604 79.6478 477.7 78.9151ZM495.655 81.7096C495.287 81.312 494.84 81.0332 494.313 80.8732C493.785 80.7144 493.282 80.6987 492.802 80.826C492.323 80.9532 492.018 81.2878 491.891 81.829C491.635 82.8484 491.228 83.8914 490.669 84.9574C490.109 86.0253 489.422 87.0362 488.607 87.9913C487.792 88.9464 486.873 89.7913 485.851 90.5234C484.827 91.2561 483.757 91.7816 482.639 92.0991C481.519 92.4506 480.592 92.49 479.857 92.2191C479.122 91.9488 478.539 91.487 478.107 90.8343C477.676 90.181 477.365 89.3931 477.172 88.4689C476.981 87.5459 476.868 86.5907 476.837 85.6029C478.659 85.7307 480.281 85.4047 481.703 84.6235C483.125 83.8435 484.332 82.8077 485.324 81.5181C486.314 80.229 487.065 78.7799 487.576 77.1715C488.087 75.563 488.375 73.963 488.44 72.3703C488.471 70.8734 488.247 69.6073 487.768 68.5722C487.289 67.5377 486.642 66.7328 485.827 66.1601C485.011 65.5862 484.077 65.2522 483.021 65.1565C481.967 65.0607 480.896 65.205 479.809 65.5862C478.498 66.0328 477.388 66.7571 476.478 67.7601C475.567 68.7637 474.807 69.9267 474.2 71.2473C473.592 72.5697 473.113 73.9939 472.761 75.523C472.409 77.0515 472.154 78.5569 471.995 80.0375C471.839 81.4744 471.755 82.8496 471.736 84.1659C471.615 84.4283 471.486 84.692 471.347 84.9574C470.787 86.0253 470.1 87.0362 469.285 87.9913C468.471 88.9464 467.551 89.7913 466.529 90.5234C465.506 91.2561 464.435 91.7816 463.317 92.0991C462.197 92.4506 461.271 92.49 460.536 92.2191C459.8 91.9488 459.217 91.487 458.786 90.8343C458.355 90.181 458.043 89.3931 457.851 88.4689C457.659 87.5459 457.547 86.5907 457.515 85.6029C459.337 85.7307 460.959 85.4047 462.382 84.6235C463.803 83.8435 465.01 82.8077 466.001 81.5181C466.992 80.229 467.743 78.7799 468.254 
77.1715C468.765 75.563 469.054 73.963 469.117 72.3703C469.149 70.8734 468.926 69.6073 468.447 68.5722C467.967 67.5377 467.319 66.7328 466.504 66.1601C465.689 65.5862 464.755 65.2522 463.7 65.1565C462.645 65.0607 461.574 65.205 460.488 65.5862C459.176 66.0328 458.066 66.7571 457.156 67.7601C456.245 68.7637 455.485 69.9267 454.878 71.2473C454.271 72.5697 453.792 73.9939 453.44 75.523C453.088 77.0515 452.832 78.5569 452.673 80.0375C452.582 80.8726 452.522 81.6823 452.477 82.4774C452.168 82.7393 451.867 83.0029 451.546 83.2617C450.444 84.1538 449.284 84.9574 448.07 85.6744C446.855 86.3913 445.592 86.9804 444.283 87.4422C442.971 87.904 441.629 88.1828 440.255 88.278L443.228 56.5578C443.42 55.8887 443.324 55.3003 442.94 54.7906C442.557 54.2809 442.061 53.9306 441.454 53.7397C440.847 53.5482 440.199 53.5645 439.512 53.787C438.824 54.0106 438.258 54.5203 437.81 55.3154C437.586 56.5263 437.354 58.182 437.115 60.2838C436.875 62.3856 436.635 64.6789 436.396 67.1631C436.156 69.6473 435.916 72.2109 435.677 74.8539C435.437 77.4981 435.229 79.966 435.053 82.2587C435.045 82.3605 435.039 82.4526 435.031 82.5532C434.751 82.7896 434.48 83.0277 434.19 83.2617C433.088 84.1538 431.928 84.9574 430.714 85.6744C429.499 86.3913 428.237 86.9804 426.927 87.4422C425.616 87.904 424.273 88.1828 422.899 88.278L425.872 56.5578C426.064 55.8887 425.968 55.3003 425.585 54.7906C425.201 54.2809 424.705 53.9306 424.098 53.7397C423.491 53.5482 422.843 53.5645 422.156 53.787C421.469 54.0106 420.902 54.5203 420.454 55.3154C420.23 56.5263 419.999 58.182 419.76 60.2838C419.519 62.3856 419.28 64.6789 419.04 67.1631C418.8 69.6473 418.561 72.2109 418.321 74.8539C418.082 77.4981 417.873 79.966 417.698 82.2587C417.694 82.3047 417.691 82.3465 417.687 82.3926C417.185 82.6247 416.638 82.8284 416.043 82.9993C415.436 83.175 414.749 83.2786 413.982 83.3102C414.11 82.7362 414.213 82.0993 414.293 81.3987C414.373 80.6987 414.438 79.966 414.486 79.2011C414.534 78.4375 414.549 77.6727 414.534 76.9084C414.517 76.1436 414.477 
75.4436 414.414 74.806C414.253 73.4376 413.958 72.1394 413.527 70.9128C413.095 69.6873 412.512 68.6607 411.777 67.8316C411.041 67.0037 410.123 66.4462 409.019 66.1601C407.917 65.8734 406.63 65.9686 405.161 66.4462C402.986 66.1601 401.029 66.3595 399.287 67.0437C397.545 67.7292 396.034 68.7237 394.756 70.0291C393.478 71.3358 392.431 72.8715 391.616 74.6394C390.801 76.4066 390.257 78.2224 389.986 80.0848C389.871 80.8744 389.815 81.6605 389.798 82.4447C389.303 83.4544 388.761 84.3368 388.164 85.0774C387.317 86.1283 386.438 86.9883 385.527 87.6568C384.616 88.3258 383.713 88.8355 382.819 89.1858C381.923 89.5367 381.124 89.7755 380.421 89.9022C379.59 90.0616 378.791 90.0779 378.024 89.9501C377.257 89.8234 376.553 89.4567 375.915 88.8513C375.403 88.4058 375.011 87.6889 374.74 86.7016C374.468 85.7144 374.309 84.5926 374.261 83.3338C374.213 82.0756 374.261 80.7617 374.404 79.3926C374.548 78.0236 374.795 76.7254 375.147 75.4994C375.499 74.2733 375.945 73.1746 376.49 72.2024C377.032 71.2322 377.672 70.5388 378.408 70.1249C378.822 70.1891 379.079 70.4352 379.175 70.8649C379.271 71.2952 379.294 71.8049 379.246 72.394C379.199 72.9836 379.127 73.5885 379.031 74.2091C378.935 74.8303 378.887 75.3485 378.887 75.7618C379.047 76.6218 379.358 77.2909 379.822 77.7684C380.285 78.246 380.805 78.5254 381.38 78.6042C381.955 78.6842 382.522 78.549 383.083 78.1981C383.641 77.8484 384.096 77.2909 384.449 76.526C384.48 76.5581 384.528 76.5739 384.592 76.5739L385.264 70.5073C385.455 69.6788 385.327 68.9467 384.88 68.3098C384.432 67.6728 383.841 67.3062 383.106 67.211C382.179 65.8734 380.924 65.165 379.342 65.085C377.76 65.0056 376.138 65.5231 374.476 66.6377C373.453 67.371 372.55 68.3813 371.767 69.671C370.983 70.9613 370.345 72.394 369.85 73.9703C369.353 75.5466 369.002 77.2115 368.795 78.963C368.587 80.7144 368.547 82.4187 368.674 84.0738C368.802 85.7307 369.098 87.2913 369.562 88.7555C370.025 90.221 370.672 91.447 371.504 92.4337C372.207 93.2937 373.005 93.9233 373.9 94.3215C374.795 94.7197 
375.73 94.9658 376.705 95.0615C377.68 95.1567 378.647 95.1167 379.606 94.9421C380.565 94.7676 381.476 94.5209 382.339 94.2015C383.457 93.7882 384.609 93.2621 385.791 92.6252C386.973 91.9888 388.108 91.224 389.195 90.3319C389.767 89.8628 390.317 89.3513 390.849 88.8028C391.091 89.4016 391.362 89.981 391.688 90.5234C392.551 91.9561 393.717 93.1191 395.188 94.0106C396.657 94.9021 398.464 95.3312 400.605 95.3003C402.907 95.2682 405.032 94.6876 406.982 93.5567C408.932 92.427 410.53 90.7616 411.777 88.5646C413.644 88.5646 415.481 88.258 417.287 87.6489C417.272 87.8416 417.256 88.0446 417.242 88.2307C417.115 89.9186 417.05 91.0646 417.05 91.67C417.019 92.7209 416.947 94.0185 416.835 95.5627C416.723 97.1075 416.651 98.7318 416.619 100.435C416.588 102.139 416.651 103.859 416.811 105.595C416.971 107.33 417.306 108.907 417.818 110.325C418.328 111.741 419.055 112.944 419.999 113.932C420.941 114.918 422.18 115.508 423.715 115.699C425.345 115.921 426.751 115.635 427.934 114.839C429.116 114.042 430.075 112.952 430.811 111.567C431.546 110.181 432.064 108.581 432.369 106.766C432.672 104.95 432.76 103.127 432.633 101.295C432.504 99.4639 432.168 97.7366 431.625 96.113C431.082 94.4882 430.33 93.1506 429.372 92.0991C429.948 91.9409 430.634 91.6385 431.434 91.1919C432.232 90.7464 433.055 90.2446 433.903 89.687C434.111 89.5501 434.316 89.4058 434.524 89.2652C434.446 90.3937 434.406 91.1985 434.406 91.67C434.375 92.7209 434.303 94.0185 434.19 95.5627C434.079 97.1075 434.007 98.7318 433.975 100.435C433.943 102.139 434.007 103.859 434.167 105.595C434.326 107.33 434.662 108.907 435.173 110.325C435.684 111.741 436.412 112.944 437.354 113.932C438.297 114.918 439.536 115.508 441.071 115.699C442.7 115.921 444.106 115.635 445.289 114.839C446.472 114.042 447.431 112.952 448.166 111.567C448.901 110.181 449.42 108.581 449.724 106.766C450.028 104.95 450.115 103.127 449.988 101.295C449.86 99.4639 449.524 97.7366 448.982 96.113C448.437 94.4882 447.687 93.1506 446.727 92.0991C447.303 91.9409 447.99 
91.6385 448.789 91.1919C449.588 90.7464 450.411 90.2446 451.259 89.687C451.699 89.3974 452.136 89.0986 452.573 88.7913C452.737 90.3488 453.091 91.7149 453.655 92.864C454.343 94.2658 455.277 95.3482 456.46 96.113C457.642 96.8766 459.033 97.299 460.632 97.3784C462.23 97.4572 463.971 97.1633 465.858 96.4942C467.264 95.9851 468.486 95.3482 469.525 94.5839C470.563 93.8191 471.498 92.8876 472.33 91.7894C472.378 91.7258 472.423 91.6567 472.47 91.5925C472.618 92.0385 472.782 92.467 472.977 92.864C473.665 94.2658 474.6 95.3482 475.782 96.113C476.964 96.8766 478.355 97.299 479.953 97.3784C481.551 97.4572 483.293 97.1633 485.179 96.4942C486.586 95.9851 487.808 95.3482 488.847 94.5839C489.885 93.8191 490.82 92.8876 491.652 91.7894C492.483 90.6901 493.241 89.424 493.929 87.9913C494.616 86.558 495.311 84.9186 496.015 83.0708C496.142 82.5617 496.022 82.1078 495.655 81.7096Z' fill='%230D0C23'/%3E%3C/svg%3E%0A"); - border-radius: 6px; - box-shadow: 0px 2px 3px rgba(0, 0, 0, 0.1); -} - -:root[data-color="dark"] .btn-buymeacoffee, :root[data-color="night"] .btn-buymeacoffee { - box-shadow: 0px 2px 3px rgba(255, 255, 255, 0.1); -} - -.btn-close { - background: var(--background-fg); - border: 1px dotted var(--border-color); - border-radius: 4px; - cursor: pointer; -} diff --git a/themes/docura/assets/scss/component/_chroma.scss b/themes/docura/assets/scss/component/_chroma.scss deleted file mode 100644 index b695a36..0000000 --- a/themes/docura/assets/scss/component/_chroma.scss +++ /dev/null @@ -1,83 +0,0 @@ -/* Background */ .chroma { font-size: .9em; color: var(--chroma-base05); background-color: var(--chroma-base00); border-radius: 6px; padding: 16px 24px; overflow-x: auto; } -/* Other */ .chroma .x { color: var(--chroma-base05) } -/* Error */ .chroma .err { color: var(--chroma-base08) } -/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; } -/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; width: auto; 
overflow: auto; display: block; } -/* LineHighlight */ .chroma .hl { display: block; width: 100%; background-color: var(--chroma-base02) } -/* LineNumbersTable */ .chroma .lnt { margin-right: 0.4em; padding: 0 0.4em 0 0.4em; } -/* LineNumbers */ .chroma .ln { margin-right: 0.4em; padding: 0 0.4em 0 0.4em; border-right: 1px solid var(--chroma-base0A); } -/* Line */ .chroma .line { display: flex; } -/* Keyword */ .chroma .k { color: var(--chroma-base0E) } -/* KeywordConstant */ .chroma .kc { color: var(--chroma-base0E) } -/* KeywordDeclaration */ .chroma .kd { color: var(--chroma-base0E) } -/* KeywordNamespace */ .chroma .kn { color: var(--chroma-base0E) } -/* KeywordPseudo */ .chroma .kp { color: var(--chroma-base0D) } -/* KeywordReserved */ .chroma .kr { color: var(--chroma-base0E) } -/* KeywordType */ .chroma .kt { color: var(--chroma-base0E) } -/* Name */ .chroma .n { color: var(--chroma-base05) } -/* NameAttribute */ .chroma .na { color: var(--chroma-base05) } -/* NameBuiltin */ .chroma .nb { color: var(--chroma-base0D) } -/* NameBuiltinPseudo */ .chroma .bp { color: var(--chroma-base0D) } -/* NameClass */ .chroma .nc { color: var(--chroma-base0A) } -/* NameConstant */ .chroma .no { color: var(--chroma-base09) } -/* NameDecorator */ .chroma .nd { color: var(--chroma-base09) } -/* NameEntity */ .chroma .ni { color: var(--chroma-base0A) } -/* NameException */ .chroma .ne { color: var(--chroma-base0A) } -/* NameFunction */ .chroma .nf { color: var(--chroma-base05) } -/* NameFunctionMagic */ .chroma .fm { color: var(--chroma-base05) } -/* NameLabel */ .chroma .nl { color: var(--chroma-base08) } -/* NameNamespace */ .chroma .nn { color: var(--chroma-base0A) } -/* NameOther */ .chroma .nx { color: var(--chroma-base0D) } -/* NameProperty */ .chroma .py { color: var(--chroma-base08) } -/* NameTag */ .chroma .nt { color: var(--chroma-base0D) } -/* NameVariable */ .chroma .nv { color: var(--chroma-base0D) } -/* NameVariableClass */ .chroma .vc { color: 
var(--chroma-base0D) } -/* NameVariableGlobal */ .chroma .vg { color: var(--chroma-base0D) } -/* NameVariableInstance */ .chroma .vi { color: var(--chroma-base08) } -/* NameVariableMagic */ .chroma .vm { color: var(--chroma-base0D) } -/* Literal */ .chroma .l { color: var(--chroma-base0B) } -/* LiteralDate */ .chroma .ld { color: var(--chroma-base0B) } -/* LiteralString */ .chroma .s { color: var(--chroma-base0B) } -/* LiteralStringAffix */ .chroma .sa { color: var(--chroma-base0B) } -/* LiteralStringBacktick */ .chroma .sb { color: var(--chroma-base0B) } -/* LiteralStringChar */ .chroma .sc { color: var(--chroma-base0B) } -/* LiteralStringDelimiter */ .chroma .dl { color: var(--chroma-base0F) } -/* LiteralStringDoc */ .chroma .sd { color: var(--chroma-base03) } -/* LiteralStringDouble */ .chroma .s2 { color: var(--chroma-base0B) } -/* LiteralStringEscape */ .chroma .se { color: var(--chroma-base0C) } -/* LiteralStringHeredoc */ .chroma .sh { color: var(--chroma-base0B) } -/* LiteralStringInterpol */ .chroma .si { color: var(--chroma-base0F) } -/* LiteralStringOther */ .chroma .sx { color: var(--chroma-base0B) } -/* LiteralStringRegex */ .chroma .sr { color: var(--chroma-base0C) } -/* LiteralStringSingle */ .chroma .s1 { color: var(--chroma-base0B) } -/* LiteralStringSymbol */ .chroma .ss { color: var(--chroma-base0B) } -/* LiteralNumber */ .chroma .m { color: var(--chroma-base09) } -/* LiteralNumberBin */ .chroma .mb { color: var(--chroma-base09) } -/* LiteralNumberFloat */ .chroma .mf { color: var(--chroma-base09) } -/* LiteralNumberHex */ .chroma .mh { color: var(--chroma-base09) } -/* LiteralNumberInteger */ .chroma .mi { color: var(--chroma-base09) } -/* LiteralNumberIntegerLong */ .chroma .il { color: var(--chroma-base09) } -/* LiteralNumberOct */ .chroma .mo { color: var(--chroma-base09) } -/* Operator */ .chroma .o { color: var(--chroma-base05) } -/* OperatorWord */ .chroma .ow { color: var(--chroma-base05) } -/* Punctuation */ .chroma .p { color: 
var(--chroma-base05) } -/* Comment */ .chroma .c { color: var(--chroma-base03) } -/* CommentHashbang */ .chroma .ch { color: var(--chroma-base03) } -/* CommentMultiline */ .chroma .cm { color: var(--chroma-base03) } -/* CommentSingle */ .chroma .c1 { color: var(--chroma-base03) } -/* CommentSpecial */ .chroma .cs { color: var(--chroma-base03) } -/* CommentPreproc */ .chroma .cp { color: var(--chroma-base0F) } -/* CommentPreprocFile */ .chroma .cpf { color: var(--chroma-base0B) } -/* Generic */ .chroma .g { color: var(--chroma-base05) } -/* GenericDeleted */ .chroma .gd { color: var(--chroma-base08) } -/* GenericEmph */ .chroma .ge { color: var(--chroma-base05); font-style: italic } -/* GenericError */ .chroma .gr { color: var(--chroma-base05) } -/* GenericHeading */ .chroma .gh { color: var(--chroma-base0D) } -/* GenericInserted */ .chroma .gi { color: var(--chroma-base0B) } -/* GenericOutput */ .chroma .go { color: var(--chroma-base05) } -/* GenericPrompt */ .chroma .gp { color: var(--chroma-base05) } -/* GenericStrong */ .chroma .gs { color: var(--chroma-base05); font-weight: bold } -/* GenericSubheading */ .chroma .gu { color: var(--chroma-base0D) } -/* GenericTraceback */ .chroma .gt { color: var(--chroma-base05) } -/* GenericUnderline */ .chroma .gl { color: var(--chroma-base05); text-decoration: underline } -/* TextWhitespace */ .chroma .w { color: var(--chroma-base00); } \ No newline at end of file diff --git a/themes/docura/assets/scss/component/_dropdown.scss b/themes/docura/assets/scss/component/_dropdown.scss deleted file mode 100644 index 317184e..0000000 --- a/themes/docura/assets/scss/component/_dropdown.scss +++ /dev/null @@ -1,51 +0,0 @@ -.dropdown { - position: relative; -} - -.dropdown-btn { - display: flex; - flex-direction: row; - box-shadow: var(--box-shadow); - border-radius: 6px; - padding: 6px; - cursor: pointer; - white-space: nowrap; -} - -.dropdown-btn .icon-select { - opacity: .4; -} - -.dropdown-menu { - display: none; - position: 
absolute; - right: 0; - top: 34px; - min-width: 100px; - max-height: 240px; - overflow-x: auto; - background: var(--background); - color: var(--color3); - box-shadow: var(--box-shadow2); - z-index: 1; - border-radius: 6px; - padding: 3px; -} - -.dropdown-menu.show { - display: block; -} - -.dropdown-menu button, .dropdown-menu a { - width: 100%; - display: flex; - gap: 2px; - padding: 6px; - align-items: center; - justify-content: center; - cursor: pointer; -} - -.dropdown-menu button:hover, .dropdown-menu a:hover { - background: var(--background-fg); -} diff --git a/themes/docura/assets/scss/component/article.scss b/themes/docura/assets/scss/component/article.scss deleted file mode 100644 index 1d51686..0000000 --- a/themes/docura/assets/scss/component/article.scss +++ /dev/null @@ -1,335 +0,0 @@ -#article { - padding: 8px 16px; -} - -#article-header { - font-size: 3em; - font-weight: 400; - margin-bottom: 1em; - color: var(--color2) -} - -#article-content h1, -#article-content h2, -#article-content h3, -#article-content h4, -#article-content h5, -#article-content h6 { - line-height: 1em; - font-weight: 400; - margin: 2.6em 0 .1em; - color: var(--color2) -} - -#article-content h1 { - font-size: 1.8em -} - -#article-content h2 { - font-size: 1.5em -} - -#article-content h3 { - font-size: 1.3em -} - -#article-content h4 { - font-size: 1.1em -} - -#article-content .highlight, -#article-content blockquote, -#article-content dl, -#article-content iframe, -#article-content ol, -#article-content p, -#article-content table, -#article-content ul { - margin-top: 1em; - line-height: 1.8rem; - letter-spacing: -.1px; -} - -#article-content blockquote p { - margin: 1em 0 -} - -#article-content blockquote dl, -#article-content blockquote ol, -#article-content blockquote ul { - margin: 0 1em 1em 1em -} - -#article-content a { - color: var(--color-anchor); - text-decoration: none -} - -#article-content a:hover { - color: var(--color-hover); - text-decoration: underline -} - 
-@media print { - #article-content a { - color: #355265; - text-decoration: underline - } - - #article-content a:after { - content: " (" attr(href) ")"; - font-size: 80% - } -} - -#article-content strong, #article-content b, #article-content table th { - font-weight: 600 -} - -#article-content em { - font-style: italic -} - -#article-content dl, -#article-content ol, -#article-content ul { - margin-left: 20px -} - -#article-content dl dl, -#article-content dl ol, -#article-content dl ul, -#article-content ol dl, -#article-content ol ol, -#article-content ol ul, -#article-content ul dl, -#article-content ul ol, -#article-content ul ul { - margin-top: 0; - margin-bottom: 0 -} - -#article-content ul { - list-style: disc -} - -#article-content ol { - list-style: decimal -} - -#article-content dl { - list-style: square -} - -#article-content li > ul { - list-style: circle -} - -#article-content li > ol { - list-style: lower-alpha -} - -#article-content li p { - margin: 0 -} - -#article-content li .highlight, -#article-content li blockquote, -#article-content li iframe, -#article-content li table { - margin: 1em 0 -} - -#article-content img, -#article-content video { - max-width: 100%; - border-radius: 4px -} - -#article-content blockquote { - padding: 8px 12px; - position: relative; - background: var(--background-fg); - border-left: 4px solid var(--border-color); - border-radius: 6px; -} - -#article-content blockquote footer { - margin: 1em 0; - font-style: italic -} - -#article-content blockquote footer cite:before { - content: "—"; - padding: 0 .3em -} - -#article-content blockquote footer cite a { - color: var(--border-color); -} - -#article-content code, #article-content pre { - font-family: var(--font-family-code); -} - -#article-content h1 code, -#article-content h2 code, -#article-content h3 code, -#article-content h4 code, -#article-content h5 code, -#article-content h6 code, -#article-content p code, -#article-content blockquote code, -#article-content ul code, 
-#article-content ol code, -#article-content dl code, -#article-content table code { - background: var(--chroma-base00); - padding: 4px; - border-radius: 4px; - font-size: .9em; -} - -#article-content pre:not(.chroma) { - color: var(--chroma-base05); - font-size: .9em; - line-height: 1.8; - letter-spacing: -.1px; - background-color: var(--chroma-base00); - border-radius: 6px; - padding: 16px 24px; - overflow-x: auto; - margin-top: 1em; -} - -#article-content blockquote code { - background: var(--background-fg2); - opacity: .8; -} - -#article-content blockquote .chroma, #article-content blockquote pre:not(.chroma) { - background: var(--background-fg2); - margin-bottom: 1em; -} - -#article-content blockquote .chroma code, #article-content blockquote pre:not(.chroma) code { - padding: 0; -} - -#article-content table { - max-width: 100%; - border: 1px solid var(--border-color) -} - -#article-content table td, -#article-content table th { - padding: 5px 15px -} - -#article-content table tr:nth-child(2n) { - background: var(--background-fg) -} - -#article-footer { - display: grid; - grid-template-columns: 1fr 1fr; - padding-top: 20px; -} - -#article-last-updated, #article-prev-link, #article-next-link { - display: flex; - align-items: center; - padding: 12px 0; -} - -#article-last-updated { - grid-column: 1 / 3; - justify-content: center; - color: var(--color3); -} - -#article-prev-link, #article-next-link { - color: var(--color-anchor); -} - -#article-prev-link:hover, #article-next-link:hover { - color: var(--color-hover); - font-weight: 600; - font-size: 98%; -} - -#article-next-link { - justify-content: flex-end; -} - -#article-prev-link .icon { - padding-right: 6px; -} - -#article-next-link .icon { - padding-left: 6px; -} - -@media (max-width: 767px) { - #article-next-link[data-first-page="true"] { - grid-column: 2/ 3; - } -} - -@media (min-width: 768px) { - #article { - padding: 16px 24px; - } - - #article-footer { - display: grid; - grid-template-columns: repeat(3, 
1fr); - } - - #article-prev-link { - grid-column: 1/ 2; - grid-row: 1; - } - - #article-last-updated { - grid-column: 2 / 3; - } - - #article-next-link { - grid-column: 3 / 4; - } -} - -@media (min-width: 1024px) { - #article { - padding: 24px 32px; - } -} - -@media (min-width: 1281px) { - #article { - padding: 32px 40px; - } -} - -@media (min-width: 1920px) { - #article { - padding: 40px 48px; - } - - #article-content { - width: 90%; - } -} - -@media (min-width: 2560px) { - #article-content { - width: 85%; - } -} - -@media (min-width: 3840px) { - #article-content { - width: 80%; - } -} diff --git a/themes/docura/assets/scss/component/docsearch.scss b/themes/docura/assets/scss/component/docsearch.scss deleted file mode 100644 index 841cd7f..0000000 --- a/themes/docura/assets/scss/component/docsearch.scss +++ /dev/null @@ -1,690 +0,0 @@ -/*! @docsearch/css 3.2.0 | MIT License | © Algolia, Inc. and contributors | https://docsearch.algolia.com | https://cdn.jsdelivr.net/npm/@docsearch/css@3 */ -:root { - --docsearch-primary-color: #5468ff; - --docsearch-spacing: 12px; - --docsearch-icon-stroke-width: 1.4; - --docsearch-highlight-color: var(--docsearch-primary-color); - --docsearch-muted-color: #969faf; - --docsearch-container-background: rgba(255, 255, 255, 0.1); - --docsearch-logo-color: #5468ff; - --docsearch-modal-width: 560px; - --docsearch-modal-height: 600px; - --docsearch-modal-shadow: inset 1px 1px 0 0 hsla(0, 0%, 100%, 0.5), 0 3px 8px 0 #555a64; - --docsearch-searchbox-height: 56px; - --docsearch-searchbox-focus-background: #fff; - --docsearch-searchbox-shadow: inset 0 0 0 2px var(--docsearch-primary-color); - --docsearch-hit-height: 56px; - --docsearch-hit-color: #444950; - --docsearch-hit-active-color: #fff; - --docsearch-hit-background: #fff; - --docsearch-hit-shadow: 0 1px 3px 0 #d4d9e1; - --docsearch-footer-height: 44px; - --docsearch-footer-shadow: 0 -1px 0 0 #e0e3e8, 0 -3px 6px 0 rgba(69, 98, 155, 0.12) -} - -:root[data-color="dark"] { - 
--docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309; - --docsearch-searchbox-focus-background: #000; - --docsearch-hit-color: #bec3c9; - --docsearch-hit-shadow: none; - --docsearch-hit-background: #090a11; - --docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2); - --docsearch-muted-color: #7f8497 -} - -:root[data-color="night"] { - --docsearch-modal-shadow: inset 1px 1px 0 0 #2c2e40, 0 3px 8px 0 #000309; - --docsearch-searchbox-focus-background: #000; - --docsearch-hit-color: #bec3c9; - --docsearch-hit-shadow: none; - --docsearch-hit-background: #090a11; - --docsearch-footer-shadow: inset 0 1px 0 0 rgba(73, 76, 106, 0.5), 0 -4px 8px 0 rgba(0, 0, 0, 0.2); - --docsearch-muted-color: #7f8497 -} - -.DocSearch-Button { - width: 100%; - line-height: 1.6em; - align-items: center; - box-shadow: var(--box-shadow); - border-radius: 24px; - color: var(--color); - cursor: pointer; - display: flex; - justify-content: space-between; - margin: 0 12px; - padding: 3px 6px; - user-select: none; -} - -.DocSearch-Button:active, .DocSearch-Button:focus, .DocSearch-Button:hover { - background: var(--docsearch-searchbox-focus-background); - box-shadow: var(--docsearch-searchbox-shadow); - color: var(--color); - outline: none -} - -.DocSearch-Button-Container { - align-items: center; - display: flex -} - -.DocSearch-Search-Icon { - stroke-width: 1.6 -} - -.DocSearch-Button-Placeholder { - font-size: 1rem; - padding: 0 12px 0 6px; - color: var(--color3) -} - -.DocSearch-Button-Keys { - display: flex; - min-width: calc(40px + .8em) -} - -.DocSearch-Button-Key { - align-items: center; - border-radius: 3px; - color: var(--docsearch-muted-color); - display: flex; - height: 18px; - justify-content: center; - margin-right: .4em; - position: relative; - border: 1px solid var(--border-color); - width: 20px -} - -@media (min-width: 1278px) { - .DocSearch-Button { - width: 80%; - margin: 0; - } -} - -@media (min-width: 2558px) { - 
.DocSearch-Button { - width: 60%; - } -} - -@media (min-width: 3838px) { - .DocSearch-Button { - width: 40%; - } -} - -.DocSearch--active { - overflow: hidden !important -} - -.DocSearch-Container, .DocSearch-Container * { - box-sizing: border-box -} - -.DocSearch-Container { - background-color: var(--docsearch-container-background); - height: 100vh; - left: 0; - position: fixed; - top: 0; - width: 100vw; - z-index: 200; - backdrop-filter: blur(var(--blur)); - -webkit-backdrop-filter: blur(var(--blur)); -} - -.DocSearch-Container a { - text-decoration: none -} - -.DocSearch-Link { - appearance: none; - background: none; - border: 0; - color: var(--docsearch-highlight-color); - cursor: pointer; - font: inherit; - margin: 0; - padding: 0 -} - -.DocSearch-Modal { - background: var(--background); - border-radius: 6px; - box-shadow: var(--docsearch-modal-shadow); - flex-direction: column; - margin: 60px auto auto; - max-width: var(--docsearch-modal-width); - position: relative -} - -.DocSearch-SearchBar { - display: flex; - padding: var(--docsearch-spacing) var(--docsearch-spacing) 0 -} - -.DocSearch-Form { - align-items: center; - background: var(--docsearch-searchbox-focus-background); - border-radius: 4px; - box-shadow: var(--docsearch-searchbox-shadow); - display: flex; - height: var(--docsearch-searchbox-height); - margin: 0; - padding: 0 var(--docsearch-spacing); - position: relative; - width: 100% -} - -.DocSearch-Input { - appearance: none; - background: transparent; - border: 0; - color: var(--docsearch-text-color); - flex: 1; - font: inherit; - font-size: 1.2em; - height: 100%; - outline: none; - padding: 0 0 0 8px; - width: 80% -} - -.DocSearch-Input::placeholder { - color: var(--docsearch-muted-color); - opacity: 1 -} - -.DocSearch-Input::-webkit-search-cancel-button, .DocSearch-Input::-webkit-search-decoration, .DocSearch-Input::-webkit-search-results-button, .DocSearch-Input::-webkit-search-results-decoration { - display: none -} - 
-.DocSearch-LoadingIndicator, .DocSearch-MagnifierLabel, .DocSearch-Reset { - margin: 0; - padding: 0 -} - -.DocSearch-MagnifierLabel, .DocSearch-Reset { - align-items: center; - color: var(--docsearch-highlight-color); - display: flex; - justify-content: center -} - -.DocSearch-Container--Stalled .DocSearch-MagnifierLabel, .DocSearch-LoadingIndicator { - display: none -} - -.DocSearch-Container--Stalled .DocSearch-LoadingIndicator { - align-items: center; - color: var(--docsearch-highlight-color); - display: flex; - justify-content: center -} - -@media screen and (prefers-reduced-motion: reduce) { - .DocSearch-Reset { - animation: none; - appearance: none; - background: none; - border: 0; - border-radius: 50%; - color: var(--docsearch-icon-color); - cursor: pointer; - right: 0; - stroke-width: var(--docsearch-icon-stroke-width) - } -} - -.DocSearch-Reset { - animation: fade-in .1s ease-in forwards; - appearance: none; - background: none; - border: 0; - border-radius: 50%; - color: var(--docsearch-icon-color); - cursor: pointer; - padding: 2px; - right: 0; - stroke-width: var(--docsearch-icon-stroke-width) -} - -.DocSearch-Reset[hidden] { - display: none -} - -.DocSearch-Reset:focus { - outline: none -} - -.DocSearch-Reset:hover { - color: var(--docsearch-highlight-color) -} - -.DocSearch-LoadingIndicator svg, .DocSearch-MagnifierLabel svg { - height: 24px; - width: 24px -} - -.DocSearch-Cancel { - display: none -} - -.DocSearch-Dropdown { - max-height: calc(var(--docsearch-modal-height) - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height)); - min-height: var(--docsearch-spacing); - overflow-y: auto; - overflow-y: overlay; - padding: 0 var(--docsearch-spacing); - scrollbar-color: var(--docsearch-muted-color) var(--docsearch-modal-background); - scrollbar-width: thin -} - -.DocSearch-Dropdown::-webkit-scrollbar { - width: 12px -} - -.DocSearch-Dropdown::-webkit-scrollbar-track { - background: transparent -} - 
-.DocSearch-Dropdown::-webkit-scrollbar-thumb { - background-color: var(--docsearch-muted-color); - border: 3px solid var(--docsearch-modal-background); - border-radius: 20px -} - -.DocSearch-Dropdown ul { - list-style: none; - margin: 0; - padding: 0 -} - -.DocSearch-Label { - font-size: .75em; - line-height: 1.6em -} - -.DocSearch-Help, .DocSearch-Label { - color: var(--docsearch-muted-color) -} - -.DocSearch-Help { - font-size: .9em; - margin: 0; - user-select: none -} - -.DocSearch-Title { - font-size: 1.2em -} - -.DocSearch-Logo a { - display: flex -} - -.DocSearch-Logo svg { - color: var(--docsearch-logo-color); - margin-left: 8px -} - -.DocSearch-Hits:last-of-type { - margin-bottom: 24px -} - -.DocSearch-Hits mark { - background: none; - color: var(--docsearch-highlight-color) -} - -.DocSearch-HitsFooter { - color: var(--docsearch-muted-color); - display: flex; - font-size: .85em; - justify-content: center; - margin-bottom: var(--docsearch-spacing); - padding: var(--docsearch-spacing) -} - -.DocSearch-HitsFooter a { - border-bottom: 1px solid; - color: inherit -} - -.DocSearch-Hit { - border-radius: 4px; - display: flex; - padding-bottom: 4px; - position: relative -} - -@media screen and (prefers-reduced-motion: reduce) { - .DocSearch-Hit--deleting { - transition: none - } -} - -.DocSearch-Hit--deleting { - opacity: 0; - transition: all .25s linear -} - -@media screen and (prefers-reduced-motion: reduce) { - .DocSearch-Hit--favoriting { - transition: none - } -} - -.DocSearch-Hit--favoriting { - transform: scale(0); - transform-origin: top center; - transition: all .25s linear; - transition-delay: .25s -} - -.DocSearch-Hit a { - background: var(--docsearch-hit-background); - border-radius: 4px; - box-shadow: var(--docsearch-hit-shadow); - display: block; - padding-left: var(--docsearch-spacing); - width: 100% -} - -.DocSearch-Hit-source { - background: var(--docsearch-modal-background); - color: var(--docsearch-highlight-color); - font-size: .85em; - 
font-weight: 600; - line-height: 32px; - margin: 0 -4px; - padding: 8px 4px 0; - position: sticky; - top: 0; - z-index: 10 -} - -.DocSearch-Hit-Tree { - color: var(--docsearch-muted-color); - height: var(--docsearch-hit-height); - opacity: .5; - stroke-width: var(--docsearch-icon-stroke-width); - width: 24px -} - -.DocSearch-Hit[aria-selected=true] a { - background-color: var(--docsearch-highlight-color) -} - -.DocSearch-Hit[aria-selected=true] mark { - text-decoration: underline -} - -.DocSearch-Hit-Container { - align-items: center; - color: var(--docsearch-hit-color); - display: flex; - flex-direction: row; - height: var(--docsearch-hit-height); - padding: 0 var(--docsearch-spacing) 0 0 -} - -.DocSearch-Hit-icon { - height: 20px; - width: 20px -} - -.DocSearch-Hit-action, .DocSearch-Hit-icon { - color: var(--docsearch-muted-color); - stroke-width: var(--docsearch-icon-stroke-width) -} - -.DocSearch-Hit-action { - align-items: center; - display: flex; - height: 22px; - width: 22px -} - -.DocSearch-Hit-action svg { - display: block; - height: 18px; - width: 18px -} - -.DocSearch-Hit-action + .DocSearch-Hit-action { - margin-left: 6px -} - -.DocSearch-Hit-action-button { - appearance: none; - background: none; - border: 0; - border-radius: 50%; - color: inherit; - cursor: pointer; - padding: 2px -} - -svg.DocSearch-Hit-Select-Icon { - display: none -} - -.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Select-Icon { - display: block -} - -.DocSearch-Hit-action-button:focus, .DocSearch-Hit-action-button:hover { - background: rgba(0, 0, 0, .2); - transition: background-color .1s ease-in -} - -@media screen and (prefers-reduced-motion: reduce) { - .DocSearch-Hit-action-button:focus, .DocSearch-Hit-action-button:hover { - transition: none - } -} - -.DocSearch-Hit-action-button:focus path, .DocSearch-Hit-action-button:hover path { - fill: #fff -} - -.DocSearch-Hit-content-wrapper { - display: flex; - flex: 1 1 auto; - flex-direction: column; - font-weight: 500; - 
justify-content: center; - line-height: 1.2em; - margin: 0 8px; - overflow-x: hidden; - position: relative; - text-overflow: ellipsis; - white-space: nowrap; - width: 80% -} - -.DocSearch-Hit-title { - font-size: .9em -} - -.DocSearch-Hit-path { - color: var(--docsearch-muted-color); - font-size: .75em -} - -.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-action, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-icon, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-path, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-text, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-title, .DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Tree, .DocSearch-Hit[aria-selected=true] mark { - color: var(--docsearch-hit-active-color) !important -} - -@media screen and (prefers-reduced-motion: reduce) { - .DocSearch-Hit-action-button:focus, .DocSearch-Hit-action-button:hover { - background: rgba(0, 0, 0, .2); - transition: none - } -} - -.DocSearch-ErrorScreen, .DocSearch-NoResults, .DocSearch-StartScreen { - font-size: .9em; - margin: 0 auto; - padding: 36px 0; - text-align: center; - width: 80% -} - -.DocSearch-Screen-Icon { - color: var(--docsearch-muted-color); - padding-bottom: 12px -} - -.DocSearch-NoResults-Prefill-List { - display: inline-block; - padding-bottom: 24px; - text-align: left -} - -.DocSearch-NoResults-Prefill-List ul { - display: inline-block; - padding: 8px 0 0 -} - -.DocSearch-NoResults-Prefill-List li { - list-style-position: inside; - list-style-type: "» " -} - -.DocSearch-Prefill { - appearance: none; - background: none; - border: 0; - border-radius: 1em; - color: var(--docsearch-highlight-color); - cursor: pointer; - display: inline-block; - font-size: 1em; - font-weight: 700; - padding: 0 -} - -.DocSearch-Prefill:focus, .DocSearch-Prefill:hover { - outline: none; - text-decoration: underline -} - -.DocSearch-Footer { - align-items: center; - border-radius: 0 0 8px 8px; - box-shadow: var(--docsearch-footer-shadow); - display: flex; - flex-direction: 
row-reverse; - flex-shrink: 0; - height: var(--docsearch-footer-height); - justify-content: space-between; - padding: 0 var(--docsearch-spacing); - position: relative; - user-select: none; - width: 100%; - z-index: 300 -} - -.DocSearch-Commands { - color: var(--docsearch-muted-color); - display: flex; - list-style: none; - margin: 0; - padding: 0 -} - -.DocSearch-Commands li { - align-items: center; - display: flex -} - -.DocSearch-Commands li:not(:last-of-type) { - margin-right: .8em -} - -.DocSearch-Commands-Key { - align-items: center; - border-radius: 2px; - display: flex; - height: 18px; - justify-content: center; - margin-right: .4em; - padding: 0 0 1px; - color: var(--docsearch-muted-color); - border: 1px solid var(--border-color); - width: 20px -} - -@media (max-width: 768px) { - :root { - --docsearch-spacing: 10px; - --docsearch-footer-height: 40px - } - .DocSearch-Dropdown { - height: 100% - } - .DocSearch-Container { - height: 100vh; - height: -webkit-fill-available; - height: calc(var(--docsearch-vh, 1vh) * 100); - position: absolute - } - .DocSearch-Footer { - border-radius: 0; - bottom: 0; - position: absolute - } - .DocSearch-Hit-content-wrapper { - display: flex; - position: relative; - width: 80% - } - .DocSearch-Modal { - border-radius: 0; - box-shadow: none; - height: 100vh; - height: -webkit-fill-available; - height: calc(var(--docsearch-vh, 1vh) * 100); - margin: 0; - max-width: 100%; - width: 100% - } - .DocSearch-Dropdown { - max-height: calc(var(--docsearch-vh, 1vh) * 100 - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height)) - } - .DocSearch-Cancel { - appearance: none; - background: none; - border: 0; - color: var(--docsearch-highlight-color); - cursor: pointer; - display: inline-block; - flex: none; - font: inherit; - font-size: 1em; - font-weight: 500; - margin-left: var(--docsearch-spacing); - outline: none; - overflow: hidden; - padding: 0; - user-select: none; - white-space: nowrap - } - 
.DocSearch-Commands, .DocSearch-Hit-Tree { - display: none - } -} - -@keyframes fade-in { - 0% { - opacity: 0 - } - to { - opacity: 1 - } -} \ No newline at end of file diff --git a/themes/docura/assets/scss/component/home.scss b/themes/docura/assets/scss/component/home.scss deleted file mode 100644 index 2ecfc52..0000000 --- a/themes/docura/assets/scss/component/home.scss +++ /dev/null @@ -1,130 +0,0 @@ -.cover { - padding: 40px 20px; - width: 100vw; - flex: 1; - display: flex; - align-items: center; - justify-content: center; - flex-direction: column; - background: var(--home-cover-background); - position: relative; - color: var(--color2) -} - -.cover::after { - content: ""; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - z-index: -1; - background: inherit; - filter: blur(1rem); -} - -.cover h1 { - font-family: var(--font-family-brand); - font-size: 4em; - text-align: center; -} - -.cover h2 { - font-family: var(--font-family-brand); - font-size: 2em; - text-align: center; -} - -.cover h3 { - font-family: var(--font-family-brand); - font-size: 1.5em; - text-align: center; - padding-top: .8em; -} - -.cover p { - font-size: 1em; - padding-top: .8em; -} - -.github-buttons { - display: flex; - gap: 10px; - padding-top: 20px; - justify-content: center; -} - -.github-repos-grid { - display: flex; - flex-wrap: wrap; - padding-top: 4em; - padding-bottom: 2em; - gap: 4em; - width: 100%; -} - -.github-repo-tile { - width: 100%; -} - -.github-repo-tile .icon { - width: 80px; - height: 80px; - background-size: 5em; -} - -.github-repo-tile a { - display: flex; - flex-direction: column; - align-items: center; -} - -@media (min-width: 768px) { - .github-repos-grid { - flex-direction: row; - width: 80%; - padding-top: 4em; - gap: 0; - } - - .github-repo-tile { - width: 50%; - } -} - -@media (min-width: 1024px) { - .github-repos-grid { - width: 60%; - padding-top: 6em; - } - - .github-repo-tile .icon { - width: 100px; - height: 100px; - background-size: 
6.25em; - } -} - -@media (min-width: 1281px) { - .github-repos-grid { - width: 50%; - } - - .github-repo-tile .icon { - width: 120px; - height: 120px; - background-size: 7.5em; - } -} - -@media (min-width: 1920px) { - .github-repos-grid { - width: 40%; - } - - .github-repo-tile .icon { - width: 160px; - height: 160px; - background-size: 10em; - } -} \ No newline at end of file diff --git a/themes/docura/assets/scss/component/sidebar.scss b/themes/docura/assets/scss/component/sidebar.scss deleted file mode 100644 index f90b76c..0000000 --- a/themes/docura/assets/scss/component/sidebar.scss +++ /dev/null @@ -1,60 +0,0 @@ -#sidebar { - padding: 40px 0; -} - -#sidebar .sticky { - display: flex; - flex-direction: column; - padding: 0 20px; - overflow: auto; -} - -.sidebar-section, .sidebar-link { - padding: 7px 0; -} - -.sidebar-section { - margin-top: 40px; - font-weight: 600; - color: var(--color2) -} - -#sidebar .sidebar-section:first-child { - margin-top: 0; -} - -.sidebar-link { - padding-left: 10px; - color: var(--color3); - border-left: 1px solid var(--border-color); - margin-left: 4px; -} - -.sidebar-link::before { - content: ''; - display: inline-block; - width: 6px; - height: 6px; - background: var(--background); - box-shadow: var(--box-shadow); - border-radius: 50%; - position: relative; - left: -13.5px; - top: -3px; -} - -.sidebar-link:hover { - color: var(--color-hover); - font-weight: 600; - font-size: 98%; -} - -.sidebar-link.current { - color: var(--color-anchor); - font-weight: 600; - font-size: 98%; -} - -.sidebar-link.current::before, .sidebar-link:hover::before { - background: var(--color-anchor); -} diff --git a/themes/docura/assets/scss/component/site-footer.scss b/themes/docura/assets/scss/component/site-footer.scss deleted file mode 100644 index b7c03fe..0000000 --- a/themes/docura/assets/scss/component/site-footer.scss +++ /dev/null @@ -1,41 +0,0 @@ -#site-footer-social { - display: flex; - gap: 12px; - justify-content: flex-start; - 
padding-left: 12px; - align-items: center; -} - -#site-footer-fund { - display: flex; - gap: 12px; - overflow: auto; - justify-content: flex-end; - padding-right: 12px; - align-items: center; -} - -#site-footer-copyright, #site-footer-love { - display: flex; - align-items: center; - justify-content: center; - color: var(--color3) -} - -#site-footer-copyright a { - display: flex; - align-items: center; -} - -/* From Small Tablet */ -@media (min-width: 768px) { - #site-footer-copyright { - justify-content: flex-start; - padding-left: 12px; - } - - #site-footer-social { - justify-content: flex-end; - padding-right: 12px; - } -} diff --git a/themes/docura/assets/scss/component/site-header.scss b/themes/docura/assets/scss/component/site-header.scss deleted file mode 100644 index c022313..0000000 --- a/themes/docura/assets/scss/component/site-header.scss +++ /dev/null @@ -1,61 +0,0 @@ -#site-header-brand { - display: flex; - align-items: center; - font-family: var(--font-family-brand); - font-size: 1.4em; - color: var(--color2); -} - -#site-header-brand a { - padding: 12px; -} - -#site-header-menu { - padding: 0 12px; - display: flex; - align-items: center; - color: var(--color3); -} - -#site-header-menu nav { - width: 100%; - overflow: auto; -} - -#site-header-menu ul { - display: flex; - height: 100%; - align-items: center; - gap: 12px; -} - -#site-header-menu a { - display: flex; - padding: 12px 6px; - gap: 3px; - white-space: nowrap; -} - -#site-header-menu a:focus, #site-header-menu a:hover, #site-header-menu a.active { - border-bottom: 3px solid; -} - -#site-header-controls { - display: flex; - align-items: center; - padding-right: 12px; - justify-content: flex-end; - gap: 12px -} - -#site-header-search { - display: flex; - align-items: flex-end; -} - -/* From Small Tablet */ -@media (min-width: 768px) { - #site-header-search { - align-items: center; - } -} \ No newline at end of file diff --git a/themes/docura/assets/scss/component/toc.scss 
b/themes/docura/assets/scss/component/toc.scss deleted file mode 100644 index 9ab15d2..0000000 --- a/themes/docura/assets/scss/component/toc.scss +++ /dev/null @@ -1,54 +0,0 @@ -#toc { - padding-top: 40px; - padding-bottom: 40px; -} - -#toc .sticky{ - overflow: auto; -} - -#toc strong { - font-weight: 600; - padding: 7px 10px 7px 0; - display: flex; - gap: 3px; - position: relative; - left: -3px; - color: var(--color2) -} - -#toc ul { - margin-left: .3em; - border-left: 1px solid var(--border-color); -} - -#toc ul ul { - margin-left: 1em; -} - -#toc ul a { - display: inline-block; - padding: 7px; - color: var(--color3); -} - -#toc ul a.active, #toc ul a:hover { - color: var(--color-hover); -} - -#toc ul a::before { - content: ''; - display: inline-block; - width: 6px; - height: 6px; - background: var(--background); - box-shadow: var(--box-shadow); - position: relative; - left: -10.5px; - top: -3px; -} - -#toc ul a.active::before, #toc ul a:hover::before { - background: var(--color-hover); -} - diff --git a/themes/docura/assets/scss/font/inter.scss b/themes/docura/assets/scss/font/inter.scss deleted file mode 100644 index 5b94ebb..0000000 --- a/themes/docura/assets/scss/font/inter.scss +++ /dev/null @@ -1,35 +0,0 @@ -@font-face { - font-family: 'Inter'; - font-style: normal; - font-weight: 400; - font-display: swap; - src: url("/font/Inter-Regular.woff2?v=3.19") format("woff2"), - url("/font/Inter-Regular.woff?v=3.19") format("woff"); -} - -@font-face { - font-family: 'Inter'; - font-style: italic; - font-weight: 400; - font-display: swap; - src: url("/font/Inter-Italic.woff2?v=3.19") format("woff2"), - url("/font/Inter-Italic.woff?v=3.19") format("woff"); -} - -@font-face { - font-family: 'Inter'; - font-style: normal; - font-weight: 600; - font-display: swap; - src: url("/font/Inter-SemiBold.woff2?v=3.19") format("woff2"), - url("/font/Inter-SemiBold.woff?v=3.19") format("woff"); -} - -@font-face { - font-family: 'Inter'; - font-style: italic; - font-weight: 600; 
- font-display: swap; - src: url("/font/Inter-SemiBoldItalic.woff2?v=3.19") format("woff2"), - url("/font/Inter-SemiBoldItalic.woff?v=3.19") format("woff"); -} diff --git a/themes/docura/assets/scss/home.scss b/themes/docura/assets/scss/home.scss deleted file mode 100644 index d405d7d..0000000 --- a/themes/docura/assets/scss/home.scss +++ /dev/null @@ -1,16 +0,0 @@ -/*! - * Docura (https://docura.github.io/) - * Copyright 2022-2023 Dumindu Madunuwan - * Licensed under the MIT License. - */ - -@import "reset"; -@import "variables"; -@import "layout"; - -@import "component/site-header"; -@import "component/site-footer"; -@import "component/home"; - -@import "component/button"; -@import "component/dropdown"; \ No newline at end of file diff --git a/themes/docura/assets/scss/icon/default.scss b/themes/docura/assets/scss/icon/default.scss deleted file mode 100644 index 1cde2d9..0000000 --- a/themes/docura/assets/scss/icon/default.scss +++ /dev/null @@ -1,103 +0,0 @@ -.icon { - display: block; - width: 18px; - height: 18px; -} - -/* -- social icons: add `.icon-colored` with `.icon` -- */ -.icon-facebook { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 30 30' fill='%231877f2' %3E%3Cpath d='M30 15.091C30 6.756 23.285 0 15 0S0 6.756 0 15.091C0 22.625 5.484 28.868 12.656 30V19.454H8.848V15.09h3.808v-3.324c0-3.782 2.239-5.872 5.666-5.872 1.64 0 3.358.295 3.358.295v3.714h-1.893c-1.863 0-2.443 1.164-2.443 2.358v2.83h4.16l-.665 4.362h-3.495V30C24.516 28.868 30 22.625 30 15.091z'%3E%3C/path%3E%3C/svg%3E"); -} - -.icon-twitter { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 24 24' fill='%231d9bf0' %3E%3Cpath d='M24 4.557c-.883.392-1.832.656-2.828.775 1.017-.609 1.798-1.574 2.165-2.724-.951.564-2.005.974-3.127 1.195-.897-.957-2.178-1.555-3.594-1.555-3.179 0-5.515 2.966-4.797 6.045-4.091-.205-7.719-2.165-10.148-5.144-1.29 2.213-.669 
5.108 1.523 6.574-.806-.026-1.566-.247-2.229-.616-.054 2.281 1.581 4.415 3.949 4.89-.693.188-1.452.232-2.224.084.626 1.956 2.444 3.379 4.6 3.419-2.07 1.623-4.678 2.348-7.29 2.04 2.179 1.397 4.768 2.212 7.548 2.212 9.142 0 14.307-7.721 13.995-14.646.962-.695 1.797-1.562 2.457-2.549z'/%3E%3C/svg%3E"); - transform: scale(1.1); -} - -.icon-youtube { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 24 24' fill='%23ff0000' %3E%3Cpath d='M23.498 6.186a3.016 3.016 0 0 0-2.122-2.136C19.505 3.545 12 3.545 12 3.545s-7.505 0-9.377.505A3.017 3.017 0 0 0 .502 6.186C0 8.07 0 12 0 12s0 3.93.502 5.814a3.016 3.016 0 0 0 2.122 2.136c1.871.505 9.376.505 9.376.505s7.505 0 9.377-.505a3.015 3.015 0 0 0 2.122-2.136C24 15.93 24 12 24 12s0-3.93-.502-5.814zM9.545 15.568V8.432L15.818 12l-6.273 3.568z'%3E%3C/path%3E%3C/svg%3E"); - transform: scale(1.1); -} - -.icon-github { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%2324292f' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E"); -} - -:root[data-color="dark"] .icon-github, :root[data-color="night"] .icon-github { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 16 16' fill='%236e7681' %3E%3Cpath d='M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 
7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z'%3E%3C/path%3E%3C/svg%3E"); -} - - -/* -- template icons -- */ -.icon-menu { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3Cpath d='M4,18h11c0.55,0,1-0.45,1-1v0c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,17.55,3.45,18,4,18z M4,13h8c0.55,0,1-0.45,1-1v0 c0-0.55-0.45-1-1-1H4c-0.55,0-1,0.45-1,1v0C3,12.55,3.45,13,4,13z M3,7L3,7c0,0.55,0.45,1,1,1h11c0.55,0,1-0.45,1-1v0 c0-0.55-0.45-1-1-1H4C3.45,6,3,6.45,3,7z M20.3,14.88L17.42,12l2.88-2.88c0.39-0.39,0.39-1.02,0-1.41l0,0 c-0.39-0.39-1.02-0.39-1.41,0l-3.59,3.59c-0.39,0.39-0.39,1.02,0,1.41l3.59,3.59c0.39,0.39,1.02,0.39,1.41,0l0,0 C20.68,15.91,20.69,15.27,20.3,14.88z'/%3E%3Cpath d='M0,0h24v24H0V0z' fill='none'/%3E%3C/svg%3E"); -} - -.icon-toc { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' fill='%23000000'%3E%3Cpath d='M0 0h24v24H0V0zm0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M3 9h14V7H3v2zm0 4h14v-2H3v2zm0 4h14v-2H3v2zm16 0h2v-2h-2v2zm0-10v2h2V7h-2zm0 6h2v-2h-2v2z'/%3E%3C/svg%3E"); -} - -.icon-close { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 
6.41z'/%3E%3C/svg%3E"); -} - -.icon-home { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Crect fill='none' height='24' width='24'/%3E%3Cpolygon opacity='.3' points='18,19 13,19 13,15 11,15 11,19 6,19 6,10.1 12,5.52 18,10.1'/%3E%3Cpath d='M12,3L6,7.58V6H4v3.11L1,11.4l1.21,1.59L4,11.62V21h16v-9.38l1.79,1.36L23,11.4L12,3z M18,19h-5v-4h-2v4H6v-8.9l6-4.58 l6,4.58V19z M10,1c0,1.66-1.34,3-3,3C6.45,4,6,4.45,6,5H4c0-1.66,1.34-3,3-3c0.55,0,1-0.45,1-1H10z'/%3E%3C/svg%3E"); -} - -.icon-book { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg/%3E%3Cg%3E%3Cpath d='M21,5c-1.11-0.35-2.33-0.5-3.5-0.5c-1.95,0-4.05,0.4-5.5,1.5c-1.45-1.1-3.55-1.5-5.5-1.5S2.45,4.9,1,6v14.65 c0,0.25,0.25,0.5,0.5,0.5c0.1,0,0.15-0.05,0.25-0.05C3.1,20.45,5.05,20,6.5,20c1.95,0,4.05,0.4,5.5,1.5c1.35-0.85,3.8-1.5,5.5-1.5 c1.65,0,3.35,0.3,4.75,1.05c0.1,0.05,0.15,0.05,0.25,0.05c0.25,0,0.5-0.25,0.5-0.5V6C22.4,5.55,21.75,5.25,21,5z M3,18.5V7 c1.1-0.35,2.3-0.5,3.5-0.5c1.34,0,3.13,0.41,4.5,0.99v11.5C9.63,18.41,7.84,18,6.5,18C5.3,18,4.1,18.15,3,18.5z M21,18.5 c-1.1-0.35-2.3-0.5-3.5-0.5c-1.34,0-3.13,0.41-4.5,0.99V7.49c1.37-0.59,3.16-0.99,4.5-0.99c1.2,0,2.4,0.15,3.5,0.5V18.5z'/%3E%3Cpath d='M11,7.49C9.63,6.91,7.84,6.5,6.5,6.5C5.3,6.5,4.1,6.65,3,7v11.5C4.1,18.15,5.3,18,6.5,18 c1.34,0,3.13,0.41,4.5,0.99V7.49z' opacity='.3'/%3E%3C/g%3E%3Cg%3E%3Cpath d='M17.5,10.5c0.88,0,1.73,0.09,2.5,0.26V9.24C19.21,9.09,18.36,9,17.5,9c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,10.69,16.18,10.5,17.5,10.5z'/%3E%3Cpath d='M17.5,13.16c0.88,0,1.73,0.09,2.5,0.26V11.9c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 C14.99,13.36,16.18,13.16,17.5,13.16z'/%3E%3Cpath d='M17.5,15.83c0.88,0,1.73,0.09,2.5,0.26v-1.52c-0.79-0.15-1.64-0.24-2.5-0.24c-1.28,0-2.46,0.16-3.5,0.47v1.57 
C14.99,16.02,16.18,15.83,17.5,15.83z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E"); -} - -.icon-theme { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0z' fill='none'/%3E%3Cpath d='M12 3c-4.97 0-9 4.03-9 9s4.03 9 9 9c.83 0 1.5-.67 1.5-1.5 0-.39-.15-.74-.39-1.01-.23-.26-.38-.61-.38-.99 0-.83.67-1.5 1.5-1.5H16c2.76 0 5-2.24 5-5 0-4.42-4.03-8-9-8zm-5.5 9c-.83 0-1.5-.67-1.5-1.5S5.67 9 6.5 9 8 9.67 8 10.5 7.33 12 6.5 12zm3-4C8.67 8 8 7.33 8 6.5S8.67 5 9.5 5s1.5.67 1.5 1.5S10.33 8 9.5 8zm5 0c-.83 0-1.5-.67-1.5-1.5S13.67 5 14.5 5s1.5.67 1.5 1.5S15.33 8 14.5 8zm3 4c-.83 0-1.5-.67-1.5-1.5S16.67 9 17.5 9s1.5.67 1.5 1.5-.67 1.5-1.5 1.5z'/%3E%3C/svg%3E"); -} - -.icon-brightness { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M18 9.52V6h-3.52L12 3.52 9.52 6H6v3.52L3.52 12 6 14.48V18h3.52L12 20.48 14.48 18H18v-3.52L20.48 12 18 9.52zm-6 7.98v-11c3.03 0 5.5 2.47 5.5 5.5s-2.47 5.5-5.5 5.5z' opacity='.3'/%3E%3Cpath d='M20 8.69V4h-4.69L12 .69 8.69 4H4v4.69L.69 12 4 15.31V20h4.69L12 23.31 15.31 20H20v-4.69L23.31 12 20 8.69zm-2 5.79V18h-3.52L12 20.48 9.52 18H6v-3.52L3.52 12 6 9.52V6h3.52L12 3.52 14.48 6H18v3.52L20.48 12 18 14.48zM12 6.5v11c3.03 0 5.5-2.47 5.5-5.5S15.03 6.5 12 6.5z'/%3E%3C/svg%3E"); -} - -.icon-light-mode { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Ccircle cx='12' cy='12' opacity='.3' r='3'/%3E%3Cpath d='M12,9c1.65,0,3,1.35,3,3s-1.35,3-3,3s-3-1.35-3-3S10.35,9,12,9 M12,7c-2.76,0-5,2.24-5,5s2.24,5,5,5s5-2.24,5-5 S14.76,7,12,7L12,7z M2,13l2,0c0.55,0,1-0.45,1-1s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S1.45,13,2,13z M20,13l2,0c0.55,0,1-0.45,1-1 s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S19.45,13,20,13z 
M11,2v2c0,0.55,0.45,1,1,1s1-0.45,1-1V2c0-0.55-0.45-1-1-1S11,1.45,11,2z M11,20v2c0,0.55,0.45,1,1,1s1-0.45,1-1v-2c0-0.55-0.45-1-1-1C11.45,19,11,19.45,11,20z M5.99,4.58c-0.39-0.39-1.03-0.39-1.41,0 c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0s0.39-1.03,0-1.41L5.99,4.58z M18.36,16.95 c-0.39-0.39-1.03-0.39-1.41,0c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0c0.39-0.39,0.39-1.03,0-1.41 L18.36,16.95z M19.42,5.99c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06c-0.39,0.39-0.39,1.03,0,1.41 s1.03,0.39,1.41,0L19.42,5.99z M7.05,18.36c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06 c-0.39,0.39-0.39,1.03,0,1.41s1.03,0.39,1.41,0L7.05,18.36z'/%3E%3C/svg%3E"); -} - -.icon-dark-mode { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cpath d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27 C17.45,17.19,14.93,19,12,19c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z' opacity='.3'/%3E%3Cpath d='M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27C17.45,17.19,14.93,19,12,19 c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z M12,3c-4.97,0-9,4.03-9,9s4.03,9,9,9s9-4.03,9-9c0-0.46-0.04-0.92-0.1-1.36 c-0.98,1.37-2.58,2.26-4.4,2.26c-2.98,0-5.4-2.42-5.4-5.4c0-1.81,0.89-3.42,2.26-4.4C12.92,3.04,12.46,3,12,3L12,3z'/%3E%3C/svg%3E"); -} - -.icon-night-mode { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Cg%3E%3Cpath d='M8.1,14.15C9.77,14.63,11,16.17,11,18c0,0.68-0.19,1.31-0.48,1.87c0.48,0.09,0.97,0.14,1.48,0.14 c1.48,0,2.9-0.41,4.13-1.15c-2.62-0.92-5.23-2.82-6.8-5.86C7.74,9.94,7.78,7.09,8.29,4.9c-2.57,1.33-4.3,4.01-4.3,7.1c0,0,0,0,0,0 
c0.01,0,0.01,0,0.02,0C5.66,12,7.18,12.83,8.1,14.15z' opacity='.3'/%3E%3Cpath d='M19.78,17.51c-2.47,0-6.57-1.33-8.68-5.43C8.77,7.57,10.6,3.6,11.63,2.01C6.27,2.2,1.98,6.59,1.98,12 c0,0.14,0.02,0.28,0.02,0.42C2.61,12.16,3.28,12,3.98,12c0,0,0,0,0,0c0-3.09,1.73-5.77,4.3-7.1C7.78,7.09,7.74,9.94,9.32,13 c1.57,3.04,4.18,4.95,6.8,5.86c-1.23,0.74-2.65,1.15-4.13,1.15c-0.5,0-1-0.05-1.48-0.14c-0.37,0.7-0.94,1.27-1.64,1.64 c0.98,0.32,2.03,0.5,3.11,0.5c3.5,0,6.58-1.8,8.37-4.52C20.18,17.5,19.98,17.51,19.78,17.51z'/%3E%3Cpath d='M7,16l-0.18,0C6.4,14.84,5.3,14,4,14c-1.66,0-3,1.34-3,3s1.34,3,3,3c0.62,0,2.49,0,3,0c1.1,0,2-0.9,2-2 C9,16.9,8.1,16,7,16z'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E"); -} - -.icon-translate { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12.65 15.67c.14-.36.05-.77-.23-1.05l-2.09-2.06.03-.03c1.74-1.94 2.98-4.17 3.71-6.53h1.94c.54 0 .99-.45.99-.99v-.02c0-.54-.45-.99-.99-.99H10V3c0-.55-.45-1-1-1s-1 .45-1 1v1H1.99c-.54 0-.99.45-.99.99 0 .55.45.99.99.99h10.18C11.5 7.92 10.44 9.75 9 11.35c-.81-.89-1.49-1.86-2.06-2.88-.16-.29-.45-.47-.78-.47-.69 0-1.13.75-.79 1.35.63 1.13 1.4 2.21 2.3 3.21L3.3 16.87c-.4.39-.4 1.03 0 1.42.39.39 1.02.39 1.42 0L9 14l2.02 2.02c.51.51 1.38.32 1.63-.35zM17.5 10c-.6 0-1.14.37-1.35.94l-3.67 9.8c-.24.61.22 1.26.87 1.26.39 0 .74-.24.88-.61l.89-2.39h4.75l.9 2.39c.14.36.49.61.88.61.65 0 1.11-.65.88-1.26l-3.67-9.8c-.22-.57-.76-.94-1.36-.94zm-1.62 7l1.62-4.33L19.12 17h-3.24z'/%3E%3C/svg%3E"); -} - -.icon-search { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M15.5 14h-.79l-.28-.27c1.2-1.4 1.82-3.31 1.48-5.34-.47-2.78-2.79-5-5.59-5.34-4.23-.52-7.79 3.04-7.27 7.27.34 2.8 2.56 5.12 5.34 5.59 2.03.34 3.94-.28 5.34-1.48l.27.28v.79l4.25 4.25c.41.41 1.08.41 1.49 0 .41-.41.41-1.08 
0-1.49L15.5 14zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 14 9.5 14z'/%3E%3C/svg%3E"); -} - -.icon-select { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M12 5.83L15.17 9l1.41-1.41L12 3 7.41 7.59 8.83 9 12 5.83zm0 12.34L8.83 15l-1.41 1.41L12 21l4.59-4.59L15.17 15 12 18.17z'/%3E%3C/svg%3E"); -} - -.icon-calendar { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24'%3E%3Cg%3E%3Crect fill='none' height='24' width='24'/%3E%3C/g%3E%3Cg%3E%3Crect height='2' opacity='.3' width='14' x='5' y='6'/%3E%3Cpath d='M19,4h-1V2h-2v2H8V2H6v2H5C3.89,4,3.01,4.9,3.01,6L3,20c0,1.1,0.89,2,2,2h14c1.1,0,2-0.9,2-2V6C21,4.9,20.1,4,19,4z M19,20 H5V10h14V20z M19,8H5V6h14V8z M9,14H7v-2h2V14z M13,14h-2v-2h2V14z M17,14h-2v-2h2V14z M9,18H7v-2h2V18z M13,18h-2v-2h2V18z M17,18 h-2v-2h2V18z'/%3E%3C/g%3E%3C/svg%3E"); -} - -.icon-next { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M24 24H0V0h24v24z' fill='none' opacity='.87'/%3E%3Cpath d='M7.38 21.01c.49.49 1.28.49 1.77 0l8.31-8.31c.39-.39.39-1.02 0-1.41L9.15 2.98c-.49-.49-1.28-.49-1.77 0s-.49 1.28 0 1.77L14.62 12l-7.25 7.25c-.48.48-.48 1.28.01 1.76z' fill='%23328ac1'/%3E%3C/svg%3E"); -} - -.icon-prev { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Crect fill='none' height='24' width='24'/%3E%3Cg%3E%3Cpath d='M16.88,2.88L16.88,2.88c-0.49-0.49-1.28-0.49-1.77,0l-8.41,8.41c-0.39,0.39-0.39,1.02,0,1.41l8.41,8.41 c0.49,0.49,1.28,0.49,1.77,0l0,0c0.49-0.49,0.49-1.28,0-1.77L9.54,12l7.35-7.35C17.37,4.16,17.37,3.37,16.88,2.88z' fill='%23328ac1'/%3E%3C/g%3E%3C/svg%3E"); -} - -.icon-copyright { - background-image: 
url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M10.08 10.86c.05-.33.16-.62.3-.87s.34-.46.59-.62c.24-.15.54-.22.91-.23.23.01.44.05.63.13.2.09.38.21.52.36s.25.33.34.53.13.42.14.64h1.79c-.02-.47-.11-.9-.28-1.29s-.4-.73-.7-1.01-.66-.5-1.08-.66-.88-.23-1.39-.23c-.65 0-1.22.11-1.7.34s-.88.53-1.2.92-.56.84-.71 1.36S8 11.29 8 11.87v.27c0 .58.08 1.12.23 1.64s.39.97.71 1.35.72.69 1.2.91c.48.22 1.05.34 1.7.34.47 0 .91-.08 1.32-.23s.77-.36 1.08-.63.56-.58.74-.94.29-.74.3-1.15h-1.79c-.01.21-.06.4-.15.58s-.21.33-.36.46-.32.23-.52.3c-.19.07-.39.09-.6.1-.36-.01-.66-.08-.89-.23-.25-.16-.45-.37-.59-.62s-.25-.55-.3-.88-.08-.67-.08-1v-.27c0-.35.03-.68.08-1.01zM12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8z'/%3E%3C/svg%3E"); -} - -/* -- add `.icon-colored` -- */ -.icon-love { - background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='18px' height='18px' viewBox='0 0 24 24' fill='%23ff4d4d' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none'/%3E%3Cpath d='M13.35 20.13c-.76.69-1.93.69-2.69-.01l-.11-.1C5.3 15.27 1.87 12.16 2 8.28c.06-1.7.93-3.33 2.34-4.29 2.64-1.8 5.9-.96 7.66 1.1 1.76-2.06 5.02-2.91 7.66-1.1 1.41.96 2.28 2.59 2.34 4.29.14 3.88-3.3 6.99-8.55 11.76l-.1.09z'/%3E%3C/svg%3E"); -} diff --git a/themes/docura/assets/scss/layout.scss b/themes/docura/assets/scss/layout.scss deleted file mode 100644 index 3e376f1..0000000 --- a/themes/docura/assets/scss/layout.scss +++ /dev/null @@ -1,368 +0,0 @@ -body { - font-family: var(--font-family); - background: var(--background); - color: var(--color); - display: flex; - flex-direction: column; - min-height: 100svh; -} - -#site-header { - display: grid; - grid-template-columns: 2fr 1fr; - grid-template-rows: repeat(3, var(--site-header-height)); -} - -#site-header-menu, #site-header-search { - grid-column: 1 / 3; -} - 
-#site-footer { - display: grid; - grid-template-columns: 1fr 1fr; - grid-template-rows: repeat(3, var(--site-footer-height)); -} - -#site-footer-copyright, #site-footer-love { - grid-column: 1 / 3; -} - -#site-main-content-wrapper { - display: flex; - flex: 1; -} - -#sidebar, #toc, #article-nav, #sidebar .btn-close, #toc .btn-close { - display: none; -} - -main { - flex: 1; - display: flex; - overflow: auto; -} - -#article { - flex: 1; - width: 100vw; -} - -#sidebar { - width: 85%; - left: -85%; -} - -#toc { - width: 85%; - right: -85%; -} - -/* Small Tablet */ -@media (min-width: 768px) and (max-width: 1023px) { - #site-header { - grid-template-columns: repeat(6, 1fr); - grid-template-rows: repeat(2, var(--site-header-height)); - } - - #site-header-brand { - grid-column: 1 / 6; - } - - #site-header-controls { - grid-column: 6 / 7; - } - - #site-header-menu { - grid-column: 1 / 5; - } - - #site-header-search { - grid-column: 5 / 7; - } - - #site-footer { - grid-template-columns: repeat(4, 1fr); - grid-template-rows: repeat(2, var(--site-footer-height)); - } - - #site-footer-copyright { - grid-column: 1 / 3; - } - - #site-footer-social { - grid-column: 3 / 4; - } - - #site-footer-fund { - grid-column: 4 / 5; - } - - #site-footer-love { - grid-column: 1 / 5; - } - - #sidebar { - width: 50%; - left: -50%; - } - - #toc { - width: 50%; - right: -50%; - } -} - -/* From Large Tablet */ -@media (min-width: 1024px) { - #site-header { - grid-template-columns: repeat(6, 1fr); - grid-template-rows: var(--site-header-height); - } - - #site-header-brand { - grid-column: 1 / 2; - } - - #site-header-menu { - grid-column: 2 / 5; - grid-row: 1; - } - - #site-header-search { - grid-column: 5 / 6; - grid-row: 1; - } - - #site-header-controls { - grid-column: 6 / 7; - } - - #site-footer { - grid-template-columns: repeat(5, 1fr); - grid-template-rows: var(--site-footer-height); - } - - #site-footer-copyright { - grid-column: 1 / 3; - } - - #site-footer-love { - grid-column: 3 / 4; - 
grid-row: 1; - } - - #site-footer-social { - grid-column: 4 / 5; - } - - #site-footer-fund { - grid-column: 5 / 6; - } - - #article-nav-toc-btn { - display: none; - } -} - -/* Large Tablet */ -@media (min-width: 1024px) and (max-width: 1279px) { - #sidebar { - width: 33%; - left: -33%; - } - - #article { - width: 75vw; - } - - #toc { - width: 25%; - display: flex; - flex-direction: column; - } - - #toc .sticky { - position: fixed; - right: 0; - width: 25%; - } -} - -/* From Desktop */ -@media (min-width: 1280px) { - #sidebar { - width: 20%; - display: flex; - flex-direction: column; - } - - #article { - width: 60vw; - } - - #toc { - width: 25%; - display: flex; - flex-direction: column; - } - - #sidebar .sticky { - position: fixed; - left: 0; - width: 20%; - } - - #toc .sticky { - position: fixed; - right: 0; - width: 20%; - } -} - -/* Upto Large Tablet */ -@media (max-width: 1023px) { - #toc { - position: fixed; - top: 0; - height: 100%; - transition: .3s; - z-index: 300; - overflow-x: auto; - background: var(--background); - box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1); - } - - :root[data-color="dark"] #toc, :root[data-color="night"] #toc { - box-shadow: 0 4px 30px rgba(255, 255, 255, 0.1); - } - - .offcanvas-toc-on #toc { - animation: slide-in-right .3s forwards; - display: flex; - flex-direction: column; - padding-left: 16px; - z-index: 10; - cursor: default; - } - - .offcanvas-toc-on:before { - content: ""; - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 5; - } - - .offcanvas-toc-on #toc .btn-close { - display: block; - position: absolute; - top: 10px; - left: 10px; - } - - #article-nav-toc-btn { - display: flex; - box-shadow: var(--box-shadow2); - border-radius: 6px; - padding: 6px; - cursor: pointer; - white-space: nowrap; - gap: 6px; - color: var(--color2); - } -} - -/* Upto Desktop */ -@media (max-width: 1279px) { - #sidebar { - position: fixed; - top: 0; - height: 100%; - transition: .3s; - z-index: 200; - overflow-x: auto; 
- background: var(--background); - box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1); - } - - :root[data-color="dark"] #sidebar, :root[data-color="night"] #sidebar { - box-shadow: 0 4px 30px rgba(255, 255, 255, 0.1); - } - - .offcanvas-sidebar-on #sidebar { - animation: slide-in-left .3s forwards; - display: flex; - flex-direction: column; - z-index: 10; - cursor: default; - } - - .offcanvas-sidebar-on:before { - content: ""; - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 5; - } - - .offcanvas-sidebar-on #sidebar .btn-close { - display: block; - position: absolute; - top: 10px; - right: 10px; - } - - #article-nav { - display: flex; - gap: 12px; - overflow: auto; - justify-content: space-between; - height: var(--site-header-height); - align-items: center; - padding: 0 2px; - } - - #article-nav-menu-btn { - display: flex; - box-shadow: var(--box-shadow2); - border-radius: 6px; - padding: 6px; - cursor: pointer; - white-space: nowrap; - gap: 6px; - color: var(--color2); - } -} - -body.offcanvas-sidebar-on, body.offcanvas-toc-on { - cursor: pointer; - overflow: hidden; -} - -.offcanvas-sidebar-on:before, .offcanvas-toc-on:before { - background: rgba(255, 255, 255, 0.1); - backdrop-filter: blur(var(--blur)); - -webkit-backdrop-filter: blur(var(--blur)); -} - -@keyframes slide-in-left { - from { - transform: translateX(0); - } - to { - transform: translateX(100%); - } -} - -@keyframes slide-in-right { - from { - transform: translateX(0); - } - to { - transform: translateX(-100%); - } -} \ No newline at end of file diff --git a/themes/docura/assets/scss/reset.scss b/themes/docura/assets/scss/reset.scss deleted file mode 100644 index 2ff2733..0000000 --- a/themes/docura/assets/scss/reset.scss +++ /dev/null @@ -1,87 +0,0 @@ -/* https://github.com/elad2412/the-new-css-reset v1.11 */ -/* custom styles for: pre, code */ - -*:where(:not(html, iframe, canvas, img, svg, video, audio, pre, code):not(svg *, symbol *)) { - all: unset; - display: revert; -} 
- -*, -*::before, -*::after { - box-sizing: border-box; -} - -html { - -moz-text-size-adjust: none; - -webkit-text-size-adjust: none; - text-size-adjust: none; -} - -a, button { - cursor: revert; -} - -ol, ul, menu { - list-style: none; -} - -img { - max-inline-size: 100%; - max-block-size: 100%; -} - -table { - border-collapse: collapse; -} - -input, textarea { - -webkit-user-select: auto; -} - -textarea { - white-space: revert; -} - -meter { - -webkit-appearance: revert; - appearance: revert; -} - -:where(pre) { - all: revert; - box-sizing: border-box; -} - -::placeholder { - color: unset; -} - -::marker { - content: initial; -} - -:where([hidden]) { - display: none; -} - -:where([contenteditable]:not([contenteditable="false"])) { - -moz-user-modify: read-write; - -webkit-user-modify: read-write; - overflow-wrap: break-word; - -webkit-line-break: after-white-space; - -webkit-user-select: auto; -} - -:where([draggable="true"]) { - -webkit-user-drag: element; -} - -:where(dialog:modal) { - all: revert; - box-sizing: border-box; -} - -pre, code { - margin: 0; -} \ No newline at end of file diff --git a/themes/docura/assets/scss/theme/default.scss b/themes/docura/assets/scss/theme/default.scss deleted file mode 100644 index 9576307..0000000 --- a/themes/docura/assets/scss/theme/default.scss +++ /dev/null @@ -1,123 +0,0 @@ -@import "../font/inter"; -@import "../icon/default"; - -:root { - --font-family: 'Inter', sans-serif; - --font-family-brand: 'Times', serif; - --font-family-code: 'Menlo', monospace; - - --background: #ffffff; - --color: #355265; - --color2: #274457; - --color3: #476d86; - - --color-anchor: #328ac1; - --color-hover: #4b9dd0; - - --background-fg: #f7f7f7; - --background-fg2: #ebebeb; - --border-color: #dddddd; - - --box-shadow: 0 0 1px rgba(0, 0, 0, .7); - --box-shadow2: 0 0 3px rgba(0, 0, 0, .2); - - --blur: 10px; - - --home-cover-background: radial-gradient(circle, rgba(255,255,255,1) 0%, rgba(255,255,250,1) 25%, rgba(214,219,220,1) 50%, 
rgba(255,255,250,1) 75%, rgba(255,255,255,1) 100%); - - --icon-filter: invert(41%) sepia(19%) saturate(840%) hue-rotate(161deg) brightness(92%) contrast(92%); - - /* base16 tomorrow */ - --chroma-base00: #f9f9f9; - --chroma-base01: #e0e0e0; - --chroma-base02: rgba(159, 218, 159, .2); - --chroma-base03: #8e908c; - --chroma-base04: #969896; - --chroma-base05: #4d4d4c; - --chroma-base06: #282a2e; - --chroma-base07: #1d1f21; - --chroma-base08: #c82829; - --chroma-base09: #f5871f; - --chroma-base0A: #eab700; - --chroma-base0B: #718c00; - --chroma-base0C: #3e999f; - --chroma-base0D: #4271ae; - --chroma-base0E: #8959a8; - --chroma-base0F: #a3685a; -} - -:root[data-color="dark"] { - --background: #121212; - --color: #efefef; - --color2: #ffffff; - --color3: #b3b3b3; - - --background-fg: #333333; - --background-fg2: #1f1f1f; - --border-color: rgba(255, 255, 255, .4); - - --box-shadow: 0 0 1px rgba(255, 255, 255, 1); - --box-shadow2: 0 0 3px rgba(255, 255, 255, .6); - - --home-cover-background: radial-gradient(circle, rgba(23,23,25,1) 0%, rgba(18,18,0,1) 25%, rgba(32,32,32,1) 50%, rgba(18,18,0,1) 75%, rgba(23,23,25,1) 100%); - - --icon-filter: invert(83%) sepia(0%) saturate(1582%) hue-rotate(126deg) brightness(86%) contrast(80%); - - /* base16 tomorrow night */ - --chroma-base00: #080808; - --chroma-base01: #393939; - --chroma-base02: rgba(159, 218, 159, .1); - --chroma-base03: #999999; - --chroma-base04: #b4b7b4; - --chroma-base05: #cccccc; - --chroma-base06: #e0e0e0; - --chroma-base07: #ffffff; - --chroma-base08: #f2777a; - --chroma-base09: #f99157; - --chroma-base0A: #ffcc66; - --chroma-base0B: #99cc99; - --chroma-base0C: #66cccc; - --chroma-base0D: #6699cc; - --chroma-base0E: #cc99cc; - --chroma-base0F: #a3685a; -} - -:root[data-color="night"] { - --background: #333333; - --color: #cccccc; - --color2: #dedede; - --color3: #9d9d9d; - - --background-fg: #444444; - --background-fg2: #303030; - --border-color: rgba(255, 255, 255, 0.2); - - --box-shadow: 0 0 1px rgba(225, 
255, 255, 1); - --box-shadow2: 0 0 3px rgba(255, 255, 255, .6); - - --home-cover-background: radial-gradient(circle, rgba(52,52,52,1) 0%, rgba(42,42,42,1) 25%, rgba(57,57,57,1) 50%, rgba(42,42,42,1) 75%, rgba(52,52,52,1) 100%); - - --icon-filter: invert(60%) sepia(25%) saturate(20%) hue-rotate(343deg) brightness(98%) contrast(94%); - - /* base16 twilight */ - --chroma-base00: #1e1e1e; - --chroma-base01: #323537; - --chroma-base02: rgba(159, 218, 159, .1); - --chroma-base03: #5f5a60; - --chroma-base04: #838184; - --chroma-base05: #a7a7a7; - --chroma-base06: #c3c3c3; - --chroma-base07: #ffffff; - --chroma-base08: #cf6a4c; - --chroma-base09: #cda869; - --chroma-base0A: #f9ee98; - --chroma-base0B: #8f9d6a; - --chroma-base0C: #afc4db; - --chroma-base0D: #7587a6; - --chroma-base0E: #9b859d; - --chroma-base0F: #9b703f; -} - -.icon:not(.icon-colored) { - filter: var(--icon-filter); -} \ No newline at end of file diff --git a/themes/docura/assets/scss/variables.scss b/themes/docura/assets/scss/variables.scss deleted file mode 100644 index ed2b659..0000000 --- a/themes/docura/assets/scss/variables.scss +++ /dev/null @@ -1,19 +0,0 @@ -:root { - --site-header-height: 46px; - --site-footer-height: 46px; -} - -@media (min-width: 1025px) and (max-width: 1280px), -(min-width: 1024px) and (max-width: 1280px) and (orientation: portrait) { - :root { - --site-header-height: 60px; - --site-footer-height: 60px; - } -} - -@media (min-width: 1281px) { - :root { - --site-header-height: 80px; - --site-footer-height: 80px; - } -} \ No newline at end of file diff --git a/themes/docura/go.mod b/themes/docura/go.mod deleted file mode 100644 index f47f0a8..0000000 --- a/themes/docura/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/docura/docura - -go 1.20 diff --git a/themes/docura/hugo.yaml b/themes/docura/hugo.yaml deleted file mode 100644 index 44e136e..0000000 --- a/themes/docura/hugo.yaml +++ /dev/null @@ -1,88 +0,0 @@ -theme: docura - -enableGitInfo: true - -markup: - highlight: - 
noClasses: false - goldmark: - renderer: - unsafe: true - -services: - googleAnalytics: - ID: G-xxxxxxxxxx - -defaultContentLanguage: en -languages: - en: - languageName: English - languageCode: en-US - contentDir: content/en - title: Your Site Name - description: Your Site Description - weight: 1 - -params: - - author: - name: Your Name - url: https://your.site - - themeColors: - light: '#ffffff' - dark: '#121212' - - years: - start: '' - present: 2023 - - social: - github: '' - youtube: '' - facebook: '' - x: '' - - ads: - googleAdSense: '' - - donate: - buyMeACoffee: '' - githubSponsor: '' - - algolia: - en: - container: '#site-header-search' - appId: '' - indexName: '' - apiKey: '' - - home: - - repository: 'your/repo' - -# repositories: -# - name: Website -# repo: org/site -# icon: "" -# description: The repository of the website -# -# - name: Code -# repo: org/project -# icon: "" -# description: The repository of the project - -menu: - main: - - - url: / - name: Home - identifier: home - pre: "" - weight: 1 - - - url: /docs - name: Documentation - identifier: docs - pre: "" - weight: 2 diff --git a/themes/docura/images/screenshot.png b/themes/docura/images/screenshot.png deleted file mode 100644 index 5f2635f..0000000 Binary files a/themes/docura/images/screenshot.png and /dev/null differ diff --git a/themes/docura/images/tn.png b/themes/docura/images/tn.png deleted file mode 100644 index 902d28b..0000000 Binary files a/themes/docura/images/tn.png and /dev/null differ diff --git a/themes/docura/layouts/_default/404.html b/themes/docura/layouts/_default/404.html deleted file mode 100644 index 6fd92f5..0000000 --- a/themes/docura/layouts/_default/404.html +++ /dev/null @@ -1,10 +0,0 @@ -{{ define "main" }} -
    -
    -
    -

    Page Not Found

    -

    Sorry, but the page you were trying to view does not exist.

    -
    -
    -
    -{{ end }} \ No newline at end of file diff --git a/themes/docura/layouts/_default/baseof.html b/themes/docura/layouts/_default/baseof.html deleted file mode 100644 index 9a951c2..0000000 --- a/themes/docura/layouts/_default/baseof.html +++ /dev/null @@ -1,13 +0,0 @@ - - - - {{ partial "head" . }} - - - - {{ partial "site-header" . }} - {{ block "main" . }}{{ end }} - {{ partial "site-footer" . }} - {{ partial "scripts" . }} - - \ No newline at end of file diff --git a/themes/docura/layouts/_default/home.html b/themes/docura/layouts/_default/home.html deleted file mode 100644 index 054dd33..0000000 --- a/themes/docura/layouts/_default/home.html +++ /dev/null @@ -1,23 +0,0 @@ -{{ define "main" }} -
    -
    -
    -

    {{ .Site.Title }}

    -

    {{ .Site.Params.description }}

    - - {{ with .Site.Params.home.repositories }} - {{ partial "github-repos-grid" . }} - {{ end }} - - - -
    -
    -
    -{{ end }} \ No newline at end of file diff --git a/themes/docura/layouts/_default/single.html b/themes/docura/layouts/_default/single.html deleted file mode 100644 index be3e3d1..0000000 --- a/themes/docura/layouts/_default/single.html +++ /dev/null @@ -1,21 +0,0 @@ -{{ define "main" }} -
    - {{ $dataLocale := index .Site.Data .Site.Language.Lang }} - {{ if isset $dataLocale .Section }} - {{ partial "sidebar" . }} - {{ end }} -
    - - {{ partial "toc" . }} -
    -
    -{{ end }} \ No newline at end of file diff --git a/themes/docura/layouts/partials/adsense.html b/themes/docura/layouts/partials/adsense.html deleted file mode 100644 index b8e5597..0000000 --- a/themes/docura/layouts/partials/adsense.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ with .Site.Params.ads.googleAdSense }} - -{{ end }} \ No newline at end of file diff --git a/themes/docura/layouts/partials/analytics.html b/themes/docura/layouts/partials/analytics.html deleted file mode 100644 index 565cc5e..0000000 --- a/themes/docura/layouts/partials/analytics.html +++ /dev/null @@ -1,8 +0,0 @@ - - \ No newline at end of file diff --git a/themes/docura/layouts/partials/article-footer.html b/themes/docura/layouts/partials/article-footer.html deleted file mode 100644 index a0dbc5f..0000000 --- a/themes/docura/layouts/partials/article-footer.html +++ /dev/null @@ -1,66 +0,0 @@ -{{ $lastMod := .Lastmod.Format "2006-01-02" }} - -{{ $prevPageTitle := "" }} -{{ $nextPageTitle := "" }} -{{ $prevPageHref := "" }} -{{ $nextPageHref := "" }} - -{{ $dataLocale := index .Site.Data .Site.Language.Lang }} -{{ if isset $dataLocale .Section }} - {{ $data := index .Site.Data .Site.Language.Lang .Section "sidebar"}} - {{- $url := split .Permalink "/" -}} - {{- $urlPageSlug := index $url (sub (len $url) 2) -}} - - {{ $isSectionIndex := eq $urlPageSlug .Section }} - {{ $isActivePagePassed := false }} - {{ $isNextPagePassed := false }} - - {{- range $group := $data -}} - {{ if $isNextPagePassed }} - {{break}} - {{ end }} - - {{- range $page := $group.pages -}} - {{- $pageSlug := $page.title | urlize -}} - {{- $isActivePage := or $isSectionIndex (eq $urlPageSlug $pageSlug) -}} - - {{ if $isActivePagePassed }} - {{ $nextPageTitle = $page.title }} - - {{ if eq .Site.Language.Lang .Site.DefaultContentLanguage }} - {{ $nextPageHref = printf "/%s/%s/" $.Section $pageSlug }} - {{ else }} - {{ $nextPageHref = printf "/%s/%s/%s/" $.Site.Language.Lang $.Section $pageSlug }} - {{ end}} - - {{ 
$isNextPagePassed = true }} - {{break}} - - {{ else if $isActivePage }} - {{ $isActivePagePassed = true }} - - {{ else }} - {{ $prevPageTitle = $page.title }} - - {{ if eq .Site.Language.Lang .Site.DefaultContentLanguage }} - {{ $prevPageHref = printf "/%s/%s/" $.Section $pageSlug }} - {{ else }} - {{ $prevPageHref = printf "/%s/%s/%s/" $.Site.Language.Lang $.Section $pageSlug }} - {{ end}} - - {{ end }} - {{ end}} - {{- end }} -{{ end }} - - \ No newline at end of file diff --git a/themes/docura/layouts/partials/article-header.html b/themes/docura/layouts/partials/article-header.html deleted file mode 100644 index df73bc6..0000000 --- a/themes/docura/layouts/partials/article-header.html +++ /dev/null @@ -1,3 +0,0 @@ -
    -

    {{ .Title | markdownify }}

    -
    \ No newline at end of file diff --git a/themes/docura/layouts/partials/article-nav.html b/themes/docura/layouts/partials/article-nav.html deleted file mode 100644 index a87e7b1..0000000 --- a/themes/docura/layouts/partials/article-nav.html +++ /dev/null @@ -1,4 +0,0 @@ - \ No newline at end of file diff --git a/themes/docura/layouts/partials/buy-me-a-coffee-widget.html b/themes/docura/layouts/partials/buy-me-a-coffee-widget.html deleted file mode 100644 index c06d5a7..0000000 --- a/themes/docura/layouts/partials/buy-me-a-coffee-widget.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ with .Site.Params.donate.buyMeACoffee }} - -{{ end }} \ No newline at end of file diff --git a/themes/docura/layouts/partials/favicons.html b/themes/docura/layouts/partials/favicons.html deleted file mode 100644 index d0bfbee..0000000 --- a/themes/docura/layouts/partials/favicons.html +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/themes/docura/layouts/partials/github-buttons.html b/themes/docura/layouts/partials/github-buttons.html deleted file mode 100644 index 3281f91..0000000 --- a/themes/docura/layouts/partials/github-buttons.html +++ /dev/null @@ -1,5 +0,0 @@ -
    - Star - Fork - -
    diff --git a/themes/docura/layouts/partials/github-repos-grid.html b/themes/docura/layouts/partials/github-repos-grid.html deleted file mode 100644 index 2793b35..0000000 --- a/themes/docura/layouts/partials/github-repos-grid.html +++ /dev/null @@ -1,79 +0,0 @@ - - - -
    - {{ range . }} -
    -
    {{ .icon | safeHTML }}
    -

    {{ .name }}

    -

    {{ .description }}

    - {{ range .repos }} -
    - - - - -
    - - {{ .repo }} - Star - Fork -
    -
    - {{ end }} -
    - {{ end }} -
    - - - \ No newline at end of file diff --git a/themes/docura/layouts/partials/head-meta-theme-colors.html b/themes/docura/layouts/partials/head-meta-theme-colors.html deleted file mode 100644 index 32924d0..0000000 --- a/themes/docura/layouts/partials/head-meta-theme-colors.html +++ /dev/null @@ -1,2 +0,0 @@ - - \ No newline at end of file diff --git a/themes/docura/layouts/partials/head.html b/themes/docura/layouts/partials/head.html deleted file mode 100644 index 1c94138..0000000 --- a/themes/docura/layouts/partials/head.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - - -{{ if .IsHome }}{{ .Site.Title | markdownify }} · {{ .Site.Params.description | markdownify }}{{ else }}{{ .Title | markdownify }} · {{ .Site.Title | markdownify }}{{ end }} - - - -{{ with .Params.robots -}} - -{{- end }} - -{{ partial "stylesheets" . }} -{{ partial "favicons" . }} -{{ partial "head-meta-theme-colors" . }} -{{ partial "analytics" . }} - -{{ if not .IsHome }} - {{ partial "buy-me-a-coffee-widget" . }} - {{ partial "adsense" . 
}} -{{ end }} \ No newline at end of file diff --git a/themes/docura/layouts/partials/scripts.html b/themes/docura/layouts/partials/scripts.html deleted file mode 100644 index 36f94c0..0000000 --- a/themes/docura/layouts/partials/scripts.html +++ /dev/null @@ -1,29 +0,0 @@ -{{ $dropdown := resources.Get "js/component/dropdown.js" }} -{{ $colorPreference := resources.Get "js/component/color-preference.js" }} -{{ $articleNav := resources.Get "js/component/article-nav.js" }} -{{ $sidebar := resources.Get "js/component/sidebar.js" }} -{{ $toc := resources.Get "js/component/toc.js" }} -{{ $baseJs := slice $dropdown $colorPreference $articleNav $sidebar $toc | resources.Concat "js/base.js" | minify }} - - - -{{ $algolia := index .Site.Params.algolia .Site.Language.Lang }} -{{ if $algolia }} - {{ $docsearch := resources.Get "js/component/docsearch.min.js" }} - - -{{ end }} - - \ No newline at end of file diff --git a/themes/docura/layouts/partials/sidebar.html b/themes/docura/layouts/partials/sidebar.html deleted file mode 100644 index 9fd89d5..0000000 --- a/themes/docura/layouts/partials/sidebar.html +++ /dev/null @@ -1,45 +0,0 @@ -{{ $data := index .Site.Data .Site.Language.Lang .Section "sidebar" }} - -{{- $url := split .Permalink "/" -}} -{{- $urlPageSlug := index $url (sub (len $url) 2) -}} - - \ No newline at end of file diff --git a/themes/docura/layouts/partials/site-footer.html b/themes/docura/layouts/partials/site-footer.html deleted file mode 100644 index 8f54340..0000000 --- a/themes/docura/layouts/partials/site-footer.html +++ /dev/null @@ -1,57 +0,0 @@ - \ No newline at end of file diff --git a/themes/docura/layouts/partials/site-header.html b/themes/docura/layouts/partials/site-header.html deleted file mode 100644 index 63db9a5..0000000 --- a/themes/docura/layouts/partials/site-header.html +++ /dev/null @@ -1,56 +0,0 @@ - \ No newline at end of file diff --git a/themes/docura/layouts/partials/stylesheets.html 
b/themes/docura/layouts/partials/stylesheets.html deleted file mode 100644 index d9ad4c3..0000000 --- a/themes/docura/layouts/partials/stylesheets.html +++ /dev/null @@ -1,21 +0,0 @@ -{{ $options := (dict "outputStyle" "compressed" "enableSourceMap" true) }} - -{{ $homeStyle := resources.Get "scss/home.scss" | resources.ToCSS $options }} -{{ $baseStyle := resources.Get "scss/base.scss" | resources.ToCSS $options }} - -{{ if .IsHome }} - - -{{ else }} - -{{ end }} - -{{ $themeStyle := resources.Get "scss/theme/default.scss" | resources.ToCSS $options }} - - -{{ $algolia := index .Site.Params.algolia .Site.Language.Lang }} -{{ if $algolia }} - - {{ $docsearchStyle := resources.Get "scss/component/docsearch.scss" | resources.ToCSS $options }} - -{{ end }} \ No newline at end of file diff --git a/themes/docura/layouts/partials/toc.html b/themes/docura/layouts/partials/toc.html deleted file mode 100644 index 1d729d2..0000000 --- a/themes/docura/layouts/partials/toc.html +++ /dev/null @@ -1,7 +0,0 @@ - \ No newline at end of file diff --git a/themes/docura/static/font/Inter-Italic.woff b/themes/docura/static/font/Inter-Italic.woff deleted file mode 100644 index a806b38..0000000 Binary files a/themes/docura/static/font/Inter-Italic.woff and /dev/null differ diff --git a/themes/docura/static/font/Inter-Italic.woff2 b/themes/docura/static/font/Inter-Italic.woff2 deleted file mode 100644 index a619fc5..0000000 Binary files a/themes/docura/static/font/Inter-Italic.woff2 and /dev/null differ diff --git a/themes/docura/static/font/Inter-Regular.woff b/themes/docura/static/font/Inter-Regular.woff deleted file mode 100644 index 62d3a61..0000000 Binary files a/themes/docura/static/font/Inter-Regular.woff and /dev/null differ diff --git a/themes/docura/static/font/Inter-Regular.woff2 b/themes/docura/static/font/Inter-Regular.woff2 deleted file mode 100644 index 6c2b689..0000000 Binary files a/themes/docura/static/font/Inter-Regular.woff2 and /dev/null differ diff --git 
a/themes/docura/static/font/Inter-SemiBold.woff b/themes/docura/static/font/Inter-SemiBold.woff deleted file mode 100644 index a815f43..0000000 Binary files a/themes/docura/static/font/Inter-SemiBold.woff and /dev/null differ diff --git a/themes/docura/static/font/Inter-SemiBold.woff2 b/themes/docura/static/font/Inter-SemiBold.woff2 deleted file mode 100644 index 611e90c..0000000 Binary files a/themes/docura/static/font/Inter-SemiBold.woff2 and /dev/null differ diff --git a/themes/docura/static/img/icon/favicon.ico b/themes/docura/static/img/icon/favicon.ico deleted file mode 100644 index 4f47bdc..0000000 Binary files a/themes/docura/static/img/icon/favicon.ico and /dev/null differ diff --git a/themes/docura/static/img/icon/icon-16.png b/themes/docura/static/img/icon/icon-16.png deleted file mode 100644 index c55011a..0000000 Binary files a/themes/docura/static/img/icon/icon-16.png and /dev/null differ diff --git a/themes/docura/static/img/icon/icon-180.png b/themes/docura/static/img/icon/icon-180.png deleted file mode 100644 index 694fd85..0000000 Binary files a/themes/docura/static/img/icon/icon-180.png and /dev/null differ diff --git a/themes/docura/static/img/icon/icon-192.png b/themes/docura/static/img/icon/icon-192.png deleted file mode 100644 index a47b1d8..0000000 Binary files a/themes/docura/static/img/icon/icon-192.png and /dev/null differ diff --git a/themes/docura/static/img/icon/icon-32.png b/themes/docura/static/img/icon/icon-32.png deleted file mode 100644 index afaee33..0000000 Binary files a/themes/docura/static/img/icon/icon-32.png and /dev/null differ diff --git a/themes/docura/static/img/icon/icon-512.png b/themes/docura/static/img/icon/icon-512.png deleted file mode 100644 index d69620b..0000000 Binary files a/themes/docura/static/img/icon/icon-512.png and /dev/null differ diff --git a/themes/docura/static/img/icon/icon-vector.svg b/themes/docura/static/img/icon/icon-vector.svg deleted file mode 100644 index fc8a34c..0000000 --- 
a/themes/docura/static/img/icon/icon-vector.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/themes/docura/static/img/icon/maskable-icon-192.png b/themes/docura/static/img/icon/maskable-icon-192.png deleted file mode 100644 index c9f099c..0000000 Binary files a/themes/docura/static/img/icon/maskable-icon-192.png and /dev/null differ diff --git a/themes/docura/static/img/icon/maskable-icon-512.png b/themes/docura/static/img/icon/maskable-icon-512.png deleted file mode 100644 index 281dae9..0000000 Binary files a/themes/docura/static/img/icon/maskable-icon-512.png and /dev/null differ diff --git a/themes/docura/static/manifest.json b/themes/docura/static/manifest.json deleted file mode 100644 index 67381e1..0000000 --- a/themes/docura/static/manifest.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "short_name": "Docura", - "name": "Docura", - "description": "A modular Hugo theme to build your next documentation site.", - "start_url": "/?source=pwa", - "display": "standalone", - "icons": [ - { - "src": "/img/icon/icon-192.png", - "type": "image/png", - "sizes": "192x192" - }, - { - "src": "/img/icon/icon-512.png", - "type": "image/png", - "sizes": "512x512" - }, - { - "src": "/img/icon/maskable-icon-192.png", - "type": "image/png", - "sizes": "192x192", - "purpose": "maskable" - }, - { - "src": "/img/icon/maskable-icon-512.png", - "type": "image/png", - "sizes": "512x512", - "purpose": "maskable" - }, - { - "src": "/img/icon/icon-vector.svg", - "type": "image/svg+xml", - "sizes": "512x512" - } - ], - "background_color": "#ffffff", - "theme_color": "#ffffff" -} \ No newline at end of file diff --git a/themes/docura/static/sw.js b/themes/docura/static/sw.js deleted file mode 100644 index e13c4c6..0000000 --- a/themes/docura/static/sw.js +++ /dev/null @@ -1,61 +0,0 @@ -const cacheName = 'docura-{{ now.Format "2006-01-02" }}'; -const staticAssets = [ - './', - './index.html', - './manifest.json', - './docs/**/*', - './font/*', - 
'./img/icon/favicon.ico', - './img/icon/icon-16.png', - './img/icon/icon-32.png', - './img/icon/icon-180.png', - './img/icon/icon-192.png', - './img/icon/icon-512.png', - './img/icon/icon-vector.svg', - './img/icon/maskable-icon-192.png', - './img/icon/maskable-icon-512.png', - './js/base.min.js', - './js/component/docsearch.min.js', - './scss/base.css', - './scss/component/docsearch.css', - './scss/home.css', -]; - -self.addEventListener('install', async e => { - const cache = await caches.open(cacheName); - await cache.addAll(staticAssets); - return self.skipWaiting(); -}); - -self.addEventListener('activate', e => { - self.clients.claim(); -}); - -self.addEventListener('fetch', async e => { - const req = e.request; - const url = new URL(req.url); - - if (url.origin === location.origin) { - e.respondWith(cacheFirst(req)); - } else { - e.respondWith(networkFirst(req)); - } -}); - -async function cacheFirst(req) { - const cache = await caches.open(cacheName); - const cached = await cache.match(req); - return cached || fetch(req); -} - -async function networkFirst(req) { - const cache = await caches.open(cacheName); - try { - const fresh = await fetch(req); - cache.put(req, fresh.clone()); - return fresh; - } catch (e) { - const cached = await cache.match(req); - return cached; - } -} \ No newline at end of file diff --git a/themes/docura/theme.toml b/themes/docura/theme.toml deleted file mode 100644 index d3dafce..0000000 --- a/themes/docura/theme.toml +++ /dev/null @@ -1,13 +0,0 @@ -name = "Docura" -license = "MIT" -licenselink = "https://github.com/docura/docura/blob/main/LICENSE" -description = "A modular Hugo theme for your next documentation site" -homepage = "https://docura.github.io" -demosite = "https://docura.github.io" -tags = ["documentation", "responsive", "dark mode", "light mode", "modern", "clean", "customizable"] -features = ["responsive", "customizable", "dark mode", "light mode", "night mode", "search", "syntax highlighting", "multilingual", 
"seo", "docsearch"] -min_version = "0.41.0" - -[author] - name = "Dumindu Madunuwan" - homepage = "https://github.com/dumindu" diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 0000000..417e7db --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,13 @@ +{ + "compilerOptions": { + "strict": true, + "skipLibCheck": true, + "esModuleInterop": true, + "jsx": "react", + "baseUrl": "./", + "paths": { + "@@/*": [".dumi/tmp/*"] + } + }, + "include": [".dumirc.ts"] +}