[Omitted: diff of the regenerated, minified frontend bundle (devui build output covering the Radix Select primitives, the markdown renderer, and the agent details modal). The bundle diff only mirrors the source changes below, such as the "Middleware" to "MiddlewareTypes" label rename, and is not reviewable by hand. A truncated stylesheet hunk survives:]
0% { stroke-dashoffset: 0; }
100% { stroke-dashoffset: -10; }
}
-
+
/* Dark theme styles for React Flow controls */
.dark .react-flow__controls {
background-color: rgba(31, 41, 55, 0.9) !important;
diff --git a/python/packages/devui/frontend/src/components/features/agent/agent-details-modal.tsx b/python/packages/devui/frontend/src/components/features/agent/agent-details-modal.tsx
index f9fa4480a0..117e6e2e95 100644
--- a/python/packages/devui/frontend/src/components/features/agent/agent-details-modal.tsx
+++ b/python/packages/devui/frontend/src/components/features/agent/agent-details-modal.tsx
@@ -161,7 +161,7 @@ export function AgentDetailsModal({
)}
- {/* Tools and Middleware Grid */}
+ {/* Tools and MiddlewareTypes Grid */}
{/* Tools */}
{agent.tools && agent.tools.length > 0 && (
diff --git a/python/packages/devui/tests/test_checkpoints.py b/python/packages/devui/tests/test_checkpoints.py
index fbaf8734cd..17841c77eb 100644
--- a/python/packages/devui/tests/test_checkpoints.py
+++ b/python/packages/devui/tests/test_checkpoints.py
@@ -338,7 +338,7 @@ async def test_manual_checkpoint_save_via_injected_storage(self, checkpoint_mana
checkpoint_storage = checkpoint_manager.get_checkpoint_storage(conversation_id)
# Set build-time storage (equivalent to .with_checkpointing() at build time)
- # Note: In production, DevUI uses runtime injection via run_stream() parameter
+ # Note: In production, DevUI uses runtime injection via run(stream=True) parameter
if hasattr(test_workflow, "_runner") and hasattr(test_workflow._runner, "context"):
test_workflow._runner.context._checkpoint_storage = checkpoint_storage
@@ -406,7 +406,7 @@ async def test_workflow_auto_saves_checkpoints_to_injected_storage(self, checkpo
3. Framework automatically saves checkpoint to our storage
4. Checkpoint is accessible via manager for UI to list/resume
- Note: In production, DevUI passes checkpoint_storage to run_stream() as runtime parameter.
+ Note: In production, DevUI passes checkpoint_storage to run(stream=True) as runtime parameter.
This test uses build-time injection to verify framework's checkpoint auto-save behavior.
"""
entity_id = "test_entity"
@@ -427,7 +427,7 @@ async def test_workflow_auto_saves_checkpoints_to_injected_storage(self, checkpo
# Run workflow until it reaches IDLE_WITH_PENDING_REQUESTS (after checkpoint is created)
saw_request_event = False
- async for event in test_workflow.run_stream(WorkflowTestData(value="test")):
+ async for event in test_workflow.run(WorkflowTestData(value="test"), stream=True):
if isinstance(event, RequestInfoEvent):
saw_request_event = True
# Wait for IDLE_WITH_PENDING_REQUESTS status (comes after checkpoint creation)
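The two notes above describe runtime injection: DevUI now hands checkpoint_storage to run(stream=True) instead of baking it in at build time. A minimal sketch of that call shape; the exact keyword name for the storage parameter is an assumption, since this patch only names it in prose:

    from agent_framework import RequestInfoEvent  # import path assumed

    async def drive(workflow, data, storage):
        # `checkpoint_storage` as a runtime keyword follows the notes above;
        # the parameter name itself is not confirmed by this patch.
        async for event in workflow.run(data, stream=True, checkpoint_storage=storage):
            if isinstance(event, RequestInfoEvent):
                break  # workflow is idle with a pending request; hand off to the UI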
diff --git a/python/packages/devui/tests/test_cleanup_hooks.py b/python/packages/devui/tests/test_cleanup_hooks.py
index 68c8ff6af2..f52cdbc2cf 100644
--- a/python/packages/devui/tests/test_cleanup_hooks.py
+++ b/python/packages/devui/tests/test_cleanup_hooks.py
@@ -7,7 +7,7 @@
from pathlib import Path
import pytest
-from agent_framework import AgentResponse, ChatMessage, Content
+from agent_framework import AgentResponse, ChatMessage, Content, Role
from agent_framework_devui import register_cleanup
from agent_framework_devui._discovery import EntityDiscovery
@@ -33,10 +33,18 @@ def __init__(self, name: str = "TestAgent"):
self.cleanup_called = False
self.async_cleanup_called = False
- async def run_stream(self, messages=None, *, thread=None, **kwargs):
- """Mock streaming run method."""
- yield AgentResponse(
- messages=[ChatMessage("assistant", [Content.from_text(text="Test response")])],
+ async def run(self, messages=None, *, stream: bool = False, thread=None, **kwargs):
+ """Mock run method with streaming support."""
+ if stream:
+
+ async def _stream():
+ yield AgentResponse(
+ messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="Test response")])],
+ )
+
+ return _stream()
+ return AgentResponse(
+ messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="Test response")])],
)
@@ -277,9 +285,16 @@ class TestAgent:
name = "Test Agent"
description = "Test agent with cleanup"
- async def run_stream(self, messages=None, *, thread=None, **kwargs):
- yield AgentResponse(
- messages=[ChatMessage("assistant", [Content.from_text(text="Test")])],
+ async def run(self, messages=None, *, stream: bool = False, thread=None, **kwargs):
+ if stream:
+ async def _stream():
+ yield AgentResponse(
+ messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="Test")])],
+ inner_messages=[],
+ )
+ return _stream()
+ return AgentResponse(
+ messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="Test")])],
inner_messages=[],
)
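The reworked mock above captures the convention this whole patch migrates to: a single run() entry point that hands back an async generator when stream=True and an awaitable response otherwise. Distilled to plain Python, without the framework types:

    from collections.abc import AsyncIterator

    class DualModeAgent:
        def run(self, messages=None, *, stream: bool = False, **kwargs):
            if stream:
                return self._stream()   # async generator: iterate with `async for`
            return self._respond()      # coroutine: resolve with `await`

        async def _respond(self) -> str:
            return "final response"

        async def _stream(self) -> AsyncIterator[str]:
            for chunk in ("final ", "response"):
                yield chunk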
diff --git a/python/packages/devui/tests/test_conversations.py b/python/packages/devui/tests/test_conversations.py
index cd1451f79b..dbc2e4ddb2 100644
--- a/python/packages/devui/tests/test_conversations.py
+++ b/python/packages/devui/tests/test_conversations.py
@@ -216,7 +216,7 @@ async def test_list_items_converts_function_calls():
# Simulate messages from agent execution with function calls
messages = [
- ChatMessage("user", [{"type": "text", "text": "What's the weather in SF?"}]),
+ ChatMessage(role="user", contents=[{"type": "text", "text": "What's the weather in SF?"}]),
ChatMessage(
role="assistant",
contents=[
@@ -238,7 +238,7 @@ async def test_list_items_converts_function_calls():
}
],
),
- ChatMessage("assistant", [{"type": "text", "text": "The weather is sunny, 65°F"}]),
+ ChatMessage(role="assistant", contents=[{"type": "text", "text": "The weather is sunny, 65°F"}]),
]
# Add messages to thread
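ChatMessage construction is now keyword-only; positional ChatMessage("user", [...]) calls are rewritten throughout this patch. A quick sketch of the two shapes the patch uses, contents= and the text= shorthand seen in test_helpers.py and test_shim.py below:

    from agent_framework import ChatMessage, Content

    msg = ChatMessage(role="user", contents=[Content.from_text(text="What's the weather in SF?")])
    short = ChatMessage(role="assistant", text="The weather is sunny, 65°F")  # text= shorthand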
diff --git a/python/packages/devui/tests/test_discovery.py b/python/packages/devui/tests/test_discovery.py
index 8b0cf9fb3a..58388a8b5f 100644
--- a/python/packages/devui/tests/test_discovery.py
+++ b/python/packages/devui/tests/test_discovery.py
@@ -89,7 +89,7 @@ async def test_discovery_accepts_agents_with_only_run():
class NonStreamingAgent:
id = "non_streaming"
name = "Non-Streaming Agent"
- description = "Agent without run_stream"
+ description = "Agent with run() method"
async def run(self, messages=None, *, thread=None, **kwargs):
return AgentResponse(
@@ -125,7 +125,6 @@ def get_new_thread(self, **kwargs):
enriched = discovery.get_entity_info(entity.id)
assert enriched.type == "agent" # Now correctly identified
assert enriched.name == "Non-Streaming Agent"
- assert not enriched.metadata.get("has_run_stream")
async def test_lazy_loading():
@@ -210,7 +209,7 @@ class TestAgent:
async def run(self, messages=None, *, thread=None, **kwargs):
return AgentResponse(
- messages=[ChatMessage("assistant", [Content.from_text(text="test")])],
+ messages=[ChatMessage(role="assistant", contents=[Content.from_text(text="test")])],
response_id="test"
)
@@ -342,7 +341,7 @@ class WeatherAgent:
name = "Weather Agent"
description = "Gets weather information"
- def run_stream(self, input_str):
+ def run(self, input_str, *, stream: bool = False, thread=None, **kwargs):
return f"Weather in {input_str}"
""")
diff --git a/python/packages/devui/tests/test_execution.py b/python/packages/devui/tests/test_execution.py
index ce763d227e..79a6865c71 100644
--- a/python/packages/devui/tests/test_execution.py
+++ b/python/packages/devui/tests/test_execution.py
@@ -564,23 +564,38 @@ def test_extract_workflow_hil_responses_handles_stringified_json():
assert executor._extract_workflow_hil_responses({"email": "test"}) is None
-async def test_executor_handles_non_streaming_agent():
- """Test executor can handle agents with only run() method (no run_stream)."""
- from agent_framework import AgentResponse, AgentThread, ChatMessage, Content
+async def test_executor_handles_streaming_agent():
+ """Test executor handles agents with run(stream=True) method."""
+ from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage, Content, Role
- class NonStreamingAgent:
- """Agent with only run() method - does NOT satisfy full AgentProtocol."""
+ class StreamingAgent:
+ """Agent with run() method supporting stream parameter."""
- id = "non_streaming_test"
- name = "Non-Streaming Test Agent"
- description = "Test agent without run_stream()"
+ id = "streaming_test"
+ name = "Streaming Test Agent"
+ description = "Test agent with run(stream=True)"
- async def run(self, messages=None, *, thread=None, **kwargs):
+ def run(self, messages=None, *, stream=False, thread=None, **kwargs):
+ if stream:
+ # Return an async generator for streaming
+ return self._stream_impl(messages)
+ # Return awaitable for non-streaming
+ return self._run_impl(messages)
+
+ async def _run_impl(self, messages):
return AgentResponse(
- messages=[ChatMessage("assistant", [Content.from_text(text=f"Processed: {messages}")])],
+ messages=[
+ ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=f"Processed: {messages}")])
+ ],
response_id="test_123",
)
+ async def _stream_impl(self, messages):
+ yield AgentResponseUpdate(
+ contents=[Content.from_text(text=f"Processed: {messages}")],
+ role=Role.ASSISTANT,
+ )
+
def get_new_thread(self, **kwargs):
return AgentThread()
@@ -589,11 +604,11 @@ def get_new_thread(self, **kwargs):
mapper = MessageMapper()
executor = AgentFrameworkExecutor(discovery, mapper)
- agent = NonStreamingAgent()
+ agent = StreamingAgent()
entity_info = await discovery.create_entity_info_from_object(agent, source="test")
discovery.register_entity(entity_info.id, entity_info, agent)
- # Execute non-streaming agent (use metadata.entity_id for routing)
+ # Execute streaming agent (use metadata.entity_id for routing)
request = AgentFrameworkRequest(
metadata={"entity_id": entity_info.id},
input="hello",
@@ -604,7 +619,7 @@ def get_new_thread(self, **kwargs):
async for event in executor.execute_streaming(request):
events.append(event)
- # Should get events even though agent doesn't stream
+ # Should get events from streaming agent
assert len(events) > 0
text_events = [e for e in events if hasattr(e, "type") and e.type == "response.output_text.delta"]
assert len(text_events) > 0
@@ -769,9 +784,13 @@ class StreamingAgent:
name = "Streaming Test Agent"
description = "Test agent for streaming"
- async def run_stream(self, input_str):
- for i, word in enumerate(f"Processing {input_str}".split()):
- yield f"word_{i}: {word} "
+ async def run(self, input_str, *, stream: bool = False, thread=None, **kwargs):
+ if stream:
+ async def _stream():
+ for i, word in enumerate(f"Processing {input_str}".split()):
+ yield f"word_{i}: {word} "
+ return _stream()
+ return f"Processing {input_str}"
""")
discovery = EntityDiscovery(str(temp_path))
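Because some agents define run() as a plain def (returning the generator directly) and others as async def (returning it only after an await), callers have to tolerate both shapes, which is what the inspect.isawaitable check in _entities.py later in this patch does. A caller-side sketch:

    import inspect

    async def collect_stream(agent, messages):
        candidate = agent.run(messages, stream=True)
        if inspect.isawaitable(candidate):  # covers `async def run(...)` implementations
            candidate = await candidate
        return [update async for update in candidate]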
diff --git a/python/packages/devui/tests/test_helpers.py b/python/packages/devui/tests/test_helpers.py
index d0d9b36b6e..88ae5a3526 100644
--- a/python/packages/devui/tests/test_helpers.py
+++ b/python/packages/devui/tests/test_helpers.py
@@ -14,7 +14,7 @@
"""
import sys
-from collections.abc import AsyncIterable, MutableSequence
+from collections.abc import AsyncIterable, Awaitable, MutableSequence, Sequence
from typing import Any, Generic
from agent_framework import (
@@ -29,11 +29,15 @@
ChatResponseUpdate,
ConcurrentBuilder,
Content,
+ ResponseStream,
+ Role,
SequentialBuilder,
- use_chat_middleware,
)
from agent_framework._clients import TOptions_co
+from agent_framework._middleware import ChatMiddlewareLayer
+from agent_framework._tools import FunctionInvocationLayer
from agent_framework._workflows._agent_executor import AgentExecutorResponse
+from agent_framework.observability import ChatTelemetryLayer
if sys.version_info >= (3, 12):
from typing import override # type: ignore # pragma: no cover
@@ -73,55 +77,78 @@ def __init__(self) -> None:
async def get_response(
self,
messages: str | ChatMessage | list[str] | list[ChatMessage],
+ *,
+ stream: bool = False,
**kwargs: Any,
- ) -> ChatResponse:
+ ) -> ChatResponse | AsyncIterable[ChatResponseUpdate]:
self.call_count += 1
+ if stream:
+ return self._get_streaming_response_impl()
if self.responses:
return self.responses.pop(0)
- return ChatResponse(messages=ChatMessage("assistant", ["test response"]))
+ return ChatResponse(messages=ChatMessage(role="assistant", text="test response"))
- async def get_streaming_response(
- self,
- messages: str | ChatMessage | list[str] | list[ChatMessage],
- **kwargs: Any,
- ) -> AsyncIterable[ChatResponseUpdate]:
- self.call_count += 1
+ async def _get_streaming_response_impl(self) -> AsyncIterable[ChatResponseUpdate]:
if self.streaming_responses:
for update in self.streaming_responses.pop(0):
yield update
else:
- yield ChatResponseUpdate(contents=[Content.from_text(text="test streaming response")], role="assistant")
+ yield ChatResponseUpdate(text=Content.from_text(text="test streaming response"), role="assistant")
-@use_chat_middleware
-class MockBaseChatClient(BaseChatClient[TOptions_co], Generic[TOptions_co]):
- """Full BaseChatClient mock with middleware support.
+class MockBaseChatClient(
+ ChatMiddlewareLayer[TOptions_co],
+ FunctionInvocationLayer[TOptions_co],
+ ChatTelemetryLayer[TOptions_co],
+ BaseChatClient[TOptions_co],
+ Generic[TOptions_co],
+):
+ """Full ChatClient mock with middleware support.
- Use this when testing features that require the full BaseChatClient interface.
+ Use this when testing features that require the full ChatClient interface.
This goes through all the middleware, message normalization, etc. - only the
actual LLM call is mocked.
"""
def __init__(self, **kwargs: Any):
- super().__init__(**kwargs)
+ super().__init__(function_middleware=[], **kwargs)
self.run_responses: list[ChatResponse] = []
self.streaming_responses: list[list[ChatResponseUpdate]] = []
self.call_count: int = 0
self.received_messages: list[list[ChatMessage]] = []
@override
- async def _inner_get_response(
+ def _inner_get_response(
self,
*,
messages: MutableSequence[ChatMessage],
options: dict[str, Any],
+ stream: bool = False,
**kwargs: Any,
- ) -> ChatResponse:
+ ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]:
self.call_count += 1
self.received_messages.append(list(messages))
- if self.run_responses:
- return self.run_responses.pop(0)
- return ChatResponse(messages=ChatMessage("assistant", ["Mock response from ChatAgent"]))
+ if stream:
+
+ async def _stream() -> AsyncIterable[ChatResponseUpdate]:
+ async for update in self._inner_get_streaming_response(
+ messages=messages,
+ options=options,
+ **kwargs,
+ ):
+ yield update
+
+ def _finalize(updates: Sequence[ChatResponseUpdate]) -> ChatResponse:
+ return ChatResponse.from_chat_response_updates(updates)
+
+ return ResponseStream(_stream(), finalizer=_finalize)
+
+ async def _get_response() -> ChatResponse:
+ if self.run_responses:
+ return self.run_responses.pop(0)
+ return ChatResponse(messages=ChatMessage(role="assistant", text="Mock response from ChatAgent"))
+
+ return _get_response()
@override
async def _inner_get_streaming_response(
@@ -131,17 +158,15 @@ async def _inner_get_streaming_response(
options: dict[str, Any],
**kwargs: Any,
) -> AsyncIterable[ChatResponseUpdate]:
- self.call_count += 1
- self.received_messages.append(list(messages))
if self.streaming_responses:
for update in self.streaming_responses.pop(0):
yield update
else:
# Simulate realistic streaming chunks
- yield ChatResponseUpdate(contents=[Content.from_text(text="Mock ")], role="assistant")
- yield ChatResponseUpdate(contents=[Content.from_text(text="streaming ")], role="assistant")
- yield ChatResponseUpdate(contents=[Content.from_text(text="response ")], role="assistant")
- yield ChatResponseUpdate(contents=[Content.from_text(text="from ChatAgent")], role="assistant")
+ yield ChatResponseUpdate(text=Content.from_text(text="Mock "), role="assistant")
+ yield ChatResponseUpdate(text=Content.from_text(text="streaming "), role="assistant")
+ yield ChatResponseUpdate(text=Content.from_text(text="response "), role="assistant")
+ yield ChatResponseUpdate(text=Content.from_text(text="from ChatAgent"), role="assistant")
# =============================================================================
@@ -163,26 +188,27 @@ def __init__(
self.streaming_chunks = streaming_chunks or [response_text]
self.call_count = 0
- async def run(
+ def run(
self,
messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
*,
+ stream: bool = False,
thread: AgentThread | None = None,
**kwargs: Any,
- ) -> AgentResponse:
+ ) -> AgentResponse | AsyncIterable[AgentResponseUpdate]:
self.call_count += 1
- return AgentResponse(messages=[ChatMessage("assistant", [Content.from_text(text=self.response_text)])])
+ if stream:
+ return self._run_stream_impl()
+ return self._run_impl()
- async def run_stream(
- self,
- messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
- *,
- thread: AgentThread | None = None,
- **kwargs: Any,
- ) -> AsyncIterable[AgentResponseUpdate]:
- self.call_count += 1
+ async def _run_impl(self) -> AgentResponse:
+ return AgentResponse(
+ messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=self.response_text)])]
+ )
+
+ async def _run_stream_impl(self) -> AsyncIterable[AgentResponseUpdate]:
for chunk in self.streaming_chunks:
- yield AgentResponseUpdate(contents=[Content.from_text(text=chunk)], role="assistant")
+ yield AgentResponseUpdate(contents=[Content.from_text(text=chunk)], role=Role.ASSISTANT)
class MockToolCallingAgent(BaseAgent):
@@ -192,28 +218,27 @@ def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self.call_count = 0
- async def run(
+ def run(
self,
messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
*,
+ stream: bool = False,
thread: AgentThread | None = None,
**kwargs: Any,
- ) -> AgentResponse:
+ ) -> AgentResponse | AsyncIterable[AgentResponseUpdate]:
self.call_count += 1
- return AgentResponse(messages=[ChatMessage("assistant", ["done"])])
+ if stream:
+ return self._run_stream_impl()
+ return self._run_impl()
- async def run_stream(
- self,
- messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
- *,
- thread: AgentThread | None = None,
- **kwargs: Any,
- ) -> AsyncIterable[AgentResponseUpdate]:
- self.call_count += 1
+ async def _run_impl(self) -> AgentResponse:
+ return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="done")])
+
+ async def _run_stream_impl(self) -> AsyncIterable[AgentResponseUpdate]:
# First: text
yield AgentResponseUpdate(
contents=[Content.from_text(text="Let me search for that...")],
- role="assistant",
+ role=Role.ASSISTANT,
)
# Second: tool call
yield AgentResponseUpdate(
@@ -224,7 +249,7 @@ async def run_stream(
arguments={"query": "weather"},
)
],
- role="assistant",
+ role=Role.ASSISTANT,
)
# Third: tool result
yield AgentResponseUpdate(
@@ -234,12 +259,12 @@ async def run_stream(
result={"temperature": 72, "condition": "sunny"},
)
],
- role="tool",
+ role=Role.TOOL,
)
# Fourth: final text
yield AgentResponseUpdate(
contents=[Content.from_text(text="The weather is sunny, 72°F.")],
- role="assistant",
+ role=Role.ASSISTANT,
)
@@ -272,7 +297,7 @@ def create_mock_chat_client() -> MockChatClient:
def create_mock_base_chat_client() -> MockBaseChatClient:
- """Create a mock BaseChatClient."""
+ """Create a mock chat client with all layers (middleware, telemetry, function invocation)."""
return MockBaseChatClient()
@@ -292,7 +317,7 @@ def create_mock_tool_agent(id: str = "tool_agent", name: str = "ToolAgent") -> M
def create_agent_run_response(text: str = "Test response") -> AgentResponse:
"""Create an AgentResponse with the given text."""
- return AgentResponse(messages=[ChatMessage("assistant", [Content.from_text(text=text)])])
+ return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=text)])])
def create_agent_executor_response(
@@ -305,8 +330,8 @@ def create_agent_executor_response(
executor_id=executor_id,
agent_response=agent_response,
full_conversation=[
- ChatMessage("user", [Content.from_text(text="User input")]),
- ChatMessage("assistant", [Content.from_text(text=response_text)]),
+ ChatMessage(role=Role.USER, contents=[Content.from_text(text="User input")]),
+ ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=response_text)]),
],
)
@@ -388,8 +413,8 @@ async def create_sequential_workflow() -> tuple[AgentFrameworkExecutor, str, Moc
"""
mock_client = MockBaseChatClient()
mock_client.run_responses = [
- ChatResponse(messages=ChatMessage("assistant", ["Here's the draft content about the topic."])),
- ChatResponse(messages=ChatMessage("assistant", ["Review: Content is clear and well-structured."])),
+ ChatResponse(messages=ChatMessage(role=Role.ASSISTANT, text="Here's the draft content about the topic.")),
+ ChatResponse(messages=ChatMessage(role=Role.ASSISTANT, text="Review: Content is clear and well-structured.")),
]
writer = ChatAgent(
@@ -431,9 +456,9 @@ async def create_concurrent_workflow() -> tuple[AgentFrameworkExecutor, str, Moc
"""
mock_client = MockBaseChatClient()
mock_client.run_responses = [
- ChatResponse(messages=ChatMessage("assistant", ["Research findings: Key data points identified."])),
- ChatResponse(messages=ChatMessage("assistant", ["Analysis: Trends indicate positive growth."])),
- ChatResponse(messages=ChatMessage("assistant", ["Summary: Overall outlook is favorable."])),
+ ChatResponse(messages=ChatMessage(role=Role.ASSISTANT, text="Research findings: Key data points identified.")),
+ ChatResponse(messages=ChatMessage(role=Role.ASSISTANT, text="Analysis: Trends indicate positive growth.")),
+ ChatResponse(messages=ChatMessage(role=Role.ASSISTANT, text="Summary: Overall outlook is favorable.")),
]
researcher = ChatAgent(
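The _inner_get_response rewrite above shows the new streaming contract for chat clients: return a ResponseStream wrapping an async generator, with a finalizer that folds the updates into a ChatResponse. A minimal sketch, assuming ResponseStream(aiter, finalizer=...) is the whole construction surface (it is all this patch exercises):

    from agent_framework import ChatResponse, ChatResponseUpdate, Content, ResponseStream

    async def _updates():
        yield ChatResponseUpdate(text=Content.from_text(text="Mock "), role="assistant")
        yield ChatResponseUpdate(text=Content.from_text(text="response"), role="assistant")

    stream = ResponseStream(_updates(), finalizer=ChatResponse.from_chat_response_updates)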
diff --git a/python/packages/devui/tests/test_mapper.py b/python/packages/devui/tests/test_mapper.py
index 70bf44b773..9a80707916 100644
--- a/python/packages/devui/tests/test_mapper.py
+++ b/python/packages/devui/tests/test_mapper.py
@@ -602,8 +602,8 @@ async def test_workflow_output_event_with_list_data(mapper: MessageMapper, test_
# Sequential/Concurrent workflows often output list[ChatMessage]
messages = [
- ChatMessage("user", [Content.from_text(text="Hello")]),
- ChatMessage("assistant", [Content.from_text(text="World")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="Hello")]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="World")]),
]
event = WorkflowOutputEvent(data=messages, executor_id="complete")
events = await mapper.convert_event(event, test_request)
diff --git a/python/packages/devui/tests/test_multimodal_workflow.py b/python/packages/devui/tests/test_multimodal_workflow.py
index dbd4c4dfae..7defb7254e 100644
--- a/python/packages/devui/tests/test_multimodal_workflow.py
+++ b/python/packages/devui/tests/test_multimodal_workflow.py
@@ -72,7 +72,7 @@ def test_convert_openai_input_to_chat_message_with_image(self):
# Verify result is ChatMessage
assert isinstance(result, ChatMessage), f"Expected ChatMessage, got {type(result)}"
- assert result.role == "user"
+ assert result.role.value == "user"
# Verify contents
assert len(result.contents) == 2, f"Expected 2 contents, got {len(result.contents)}"
@@ -86,9 +86,8 @@ def test_convert_openai_input_to_chat_message_with_image(self):
assert result.contents[1].media_type == "image/png"
assert result.contents[1].uri == TEST_IMAGE_DATA_URI
- def test_parse_workflow_input_handles_json_string_with_multimodal(self):
+ async def test_parse_workflow_input_handles_json_string_with_multimodal(self):
"""Test that _parse_workflow_input correctly handles JSON string with multimodal content."""
- import asyncio
from agent_framework import ChatMessage
@@ -113,7 +112,7 @@ def test_parse_workflow_input_handles_json_string_with_multimodal(self):
mock_workflow = MagicMock()
# Parse the input
- result = asyncio.run(executor._parse_workflow_input(mock_workflow, json_string_input))
+ result = await executor._parse_workflow_input(mock_workflow, json_string_input)
# Verify result is ChatMessage with multimodal content
assert isinstance(result, ChatMessage), f"Expected ChatMessage, got {type(result)}"
@@ -127,9 +126,8 @@ def test_parse_workflow_input_handles_json_string_with_multimodal(self):
assert result.contents[1].type == "data"
assert result.contents[1].media_type == "image/png"
- def test_parse_workflow_input_still_handles_simple_dict(self):
+ async def test_parse_workflow_input_still_handles_simple_dict(self):
"""Test that simple dict input still works (backward compatibility)."""
- import asyncio
from agent_framework import ChatMessage
@@ -148,7 +146,7 @@ def test_parse_workflow_input_still_handles_simple_dict(self):
mock_workflow.get_start_executor.return_value = mock_executor
# Parse the input
- result = asyncio.run(executor._parse_workflow_input(mock_workflow, json_string_input))
+ result = await executor._parse_workflow_input(mock_workflow, json_string_input)
# Result should be ChatMessage (from _parse_structured_workflow_input)
assert isinstance(result, ChatMessage), f"Expected ChatMessage, got {type(result)}"
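The .value assertions reflect that message roles are now Role enum members rather than bare strings. A small sketch of the comparison style this patch settles on (combining a Role member with the text= shorthand is an assumption; the patch uses them separately):

    from agent_framework import ChatMessage, Role

    msg = ChatMessage(role=Role.USER, text="hi")
    assert msg.role.value == "user"  # enum member, not a bare string
    assert msg.role == Role.USER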
diff --git a/python/packages/devui/tests/test_server.py b/python/packages/devui/tests/test_server.py
index ac835bdfb5..907a6de890 100644
--- a/python/packages/devui/tests/test_server.py
+++ b/python/packages/devui/tests/test_server.py
@@ -159,6 +159,7 @@ async def test_credential_cleanup() -> None:
mock_client = Mock()
mock_client.async_credential = mock_credential
mock_client.model_id = "test-model"
+ mock_client.function_invocation_configuration = None
# Create agent with mock client
agent = ChatAgent(name="TestAgent", chat_client=mock_client, instructions="Test agent")
@@ -191,6 +192,7 @@ async def test_credential_cleanup_error_handling() -> None:
mock_client = Mock()
mock_client.async_credential = mock_credential
mock_client.model_id = "test-model"
+ mock_client.function_invocation_configuration = None
# Create agent with mock client
agent = ChatAgent(name="TestAgent", chat_client=mock_client, instructions="Test agent")
@@ -225,6 +227,7 @@ async def test_multiple_credential_attributes() -> None:
mock_client.credential = mock_cred1
mock_client.async_credential = mock_cred2
mock_client.model_id = "test-model"
+ mock_client.function_invocation_configuration = None
# Create agent with mock client
agent = ChatAgent(name="TestAgent", chat_client=mock_client, instructions="Test agent")
@@ -346,7 +349,7 @@ class WeatherAgent:
name = "Weather Agent"
description = "Gets weather information"
- def run_stream(self, input_str):
+ def run(self, input_str, *, stream: bool = False, thread=None, **kwargs):
return f"Weather in {input_str} is sunny"
""")
diff --git a/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py b/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py
index aabfa4bf08..af4e369a7b 100644
--- a/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py
+++ b/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py
@@ -817,7 +817,7 @@ def from_chat_message(chat_message: ChatMessage) -> DurableAgentStateMessage:
]
return DurableAgentStateMessage(
- role=chat_message.role,
+ role=chat_message.role.value if hasattr(chat_message.role, "value") else str(chat_message.role),
contents=contents_list,
author_name=chat_message.author_name,
extension_data=dict(chat_message.additional_properties) if chat_message.additional_properties else None,
diff --git a/python/packages/durabletask/agent_framework_durabletask/_entities.py b/python/packages/durabletask/agent_framework_durabletask/_entities.py
index c842d58fe7..ad54888410 100644
--- a/python/packages/durabletask/agent_framework_durabletask/_entities.py
+++ b/python/packages/durabletask/agent_framework_durabletask/_entities.py
@@ -6,6 +6,7 @@
import inspect
from collections.abc import AsyncIterable
+from datetime import datetime, timezone
from typing import Any, cast
from agent_framework import (
@@ -177,7 +178,10 @@ async def run(
error_message = ChatMessage(
role="assistant", contents=[Content.from_error(message=str(exc), error_code=type(exc).__name__)]
)
- error_response = AgentResponse(messages=[error_message])
+ error_response = AgentResponse(
+ messages=[error_message],
+ created_at=datetime.now(tz=timezone.utc).isoformat(),
+ )
error_state_response = DurableAgentStateResponse.from_run_response(correlation_id, error_response)
error_state_response.is_error = True
@@ -202,32 +206,33 @@ async def _invoke_agent(
request_message=request_message,
)
- run_stream_callable = getattr(self.agent, "run_stream", None)
- if callable(run_stream_callable):
- try:
- stream_candidate = run_stream_callable(**run_kwargs)
- if inspect.isawaitable(stream_candidate):
- stream_candidate = await stream_candidate
-
- return await self._consume_stream(
- stream=cast(AsyncIterable[AgentResponseUpdate], stream_candidate),
- callback_context=callback_context,
- )
- except TypeError as type_error:
- if "__aiter__" not in str(type_error):
- raise
- logger.debug(
- "run_stream returned a non-async result; falling back to run(): %s",
- type_error,
- )
- except Exception as stream_error:
- logger.warning(
- "run_stream failed; falling back to run(): %s",
- stream_error,
- exc_info=True,
- )
- else:
- logger.debug("Agent does not expose run_stream; falling back to run().")
+ run_callable = getattr(self.agent, "run", None)
+ if run_callable is None or not callable(run_callable):
+ raise AttributeError("Agent does not implement run() method")
+
+ # Try streaming first with run(stream=True)
+ try:
+ stream_candidate = run_callable(stream=True, **run_kwargs)
+ if inspect.isawaitable(stream_candidate):
+ stream_candidate = await stream_candidate
+
+ return await self._consume_stream(
+ stream=cast(AsyncIterable[AgentResponseUpdate], stream_candidate),
+ callback_context=callback_context,
+ )
+ except TypeError as type_error:
+ if "__aiter__" not in str(type_error) and "stream" not in str(type_error):
+ raise
+ logger.debug(
+ "run(stream=True) returned a non-async result; falling back to run(): %s",
+ type_error,
+ )
+ except Exception as stream_error:
+ logger.warning(
+ "run(stream=True) failed; falling back to run(): %s",
+ stream_error,
+ exc_info=True,
+ )
agent_run_response = await self._invoke_non_stream(run_kwargs)
await self._notify_final_response(agent_run_response, callback_context)
@@ -246,7 +251,7 @@ async def _consume_stream(
await self._notify_stream_update(update, callback_context)
if updates:
- response = AgentResponse.from_updates(updates)
+ response = AgentResponse.from_agent_run_response_updates(updates)
else:
logger.debug("[AgentEntity] No streaming updates received; creating empty response")
response = AgentResponse(messages=[])
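from_updates is renamed to from_agent_run_response_updates, and _consume_stream's consume-then-fold shape above is the canonical way to turn a stream back into a full response. In isolation:

    from agent_framework import AgentResponse

    async def fold(stream):
        updates = [update async for update in stream]
        if updates:
            return AgentResponse.from_agent_run_response_updates(updates)
        return AgentResponse(messages=[])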
diff --git a/python/packages/durabletask/agent_framework_durabletask/_shim.py b/python/packages/durabletask/agent_framework_durabletask/_shim.py
index a624cdc8b5..3291b8bfdc 100644
--- a/python/packages/durabletask/agent_framework_durabletask/_shim.py
+++ b/python/packages/durabletask/agent_framework_durabletask/_shim.py
@@ -10,10 +10,9 @@
from __future__ import annotations
from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator
-from typing import Any, Generic, TypeVar
+from typing import Any, Generic, Literal, TypeVar
-from agent_framework import AgentProtocol, AgentResponseUpdate, AgentThread, ChatMessage
+from agent_framework import AgentProtocol, AgentThread, ChatMessage
from ._executors import DurableAgentExecutor
from ._models import DurableAgentThread
@@ -89,6 +88,7 @@ def run( # type: ignore[override]
self,
messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
*,
+ stream: Literal[False] = False,
thread: AgentThread | None = None,
options: dict[str, Any] | None = None,
) -> TaskT:
@@ -96,6 +96,8 @@ def run( # type: ignore[override]
Args:
messages: The message(s) to send to the agent
+ stream: Whether to use streaming for the response (must be False)
+ DurableAgents do not support streaming mode.
thread: Optional agent thread for conversation context
options: Optional options dictionary. Supported keys include
``response_format``, ``enable_tool_calls``, and ``wait_for_response``.
@@ -115,6 +117,8 @@ def run( # type: ignore[override]
Raises:
ValueError: If wait_for_response=False is used in an unsupported context
"""
+ if stream is not False:
+ raise ValueError("DurableAIAgent does not support streaming mode (stream must be False)")
message_str = self._normalize_messages(messages)
run_request = self._executor.get_run_request(
@@ -128,25 +132,6 @@ def run( # type: ignore[override]
thread=thread,
)
- def run_stream( # type: ignore[override]
- self,
- messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
- *,
- thread: AgentThread | None = None,
- **kwargs: Any,
- ) -> AsyncIterator[AgentResponseUpdate]:
- """Run the agent with streaming (not supported for durable agents).
-
- Args:
- messages: The message(s) to send to the agent
- thread: Optional agent thread for conversation context
- **kwargs: Additional arguments
-
- Raises:
- NotImplementedError: Streaming is not supported for durable agents
- """
- raise NotImplementedError("Streaming is not supported for durable agents")
-
def get_new_thread(self, **kwargs: Any) -> DurableAgentThread:
"""Create a new agent thread via the provider."""
return self._executor.get_new_thread(self.name, **kwargs)
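Typing stream as Literal[False] lets a type checker reject stream=True on durable agents statically, while the new ValueError covers untyped callers. A usage sketch, where durable_agent stands in for any DurableAIAgent instance:

    def exercise_guard(durable_agent):
        # `durable_agent` is assumed to be a DurableAIAgent (see _shim.py above).
        task = durable_agent.run("Summarize the incident")  # OK: stream defaults to False
        try:
            durable_agent.run("Summarize", stream=True)     # flagged by type checkers...
        except ValueError:
            pass                                            # ...and rejected at runtime
        return task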
diff --git a/python/packages/durabletask/tests/test_durable_entities.py b/python/packages/durabletask/tests/test_durable_entities.py
index acebcd8492..2ffd0aa370 100644
--- a/python/packages/durabletask/tests/test_durable_entities.py
+++ b/python/packages/durabletask/tests/test_durable_entities.py
@@ -81,8 +81,27 @@ def _role_value(chat_message: DurableAgentStateMessage) -> str:
def _agent_response(text: str | None) -> AgentResponse:
"""Create an AgentResponse with a single assistant message."""
- message = ChatMessage("assistant", [text]) if text is not None else ChatMessage("assistant", [])
- return AgentResponse(messages=[message])
+ message = ChatMessage(role="assistant", text=text) if text is not None else ChatMessage(role="assistant", text="")
+ return AgentResponse(messages=[message], created_at="2024-01-01T00:00:00Z")
+
+
+def _create_mock_run(response: AgentResponse | None = None, side_effect: Exception | None = None):
+ """Create a mock run function that handles stream parameter correctly.
+
+ The durabletask entity code tries run(stream=True) first, then falls back to run(stream=False).
+ This helper creates a mock that raises TypeError for streaming (to trigger fallback) and
+ returns the response or raises the side_effect for non-streaming.
+ """
+
+ async def mock_run(*args, stream=False, **kwargs):
+ if stream:
+ # Simulate "streaming not supported" to trigger fallback
+ raise TypeError("streaming not supported")
+ if side_effect:
+ raise side_effect
+ return response
+
+ return mock_run
class RecordingCallback:
@@ -194,7 +213,14 @@ async def test_run_executes_agent(self) -> None:
"""Test that run executes the agent."""
mock_agent = Mock()
mock_response = _agent_response("Test response")
- mock_agent.run = AsyncMock(return_value=mock_response)
+
+ # Mock run() to return response for non-streaming, raise for streaming (to test fallback)
+ async def mock_run(*args, stream=False, **kwargs):
+ if stream:
+ raise TypeError("streaming not supported")
+ return mock_response
+
+ mock_agent.run = mock_run
entity = _make_entity(mock_agent)
@@ -203,22 +229,12 @@ async def test_run_executes_agent(self) -> None:
"correlationId": "corr-entity-1",
})
- # Verify agent.run was called
- mock_agent.run.assert_called_once()
- _, kwargs = mock_agent.run.call_args
- sent_messages: list[Any] = kwargs.get("messages")
- assert len(sent_messages) == 1
- sent_message = sent_messages[0]
- assert isinstance(sent_message, ChatMessage)
- assert getattr(sent_message, "text", None) == "Test message"
- assert getattr(sent_message.role, "value", sent_message.role) == "user"
-
# Verify result
assert isinstance(result, AgentResponse)
assert result.text == "Test response"
async def test_run_agent_streaming_callbacks_invoked(self) -> None:
- """Ensure streaming updates trigger callbacks and run() is not used."""
+ """Ensure streaming updates trigger callbacks when using run(stream=True)."""
updates = [
AgentResponseUpdate(contents=[Content.from_text(text="Hello")]),
AgentResponseUpdate(contents=[Content.from_text(text=" world")]),
@@ -230,8 +246,14 @@ async def update_generator() -> AsyncIterator[AgentResponseUpdate]:
mock_agent = Mock()
mock_agent.name = "StreamingAgent"
- mock_agent.run_stream = Mock(return_value=update_generator())
- mock_agent.run = AsyncMock(side_effect=AssertionError("run() should not be called when streaming succeeds"))
+
+ # Mock run() to return async generator when stream=True
+ def mock_run(*args, stream=False, **kwargs):
+ if stream:
+ return update_generator()
+ raise AssertionError("run(stream=False) should not be called when streaming succeeds")
+
+ mock_agent.run = mock_run
callback = RecordingCallback()
entity = _make_entity(mock_agent, callback=callback, thread_id="session-1")
@@ -247,7 +269,6 @@ async def update_generator() -> AsyncIterator[AgentResponseUpdate]:
assert "Hello" in result.text
assert callback.stream_mock.await_count == len(updates)
assert callback.response_mock.await_count == 1
- mock_agent.run.assert_not_called()
# Validate callback arguments
stream_calls = callback.stream_mock.await_args_list
@@ -272,9 +293,8 @@ async def test_run_agent_final_callback_without_streaming(self) -> None:
"""Ensure the final callback fires even when streaming is unavailable."""
mock_agent = Mock()
mock_agent.name = "NonStreamingAgent"
- mock_agent.run_stream = None
agent_response = _agent_response("Final response")
- mock_agent.run = AsyncMock(return_value=agent_response)
+ mock_agent.run = _create_mock_run(response=agent_response)
callback = RecordingCallback()
entity = _make_entity(mock_agent, callback=callback, thread_id="session-2")
@@ -304,7 +324,7 @@ async def test_run_agent_updates_conversation_history(self) -> None:
"""Test that run_agent updates the conversation history."""
mock_agent = Mock()
mock_response = _agent_response("Agent response")
- mock_agent.run = AsyncMock(return_value=mock_response)
+ mock_agent.run = _create_mock_run(response=mock_response)
entity = _make_entity(mock_agent)
@@ -327,7 +347,7 @@ async def test_run_agent_updates_conversation_history(self) -> None:
async def test_run_agent_increments_message_count(self) -> None:
"""Test that run_agent increments the message count."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -345,7 +365,7 @@ async def test_run_agent_increments_message_count(self) -> None:
async def test_run_requires_entity_thread_id(self) -> None:
"""Test that AgentEntity.run rejects missing entity thread identifiers."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent, thread_id="")
@@ -355,7 +375,7 @@ async def test_run_requires_entity_thread_id(self) -> None:
async def test_run_agent_multiple_conversations(self) -> None:
"""Test that run_agent maintains history across multiple messages."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -419,7 +439,7 @@ def test_reset_clears_message_count(self) -> None:
async def test_reset_after_conversation(self) -> None:
"""Test reset after a full conversation."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -445,7 +465,7 @@ class TestErrorHandling:
async def test_run_agent_handles_agent_exception(self) -> None:
"""Test that run_agent handles agent exceptions."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(side_effect=Exception("Agent failed"))
+ mock_agent.run = _create_mock_run(side_effect=Exception("Agent failed"))
entity = _make_entity(mock_agent)
@@ -461,7 +481,7 @@ async def test_run_agent_handles_agent_exception(self) -> None:
async def test_run_agent_handles_value_error(self) -> None:
"""Test that run_agent handles ValueError instances."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(side_effect=ValueError("Invalid input"))
+ mock_agent.run = _create_mock_run(side_effect=ValueError("Invalid input"))
entity = _make_entity(mock_agent)
@@ -477,7 +497,7 @@ async def test_run_agent_handles_value_error(self) -> None:
async def test_run_agent_handles_timeout_error(self) -> None:
"""Test that run_agent handles TimeoutError instances."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(side_effect=TimeoutError("Request timeout"))
+ mock_agent.run = _create_mock_run(side_effect=TimeoutError("Request timeout"))
entity = _make_entity(mock_agent)
@@ -492,7 +512,7 @@ async def test_run_agent_handles_timeout_error(self) -> None:
async def test_run_agent_preserves_message_on_error(self) -> None:
"""Test that run_agent preserves message information on error."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(side_effect=Exception("Error"))
+ mock_agent.run = _create_mock_run(side_effect=Exception("Error"))
entity = _make_entity(mock_agent)
@@ -513,7 +533,7 @@ class TestConversationHistory:
async def test_conversation_history_has_timestamps(self) -> None:
"""Test that conversation history entries include timestamps."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -533,17 +553,17 @@ async def test_conversation_history_ordering(self) -> None:
entity = _make_entity(mock_agent)
# Send multiple messages with different responses
- mock_agent.run = AsyncMock(return_value=_agent_response("Response 1"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response 1"))
await entity.run(
{"message": "Message 1", "correlationId": "corr-entity-history-2a"},
)
- mock_agent.run = AsyncMock(return_value=_agent_response("Response 2"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response 2"))
await entity.run(
{"message": "Message 2", "correlationId": "corr-entity-history-2b"},
)
- mock_agent.run = AsyncMock(return_value=_agent_response("Response 3"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response 3"))
await entity.run(
{"message": "Message 3", "correlationId": "corr-entity-history-2c"},
)
@@ -561,7 +581,7 @@ async def test_conversation_history_ordering(self) -> None:
async def test_conversation_history_role_alternation(self) -> None:
"""Test that conversation history alternates between user and assistant roles."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -587,7 +607,7 @@ class TestRunRequestSupport:
async def test_run_agent_with_run_request_object(self) -> None:
"""Test run_agent with a RunRequest object."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -606,7 +626,7 @@ async def test_run_agent_with_run_request_object(self) -> None:
async def test_run_agent_with_dict_request(self) -> None:
"""Test run_agent with a dictionary request."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -625,7 +645,7 @@ async def test_run_agent_with_dict_request(self) -> None:
async def test_run_agent_with_string_raises_without_correlation(self) -> None:
"""Test that run_agent rejects legacy string input without correlation ID."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -635,7 +655,7 @@ async def test_run_agent_with_string_raises_without_correlation(self) -> None:
async def test_run_agent_stores_role_in_history(self) -> None:
"""Test that run_agent stores the role in conversation history."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -657,7 +677,7 @@ async def test_run_agent_with_response_format(self) -> None:
"""Test run_agent with a JSON response format."""
mock_agent = Mock()
# Return JSON response
- mock_agent.run = AsyncMock(return_value=_agent_response('{"answer": 42}'))
+ mock_agent.run = _create_mock_run(response=_agent_response('{"answer": 42}'))
entity = _make_entity(mock_agent)
@@ -676,7 +696,7 @@ async def test_run_agent_with_response_format(self) -> None:
async def test_run_agent_disable_tool_calls(self) -> None:
"""Test run_agent with tool calls disabled."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
+ mock_agent.run = _create_mock_run(response=_agent_response("Response"))
entity = _make_entity(mock_agent)
@@ -686,7 +706,7 @@ async def test_run_agent_disable_tool_calls(self) -> None:
assert isinstance(result, AgentResponse)
# Agent should have been called (tool disabling is framework-dependent)
- mock_agent.run.assert_called_once()
+ assert result.text == "Response"
if __name__ == "__main__":
diff --git a/python/packages/durabletask/tests/test_executors.py b/python/packages/durabletask/tests/test_executors.py
index 802007541f..745b8e0ca4 100644
--- a/python/packages/durabletask/tests/test_executors.py
+++ b/python/packages/durabletask/tests/test_executors.py
@@ -241,7 +241,7 @@ def test_fire_and_forget_returns_empty_response(self, mock_client: Mock) -> None
# Verify it contains an acceptance message
assert isinstance(result, AgentResponse)
assert len(result.messages) == 1
- assert result.messages[0].role == "system"
+ assert result.messages[0].role.value == "system"
# Check message contains key information
message_text = result.messages[0].text
assert "accepted" in message_text.lower()
@@ -294,7 +294,7 @@ def test_orchestration_fire_and_forget_returns_acceptance_response(self, mock_or
response = result.get_result()
assert isinstance(response, AgentResponse)
assert len(response.messages) == 1
- assert response.messages[0].role == "system"
+ assert response.messages[0].role.value == "system"
assert "test-789" in response.messages[0].text
def test_orchestration_blocking_mode_calls_call_entity(self, mock_orchestration_context: Mock) -> None:
@@ -392,7 +392,7 @@ def test_durable_agent_task_transforms_successful_result(
result = task.get_result()
assert isinstance(result, AgentResponse)
assert len(result.messages) == 1
- assert result.messages[0].role == "assistant"
+ assert result.messages[0].role.value == "assistant"
def test_durable_agent_task_propagates_failure(self, configure_failed_entity_task: Any) -> None:
"""Verify DurableAgentTask propagates task failures."""
@@ -519,8 +519,8 @@ def test_durable_agent_task_handles_multiple_messages(self, configure_successful
result = task.get_result()
assert isinstance(result, AgentResponse)
assert len(result.messages) == 2
- assert result.messages[0].role == "assistant"
- assert result.messages[1].role == "assistant"
+ assert result.messages[0].role.value == "assistant"
+ assert result.messages[1].role.value == "assistant"
def test_durable_agent_task_is_not_complete_initially(self, mock_entity_task: Mock) -> None:
"""Verify DurableAgentTask is not complete when first created."""
diff --git a/python/packages/durabletask/tests/test_shim.py b/python/packages/durabletask/tests/test_shim.py
index d1b0cf2cab..26988edca4 100644
--- a/python/packages/durabletask/tests/test_shim.py
+++ b/python/packages/durabletask/tests/test_shim.py
@@ -77,7 +77,7 @@ def test_run_accepts_string_message(self, test_agent: DurableAIAgent[Any], mock_
def test_run_accepts_chat_message(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None:
"""Verify run accepts and normalizes ChatMessage objects."""
- chat_msg = ChatMessage("user", ["Test message"])
+ chat_msg = ChatMessage(role="user", text="Test message")
test_agent.run(chat_msg)
mock_executor.run_durable_agent.assert_called_once()
@@ -95,8 +95,8 @@ def test_run_accepts_list_of_strings(self, test_agent: DurableAIAgent[Any], mock
def test_run_accepts_list_of_chat_messages(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None:
"""Verify run accepts and joins list of ChatMessage objects."""
messages = [
- ChatMessage("user", ["Message 1"]),
- ChatMessage("assistant", ["Message 2"]),
+ ChatMessage(role="user", text="Message 1"),
+ ChatMessage(role="assistant", text="Message 2"),
]
test_agent.run(messages)
diff --git a/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py b/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py
index 380bd64f7b..0ee6ce4ab0 100644
--- a/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py
+++ b/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py
@@ -1,13 +1,22 @@
# Copyright (c) Microsoft. All rights reserved.
+from __future__ import annotations
+
import sys
+from collections.abc import Sequence
from typing import Any, ClassVar, Generic
-from agent_framework import ChatOptions, use_chat_middleware, use_function_invocation
+from agent_framework import (
+ ChatAndFunctionMiddlewareTypes,
+ ChatMiddlewareLayer,
+ ChatOptions,
+ FunctionInvocationConfiguration,
+ FunctionInvocationLayer,
+)
from agent_framework._pydantic import AFBaseSettings
from agent_framework.exceptions import ServiceInitializationError
-from agent_framework.observability import use_instrumentation
-from agent_framework.openai._chat_client import OpenAIBaseChatClient
+from agent_framework.observability import ChatTelemetryLayer
+from agent_framework.openai._chat_client import RawOpenAIChatClient
from foundry_local import FoundryLocalManager
from foundry_local.models import DeviceType
from openai import AsyncOpenAI
@@ -22,6 +31,7 @@
else:
from typing_extensions import TypedDict # type: ignore # pragma: no cover
+
__all__ = [
"FoundryLocalChatOptions",
"FoundryLocalClient",
@@ -126,11 +136,14 @@ class FoundryLocalSettings(AFBaseSettings):
model_id: str
-@use_function_invocation
-@use_instrumentation
-@use_chat_middleware
-class FoundryLocalClient(OpenAIBaseChatClient[TFoundryLocalChatOptions], Generic[TFoundryLocalChatOptions]):
- """Foundry Local Chat completion class."""
+class FoundryLocalClient(
+ ChatMiddlewareLayer[TFoundryLocalChatOptions],
+ FunctionInvocationLayer[TFoundryLocalChatOptions],
+ ChatTelemetryLayer[TFoundryLocalChatOptions],
+ RawOpenAIChatClient[TFoundryLocalChatOptions],
+ Generic[TFoundryLocalChatOptions],
+):
+ """Foundry Local Chat completion class with middleware, telemetry, and function invocation support."""
def __init__(
self,
@@ -140,6 +153,8 @@ def __init__(
timeout: float | None = None,
prepare_model: bool = True,
device: DeviceType | None = None,
+ middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None,
+ function_invocation_configuration: FunctionInvocationConfiguration | None = None,
env_file_path: str | None = None,
env_file_encoding: str = "utf-8",
**kwargs: Any,
@@ -161,9 +176,11 @@ def __init__(
The device is used to select the appropriate model variant.
If not provided, the default device for your system will be used.
The values are in the foundry_local.models.DeviceType enum.
+ middleware: Optional sequence of ChatAndFunctionMiddlewareTypes to apply to requests.
+ function_invocation_configuration: Optional configuration for function invocation support.
env_file_path: If provided, the .env settings are read from this file path location.
env_file_encoding: The encoding of the .env file, defaults to 'utf-8'.
- kwargs: Additional keyword arguments, are passed to the OpenAIBaseChatClient.
+ kwargs: Additional keyword arguments, passed to the RawOpenAIChatClient.
This can include middleware and additional properties.
Examples:
@@ -254,6 +271,8 @@ class MyOptions(FoundryLocalChatOptions, total=False):
super().__init__(
model_id=model_info.id,
client=AsyncOpenAI(base_url=manager.endpoint, api_key=manager.api_key),
+ middleware=middleware,
+ function_invocation_configuration=function_invocation_configuration,
**kwargs,
)
self.manager = manager
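The decorator stack is replaced by explicit layer base classes ahead of the raw client in the MRO, with middleware and function_invocation_configuration threaded through __init__. A construction sketch; the top-level import path, the leading model_id parameter, and the alias value are assumptions, since the hunk above cuts off before the constructor's first parameters:

    from agent_framework_foundry_local import FoundryLocalClient  # import path assumed

    client = FoundryLocalClient(
        model_id="phi-3.5-mini",                 # placeholder alias, not a verified ID
        middleware=[],                           # Sequence[ChatAndFunctionMiddlewareTypes]
        function_invocation_configuration=None,  # layer default applies
    )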
diff --git a/python/packages/foundry_local/samples/foundry_local_agent.py b/python/packages/foundry_local/samples/foundry_local_agent.py
index 4bb704ec59..6d4705f8cb 100644
--- a/python/packages/foundry_local/samples/foundry_local_agent.py
+++ b/python/packages/foundry_local/samples/foundry_local_agent.py
@@ -48,7 +48,7 @@ async def streaming_example(agent: "ChatAgent") -> None:
query = "What's the weather like in Amsterdam?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/packages/github_copilot/agent_framework_github_copilot/_agent.py b/python/packages/github_copilot/agent_framework_github_copilot/_agent.py
index 778a340039..ee0e6aa490 100644
--- a/python/packages/github_copilot/agent_framework_github_copilot/_agent.py
+++ b/python/packages/github_copilot/agent_framework_github_copilot/_agent.py
@@ -4,8 +4,8 @@
import contextlib
import logging
import sys
-from collections.abc import AsyncIterable, Callable, MutableMapping, Sequence
-from typing import Any, ClassVar, Generic, TypedDict
+from collections.abc import AsyncIterable, Awaitable, Callable, MutableMapping, Sequence
+from typing import Any, ClassVar, Generic, Literal, TypedDict, overload
from agent_framework import (
AgentMiddlewareTypes,
@@ -16,6 +16,8 @@
ChatMessage,
Content,
ContextProvider,
+ ResponseStream,
+ Role,
normalize_messages,
)
from agent_framework._tools import FunctionTool, ToolProtocol
@@ -272,34 +274,79 @@ async def stop(self) -> None:
self._started = False
- async def run(
+ @overload
+ def run(
self,
messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
*,
+ stream: Literal[False] = False,
thread: AgentThread | None = None,
options: TOptions | None = None,
**kwargs: Any,
- ) -> AgentResponse:
+ ) -> Awaitable[AgentResponse]: ...
+
+ @overload
+ def run(
+ self,
+ messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ *,
+ stream: Literal[True],
+ thread: AgentThread | None = None,
+ options: TOptions | None = None,
+ **kwargs: Any,
+ ) -> ResponseStream[AgentResponseUpdate, AgentResponse]: ...
+
+ def run(
+ self,
+ messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ *,
+ stream: bool = False,
+ thread: AgentThread | None = None,
+ options: TOptions | None = None,
+ **kwargs: Any,
+ ) -> Awaitable[AgentResponse] | ResponseStream[AgentResponseUpdate, AgentResponse]:
"""Get a response from the agent.
This method returns the final result of the agent's execution
- as a single AgentResponse object. The caller is blocked until
- the final result is available.
+ as a single AgentResponse object when stream=False. When stream=True,
+ it returns a ResponseStream that yields AgentResponseUpdate objects.
Args:
messages: The message(s) to send to the agent.
Keyword Args:
+ stream: Whether to stream the response. Defaults to False.
thread: The conversation thread associated with the message(s).
options: Runtime options (model, timeout, etc.).
kwargs: Additional keyword arguments.
Returns:
- An agent response item.
+ When stream=False: An Awaitable[AgentResponse].
+ When stream=True: A ResponseStream of AgentResponseUpdate items.
Raises:
ServiceException: If the request fails.
"""
+ if stream:
+
+ def _finalize(updates: Sequence[AgentResponseUpdate]) -> AgentResponse:
+ return AgentResponse.from_agent_run_response_updates(updates)
+
+ return ResponseStream(
+ self._stream_updates(messages=messages, thread=thread, options=options, **kwargs),
+ finalizer=_finalize,
+ )
+ return self._run_impl(messages=messages, thread=thread, options=options, **kwargs)
+
+ async def _run_impl(
+ self,
+ messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ *,
+ thread: AgentThread | None = None,
+ options: TOptions | None = None,
+ **kwargs: Any,
+ ) -> AgentResponse:
+ """Non-streaming implementation of run."""
if not self._started:
await self.start()
@@ -329,7 +376,7 @@ async def run(
if response_event.data.content:
response_messages.append(
ChatMessage(
- role="assistant",
+ role=Role.ASSISTANT,
contents=[Content.from_text(response_event.data.content)],
message_id=message_id,
raw_representation=response_event,
@@ -339,7 +386,7 @@ async def run(
return AgentResponse(messages=response_messages, response_id=response_id)
- async def run_stream(
+ async def _stream_updates(
self,
messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
*,
@@ -347,10 +394,7 @@ async def run_stream(
options: TOptions | None = None,
**kwargs: Any,
) -> AsyncIterable[AgentResponseUpdate]:
- """Run the agent as a stream.
-
- This method will return the intermediate steps and final results of the
- agent's execution as a stream of AgentResponseUpdate objects to the caller.
+ """Internal method to stream updates from GitHub Copilot.
Args:
messages: The message(s) to send to the agent.
@@ -361,7 +405,7 @@ async def run_stream(
kwargs: Additional keyword arguments.
Yields:
- An agent response update for each delta.
+ AgentResponseUpdate items.
Raises:
ServiceException: If the request fails.
@@ -384,7 +428,7 @@ def event_handler(event: SessionEvent) -> None:
if event.type == SessionEventType.ASSISTANT_MESSAGE_DELTA:
if event.data.delta_content:
update = AgentResponseUpdate(
- role="assistant",
+ role=Role.ASSISTANT,
contents=[Content.from_text(event.data.delta_content)],
response_id=event.data.message_id,
message_id=event.data.message_id,
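The @overload pair on run is what lets type checkers narrow the return type on the stream flag: run() is seen as Awaitable[AgentResponse], run(stream=True) as a ResponseStream. A self-contained sketch of the same idiom with generic stand-in names, not the framework's API:

from collections.abc import AsyncIterator, Awaitable
from typing import Literal, overload

async def _final() -> str:
    return "final result"

async def _deltas() -> AsyncIterator[str]:
    yield "delta 1"
    yield "delta 2"

@overload
def run(*, stream: Literal[False] = False) -> Awaitable[str]: ...
@overload
def run(*, stream: Literal[True]) -> AsyncIterator[str]: ...
def run(*, stream: bool = False) -> Awaitable[str] | AsyncIterator[str]:
    # The runtime body is untyped glue; the overloads above carry the types.
    return _deltas() if stream else _final()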
diff --git a/python/packages/github_copilot/tests/test_github_copilot_agent.py b/python/packages/github_copilot/tests/test_github_copilot_agent.py
index 37707465cb..e7686d8b72 100644
--- a/python/packages/github_copilot/tests/test_github_copilot_agent.py
+++ b/python/packages/github_copilot/tests/test_github_copilot_agent.py
@@ -281,7 +281,7 @@ async def test_run_string_message(
assert isinstance(response, AgentResponse)
assert len(response.messages) == 1
- assert response.messages[0].role == "assistant"
+ assert response.messages[0].role.value == "assistant"
assert response.messages[0].contents[0].text == "Test response"
async def test_run_chat_message(
@@ -294,7 +294,7 @@ async def test_run_chat_message(
mock_session.send_and_wait.return_value = assistant_message_event
agent = GitHubCopilotAgent(client=mock_client)
- chat_message = ChatMessage("user", [Content.from_text("Hello")])
+ chat_message = ChatMessage(role="user", contents=[Content.from_text("Hello")])
response = await agent.run(chat_message)
assert isinstance(response, AgentResponse)
@@ -362,10 +362,10 @@ async def test_run_auto_starts(
mock_client.start.assert_called_once()
-class TestGitHubCopilotAgentRunStream:
- """Test cases for run_stream method."""
+class TestGitHubCopilotAgentRunStreaming:
+ """Test cases for run(stream=True) method."""
- async def test_run_stream_basic(
+ async def test_run_streaming_basic(
self,
mock_client: MagicMock,
mock_session: MagicMock,
@@ -384,15 +384,15 @@ def mock_on(handler: Any) -> Any:
agent = GitHubCopilotAgent(client=mock_client)
responses: list[AgentResponseUpdate] = []
- async for update in agent.run_stream("Hello"):
+ async for update in agent.run("Hello", stream=True):
responses.append(update)
assert len(responses) == 1
assert isinstance(responses[0], AgentResponseUpdate)
- assert responses[0].role == "assistant"
+ assert responses[0].role.value == "assistant"
assert responses[0].contents[0].text == "Hello"
- async def test_run_stream_with_thread(
+ async def test_run_streaming_with_thread(
self,
mock_client: MagicMock,
mock_session: MagicMock,
@@ -409,12 +409,12 @@ def mock_on(handler: Any) -> Any:
agent = GitHubCopilotAgent(client=mock_client)
thread = AgentThread()
- async for _ in agent.run_stream("Hello", thread=thread):
+ async for _ in agent.run("Hello", thread=thread, stream=True):
pass
assert thread.service_thread_id == mock_session.session_id
- async def test_run_stream_error(
+ async def test_run_streaming_error(
self,
mock_client: MagicMock,
mock_session: MagicMock,
@@ -431,16 +431,16 @@ def mock_on(handler: Any) -> Any:
agent = GitHubCopilotAgent(client=mock_client)
with pytest.raises(ServiceException, match="session error"):
- async for _ in agent.run_stream("Hello"):
+ async for _ in agent.run("Hello", stream=True):
pass
- async def test_run_stream_auto_starts(
+ async def test_run_streaming_auto_starts(
self,
mock_client: MagicMock,
mock_session: MagicMock,
session_idle_event: SessionEvent,
) -> None:
- """Test that run_stream auto-starts the agent if not started."""
+ """Test that run(stream=True) auto-starts the agent if not started."""
def mock_on(handler: Any) -> Any:
handler(session_idle_event)
@@ -451,7 +451,7 @@ def mock_on(handler: Any) -> Any:
agent = GitHubCopilotAgent(client=mock_client)
assert agent._started is False # type: ignore
- async for _ in agent.run_stream("Hello"):
+ async for _ in agent.run("Hello", stream=True):
pass
assert agent._started is True # type: ignore
diff --git a/python/packages/lab/pyproject.toml b/python/packages/lab/pyproject.toml
index 86cee50527..22eb969bd1 100644
--- a/python/packages/lab/pyproject.toml
+++ b/python/packages/lab/pyproject.toml
@@ -60,12 +60,6 @@ dev = [
"pre-commit >= 3.7",
"ruff>=0.11.8",
"pytest>=8.4.1",
- "pytest-asyncio>=1.0.0",
- "pytest-cov>=6.2.1",
- "pytest-env>=1.1.5",
- "pytest-xdist[psutil]>=3.8.0",
- "pytest-timeout>=2.3.1",
- "pytest-retry>=1",
"mypy>=1.16.1",
"pyright>=1.1.402",
#tasks
diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py b/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py
index 4fd5e21fb7..dccf6e2882 100644
--- a/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py
+++ b/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py
@@ -1,9 +1,16 @@
# Copyright (c) Microsoft. All rights reserved.
+from typing import Any
+
from agent_framework._types import ChatMessage, Content
from loguru import logger
+def _get_role_value(role: Any) -> str:
+ """Get the string value of a role, handling both enum and string."""
+ return role.value if hasattr(role, "value") else str(role)
+
+
def flip_messages(messages: list[ChatMessage]) -> list[ChatMessage]:
"""Flip message roles between assistant and user for role-playing scenarios.
@@ -18,7 +25,8 @@ def filter_out_function_calls(messages: list[Content]) -> list[Content]:
flipped_messages = []
for msg in messages:
- if msg.role == "assistant":
+ role_value = _get_role_value(msg.role)
+ if role_value == "assistant":
# Flip assistant to user
contents = filter_out_function_calls(msg.contents)
if contents:
@@ -30,13 +38,13 @@ def filter_out_function_calls(messages: list[Content]) -> list[Content]:
message_id=msg.message_id,
)
flipped_messages.append(flipped_msg)
- elif msg.role == "user":
+ elif role_value == "user":
# Flip user to assistant
flipped_msg = ChatMessage(
role="assistant", contents=msg.contents, author_name=msg.author_name, message_id=msg.message_id
)
flipped_messages.append(flipped_msg)
- elif msg.role == "tool":
+ elif role_value == "tool":
# Skip tool messages
pass
else:
@@ -53,22 +61,23 @@ def log_messages(messages: list[ChatMessage]) -> None:
"""
logger_ = logger.opt(colors=True)
for msg in messages:
+ role_value = _get_role_value(msg.role)
# Handle different content types
if hasattr(msg, "contents") and msg.contents:
for content in msg.contents:
if hasattr(content, "type"):
if content.type == "text":
escape_text = content.text.replace("<", r"\<") # type: ignore[union-attr]
- if msg.role == "system":
+ if role_value == "system":
logger_.info(f"[SYSTEM] {escape_text}")
- elif msg.role == "user":
+ elif role_value == "user":
logger_.info(f"[USER] {escape_text}")
- elif msg.role == "assistant":
+ elif role_value == "assistant":
logger_.info(f"[ASSISTANT] {escape_text}")
- elif msg.role == "tool":
+ elif role_value == "tool":
logger_.info(f"[TOOL] {escape_text}")
else:
- logger_.info(f"[{msg.role.upper()}] {escape_text}")
+ logger_.info(f"[{role_value.upper()}] {escape_text}")
elif content.type == "function_call":
function_call_text = f"{content.name}({content.arguments})"
function_call_text = function_call_text.replace("<", r"\<")
@@ -79,34 +88,34 @@ def log_messages(messages: list[ChatMessage]) -> None:
logger_.info(f"[TOOL_RESULT] 🔨 {function_result_text}")
else:
content_text = str(content).replace("<", r"\<")
- logger_.info(f"[{msg.role.upper()}] ({content.type}) {content_text}")
+ logger_.info(f"[{role_value.upper()}] ({content.type}) {content_text}")
else:
# Fallback for content without type
text_content = str(content).replace("<", r"\<")
- if msg.role == "system":
+ if role_value == "system":
logger_.info(f"[SYSTEM] {text_content}")
- elif msg.role == "user":
+ elif role_value == "user":
logger_.info(f"[USER] {text_content}")
- elif msg.role == "assistant":
+ elif role_value == "assistant":
logger_.info(f"[ASSISTANT] {text_content}")
- elif msg.role == "tool":
+ elif role_value == "tool":
logger_.info(f"[TOOL] {text_content}")
else:
- logger_.info(f"[{msg.role.upper()}] {text_content}")
+ logger_.info(f"[{role_value.upper()}] {text_content}")
elif hasattr(msg, "text") and msg.text:
# Handle simple text messages
text_content = msg.text.replace("<", r"\<")
- if msg.role == "system":
+ if role_value == "system":
logger_.info(f"[SYSTEM] {text_content}")
- elif msg.role == "user":
+ elif role_value == "user":
logger_.info(f"[USER] {text_content}")
- elif msg.role == "assistant":
+ elif role_value == "assistant":
logger_.info(f"[ASSISTANT] {text_content}")
- elif msg.role == "tool":
+ elif role_value == "tool":
logger_.info(f"[TOOL] {text_content}")
else:
- logger_.info(f"[{msg.role.upper()}] {text_content}")
+ logger_.info(f"[{role_value.upper()}] {text_content}")
else:
# Fallback for other message formats
text_content = str(msg).replace("<", r"\<")
- logger_.info(f"[{msg.role.upper()}] {text_content}")
+ logger_.info(f"[{role_value.upper()}] {text_content}")
diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py b/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py
index cec984272f..03e3b2b3d7 100644
--- a/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py
+++ b/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py
@@ -51,7 +51,14 @@ def truncate_messages(self) -> None:
logger.warning("Messages exceed max tokens. Truncating oldest message.")
self.truncated_messages.pop(0)
# Remove leading tool messages
- while len(self.truncated_messages) > 0 and self.truncated_messages[0].role == "tool":
+ while len(self.truncated_messages) > 0:
+ role_value = (
+ self.truncated_messages[0].role.value
+ if hasattr(self.truncated_messages[0].role, "value")
+ else self.truncated_messages[0].role
+ )
+ if role_value != "tool":
+ break
logger.warning("Removing leading tool message because tool result cannot be the first message.")
self.truncated_messages.pop(0)
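The rewritten loop pops leading tool messages one at a time and stops at the first non-tool message, instead of testing the role inline in the while condition. The same control flow on plain strings, as a sanity check:

truncated = ["tool", "tool", "user", "assistant"]
while truncated:
    if truncated[0] != "tool":
        break
    truncated.pop(0)  # a tool result cannot open the window
assert truncated == ["user", "assistant"]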
diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py
index 0e63f4085e..4822835316 100644
--- a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py
+++ b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py
@@ -338,11 +338,11 @@ async def run(
# Matches tau2's expected conversation start pattern
logger.info(f"Starting workflow with hardcoded greeting: '{DEFAULT_FIRST_AGENT_MESSAGE}'")
- first_message = ChatMessage("assistant", text=DEFAULT_FIRST_AGENT_MESSAGE)
+ first_message = ChatMessage(role="assistant", text=DEFAULT_FIRST_AGENT_MESSAGE)
initial_greeting = AgentExecutorResponse(
executor_id=ASSISTANT_AGENT_ID,
agent_response=AgentResponse(messages=[first_message]),
- full_conversation=[ChatMessage("assistant", text=DEFAULT_FIRST_AGENT_MESSAGE)],
+ full_conversation=[ChatMessage(role="assistant", text=DEFAULT_FIRST_AGENT_MESSAGE)],
)
# STEP 4: Execute the workflow and collect results
diff --git a/python/packages/lab/tau2/tests/test_message_utils.py b/python/packages/lab/tau2/tests/test_message_utils.py
index 33b705db3a..f221d9b113 100644
--- a/python/packages/lab/tau2/tests/test_message_utils.py
+++ b/python/packages/lab/tau2/tests/test_message_utils.py
@@ -20,7 +20,7 @@ def test_flip_messages_user_to_assistant():
flipped = flip_messages(messages)
assert len(flipped) == 1
- assert flipped[0].role == "assistant"
+ assert flipped[0].role.value == "assistant"
assert flipped[0].text == "Hello assistant"
assert flipped[0].author_name == "User1"
assert flipped[0].message_id == "msg_001"
@@ -40,7 +40,7 @@ def test_flip_messages_assistant_to_user():
flipped = flip_messages(messages)
assert len(flipped) == 1
- assert flipped[0].role == "user"
+ assert flipped[0].role.value == "user"
assert flipped[0].text == "Hello user"
assert flipped[0].author_name == "Assistant1"
assert flipped[0].message_id == "msg_002"
@@ -65,7 +65,7 @@ def test_flip_messages_assistant_with_function_calls_filtered():
flipped = flip_messages(messages)
assert len(flipped) == 1
- assert flipped[0].role == "user"
+ assert flipped[0].role.value == "user"
# Function call should be filtered out
assert len(flipped[0].contents) == 2
assert all(content.type == "text" for content in flipped[0].contents)
@@ -78,7 +78,7 @@ def test_flip_messages_assistant_with_only_function_calls_skipped():
function_call = Content.from_function_call(call_id="call_456", name="another_function", arguments={"key": "value"})
messages = [
- ChatMessage("assistant", [function_call], message_id="msg_004") # Only function call, no text
+ ChatMessage(role="assistant", contents=[function_call], message_id="msg_004") # Only function call, no text
]
flipped = flip_messages(messages)
@@ -91,7 +91,7 @@ def test_flip_messages_tool_messages_skipped():
"""Test that tool messages are skipped."""
function_result = Content.from_function_result(call_id="call_789", result={"success": True})
- messages = [ChatMessage("tool", [function_result])]
+ messages = [ChatMessage(role="tool", contents=[function_result])]
flipped = flip_messages(messages)
@@ -101,12 +101,14 @@ def test_flip_messages_tool_messages_skipped():
def test_flip_messages_system_messages_preserved():
"""Test that system messages are preserved as-is."""
- messages = [ChatMessage("system", [Content.from_text(text="System instruction")], message_id="sys_001")]
+ messages = [
+ ChatMessage(role="system", contents=[Content.from_text(text="System instruction")], message_id="sys_001")
+ ]
flipped = flip_messages(messages)
assert len(flipped) == 1
- assert flipped[0].role == "system"
+ assert flipped[0].role.value == "system"
assert flipped[0].text == "System instruction"
assert flipped[0].message_id == "sys_001"
@@ -118,11 +120,11 @@ def test_flip_messages_mixed_conversation():
function_result = Content.from_function_result(call_id="call_mixed", result="function result")
messages = [
- ChatMessage("system", [Content.from_text(text="System prompt")]),
- ChatMessage("user", [Content.from_text(text="User question")]),
- ChatMessage("assistant", [Content.from_text(text="Assistant response"), function_call]),
- ChatMessage("tool", [function_result]),
- ChatMessage("assistant", [Content.from_text(text="Final response")]),
+ ChatMessage(role="system", contents=[Content.from_text(text="System prompt")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="User question")]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="Assistant response"), function_call]),
+ ChatMessage(role="tool", contents=[function_result]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="Final response")]),
]
flipped = flip_messages(messages)
@@ -132,18 +134,18 @@ def test_flip_messages_mixed_conversation():
assert len(flipped) == 4
# Check each flipped message
- assert flipped[0].role == "system"
+ assert flipped[0].role.value == "system"
assert flipped[0].text == "System prompt"
- assert flipped[1].role == "assistant"
+ assert flipped[1].role.value == "assistant"
assert flipped[1].text == "User question"
- assert flipped[2].role == "user"
+ assert flipped[2].role.value == "user"
assert flipped[2].text == "Assistant response" # Function call filtered out
# Tool message skipped
- assert flipped[3].role == "user"
+ assert flipped[3].role.value == "user"
assert flipped[3].text == "Final response"
@@ -176,8 +178,8 @@ def test_flip_messages_preserves_metadata():
def test_log_messages_text_content(mock_logger):
"""Test logging messages with text content."""
messages = [
- ChatMessage("user", [Content.from_text(text="Hello")]),
- ChatMessage("assistant", [Content.from_text(text="Hi there!")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="Hello")]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="Hi there!")]),
]
log_messages(messages)
@@ -191,7 +193,7 @@ def test_log_messages_function_call(mock_logger):
"""Test logging messages with function calls."""
function_call = Content.from_function_call(call_id="call_log", name="log_function", arguments={"param": "value"})
- messages = [ChatMessage("assistant", [function_call])]
+ messages = [ChatMessage(role="assistant", contents=[function_call])]
log_messages(messages)
@@ -207,7 +209,7 @@ def test_log_messages_function_result(mock_logger):
"""Test logging messages with function results."""
function_result = Content.from_function_result(call_id="call_result", result="success")
- messages = [ChatMessage("tool", [function_result])]
+ messages = [ChatMessage(role="tool", contents=[function_result])]
log_messages(messages)
@@ -221,10 +223,10 @@ def test_log_messages_function_result(mock_logger):
def test_log_messages_different_roles(mock_logger):
"""Test logging messages with different roles get different colors."""
messages = [
- ChatMessage("system", [Content.from_text(text="System")]),
- ChatMessage("user", [Content.from_text(text="User")]),
- ChatMessage("assistant", [Content.from_text(text="Assistant")]),
- ChatMessage("tool", [Content.from_text(text="Tool")]),
+ ChatMessage(role="system", contents=[Content.from_text(text="System")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="User")]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="Assistant")]),
+ ChatMessage(role="tool", contents=[Content.from_text(text="Tool")]),
]
log_messages(messages)
@@ -248,7 +250,7 @@ def test_log_messages_different_roles(mock_logger):
@patch("agent_framework_lab_tau2._message_utils.logger")
def test_log_messages_escapes_html(mock_logger):
"""Test that HTML-like characters are properly escaped in log output."""
- messages = [ChatMessage("user", [Content.from_text(text="Message with content")])]
+ messages = [ChatMessage(role="user", contents=[Content.from_text(text="Message with content")])]
log_messages(messages)
diff --git a/python/packages/lab/tau2/tests/test_sliding_window.py b/python/packages/lab/tau2/tests/test_sliding_window.py
index 971a391882..1c4960838d 100644
--- a/python/packages/lab/tau2/tests/test_sliding_window.py
+++ b/python/packages/lab/tau2/tests/test_sliding_window.py
@@ -36,8 +36,8 @@ def test_initialization_with_parameters():
def test_initialization_with_messages():
"""Test initializing with existing messages."""
messages = [
- ChatMessage("user", [Content.from_text(text="Hello")]),
- ChatMessage("assistant", [Content.from_text(text="Hi there!")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="Hello")]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="Hi there!")]),
]
sliding_window = SlidingWindowChatMessageStore(messages=messages, max_tokens=1000)
@@ -51,8 +51,8 @@ async def test_add_messages_simple():
sliding_window = SlidingWindowChatMessageStore(max_tokens=10000) # Large limit
new_messages = [
- ChatMessage("user", [Content.from_text(text="What's the weather?")]),
- ChatMessage("assistant", [Content.from_text(text="I can help with that.")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="What's the weather?")]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="I can help with that.")]),
]
await sliding_window.add_messages(new_messages)
@@ -68,7 +68,9 @@ async def test_list_all_messages_vs_list_messages():
sliding_window = SlidingWindowChatMessageStore(max_tokens=50) # Small limit to force truncation
# Add many messages to trigger truncation
- messages = [ChatMessage("user", [Content.from_text(text=f"Message {i} with some content")]) for i in range(10)]
+ messages = [
+ ChatMessage(role="user", contents=[Content.from_text(text=f"Message {i} with some content")]) for i in range(10)
+ ]
await sliding_window.add_messages(messages)
@@ -85,7 +87,7 @@ async def test_list_all_messages_vs_list_messages():
def test_get_token_count_basic():
"""Test basic token counting."""
sliding_window = SlidingWindowChatMessageStore(max_tokens=1000)
- sliding_window.truncated_messages = [ChatMessage("user", [Content.from_text(text="Hello")])]
+ sliding_window.truncated_messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])]
token_count = sliding_window.get_token_count()
@@ -102,7 +104,7 @@ def test_get_token_count_with_system_message():
token_count_empty = sliding_window.get_token_count()
# Add a message
- sliding_window.truncated_messages = [ChatMessage("user", [Content.from_text(text="Hello")])]
+ sliding_window.truncated_messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])]
token_count_with_message = sliding_window.get_token_count()
# With message should be more tokens
@@ -115,7 +117,7 @@ def test_get_token_count_function_call():
function_call = Content.from_function_call(call_id="call_123", name="test_function", arguments={"param": "value"})
sliding_window = SlidingWindowChatMessageStore(max_tokens=1000)
- sliding_window.truncated_messages = [ChatMessage("assistant", [function_call])]
+ sliding_window.truncated_messages = [ChatMessage(role="assistant", contents=[function_call])]
token_count = sliding_window.get_token_count()
assert token_count > 0
@@ -126,7 +128,7 @@ def test_get_token_count_function_result():
function_result = Content.from_function_result(call_id="call_123", result={"success": True, "data": "result"})
sliding_window = SlidingWindowChatMessageStore(max_tokens=1000)
- sliding_window.truncated_messages = [ChatMessage("tool", [function_result])]
+ sliding_window.truncated_messages = [ChatMessage(role="tool", contents=[function_result])]
token_count = sliding_window.get_token_count()
assert token_count > 0
@@ -149,7 +151,7 @@ def test_truncate_messages_removes_old_messages(mock_logger):
Content.from_text(text="This is another very long message that should also exceed the token limit")
],
),
- ChatMessage("user", [Content.from_text(text="Short msg")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="Short msg")]),
]
sliding_window.truncated_messages = messages.copy()
@@ -171,14 +173,14 @@ def test_truncate_messages_removes_leading_tool_messages(mock_logger):
tool_message = ChatMessage(
role="tool", contents=[Content.from_function_result(call_id="call_123", result="result")]
)
- user_message = ChatMessage("user", [Content.from_text(text="Hello")])
+ user_message = ChatMessage(role="user", contents=[Content.from_text(text="Hello")])
sliding_window.truncated_messages = [tool_message, user_message]
sliding_window.truncate_messages()
# Tool message should be removed from the beginning
assert len(sliding_window.truncated_messages) == 1
- assert sliding_window.truncated_messages[0].role == "user"
+ assert sliding_window.truncated_messages[0].role.value == "user"
# Should have logged warning about removing tool message
mock_logger.warning.assert_called()
@@ -229,12 +231,12 @@ async def test_real_world_scenario():
# Simulate a conversation
conversation = [
- ChatMessage("user", [Content.from_text(text="Hello, how are you?")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="Hello, how are you?")]),
ChatMessage(
role="assistant",
contents=[Content.from_text(text="I'm doing well, thank you! How can I help you today?")],
),
- ChatMessage("user", [Content.from_text(text="Can you tell me about the weather?")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="Can you tell me about the weather?")]),
ChatMessage(
role="assistant",
contents=[
@@ -244,7 +246,7 @@ async def test_real_world_scenario():
)
],
),
- ChatMessage("user", [Content.from_text(text="What about telling me a joke instead?")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="What about telling me a joke instead?")]),
ChatMessage(
role="assistant",
contents=[
diff --git a/python/packages/lab/tau2/tests/test_tau2_utils.py b/python/packages/lab/tau2/tests/test_tau2_utils.py
index 29520bda42..dff8a56e5c 100644
--- a/python/packages/lab/tau2/tests/test_tau2_utils.py
+++ b/python/packages/lab/tau2/tests/test_tau2_utils.py
@@ -91,7 +91,7 @@ def test_convert_tau2_tool_to_function_tool_multiple_tools(tau2_airline_environm
def test_convert_agent_framework_messages_to_tau2_messages_system():
"""Test converting system message."""
- messages = [ChatMessage("system", [Content.from_text(text="System instruction")])]
+ messages = [ChatMessage(role="system", contents=[Content.from_text(text="System instruction")])]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
@@ -103,7 +103,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_system():
def test_convert_agent_framework_messages_to_tau2_messages_user():
"""Test converting user message."""
- messages = [ChatMessage("user", [Content.from_text(text="Hello assistant")])]
+ messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello assistant")])]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
@@ -116,7 +116,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_user():
def test_convert_agent_framework_messages_to_tau2_messages_assistant():
"""Test converting assistant message."""
- messages = [ChatMessage("assistant", [Content.from_text(text="Hello user")])]
+ messages = [ChatMessage(role="assistant", contents=[Content.from_text(text="Hello user")])]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
@@ -131,7 +131,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_function_call():
"""Test converting message with function call."""
function_call = Content.from_function_call(call_id="call_123", name="test_function", arguments={"param": "value"})
- messages = [ChatMessage("assistant", [Content.from_text(text="I'll call a function"), function_call])]
+ messages = [ChatMessage(role="assistant", contents=[Content.from_text(text="I'll call a function"), function_call])]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
@@ -153,7 +153,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_function_result(
"""Test converting message with function result."""
function_result = Content.from_function_result(call_id="call_123", result={"success": True, "data": "result data"})
- messages = [ChatMessage("tool", [function_result])]
+ messages = [ChatMessage(role="tool", contents=[function_result])]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
@@ -173,7 +173,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_error():
call_id="call_456", result="Error occurred", exception=Exception("Test error")
)
- messages = [ChatMessage("tool", [function_result])]
+ messages = [ChatMessage(role="tool", contents=[function_result])]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
@@ -184,7 +184,9 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_error():
def test_convert_agent_framework_messages_to_tau2_messages_multiple_text_contents():
"""Test converting message with multiple text contents."""
- messages = [ChatMessage("user", [Content.from_text(text="First part"), Content.from_text(text="Second part")])]
+ messages = [
+ ChatMessage(role="user", contents=[Content.from_text(text="First part"), Content.from_text(text="Second part")])
+ ]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
@@ -200,11 +202,11 @@ def test_convert_agent_framework_messages_to_tau2_messages_complex_scenario():
function_result = Content.from_function_result(call_id="call_789", result={"output": "tool result"})
messages = [
- ChatMessage("system", [Content.from_text(text="System prompt")]),
- ChatMessage("user", [Content.from_text(text="User request")]),
- ChatMessage("assistant", [Content.from_text(text="I'll help you"), function_call]),
- ChatMessage("tool", [function_result]),
- ChatMessage("assistant", [Content.from_text(text="Based on the result...")]),
+ ChatMessage(role="system", contents=[Content.from_text(text="System prompt")]),
+ ChatMessage(role="user", contents=[Content.from_text(text="User request")]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="I'll help you"), function_call]),
+ ChatMessage(role="tool", contents=[function_result]),
+ ChatMessage(role="assistant", contents=[Content.from_text(text="Based on the result...")]),
]
tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages)
diff --git a/python/packages/mem0/agent_framework_mem0/_provider.py b/python/packages/mem0/agent_framework_mem0/_provider.py
index ac37cc1a2c..0d12f06e5f 100644
--- a/python/packages/mem0/agent_framework_mem0/_provider.py
+++ b/python/packages/mem0/agent_framework_mem0/_provider.py
@@ -120,10 +120,14 @@ async def invoked(
)
messages_list = [*request_messages_list, *response_messages_list]
+ # Extract the role value; it may be a Role enum or a plain string
+ def get_role_value(role: Any) -> str:
+ return role.value if hasattr(role, "value") else str(role)
+
messages: list[dict[str, str]] = [
- {"role": message.role, "content": message.text}
+ {"role": get_role_value(message.role), "content": message.text}
for message in messages_list
- if message.role in {"user", "assistant", "system"} and message.text and message.text.strip()
+ if get_role_value(message.role) in {"user", "assistant", "system"} and message.text and message.text.strip()
]
if messages:
@@ -176,7 +180,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], *
line_separated_memories = "\n".join(memory.get("memory", "") for memory in memories)
return Context(
- messages=[ChatMessage("user", [f"{self.context_prompt}\n{line_separated_memories}"])]
+ messages=[ChatMessage(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")]
if line_separated_memories
else None
)
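The comprehension above forwards only user, assistant, and system turns with non-empty text to mem0, resolving roles through the local helper. The same filter with a stand-in message type, runnable on its own:

from dataclasses import dataclass

@dataclass
class Msg:  # stand-in for ChatMessage
    role: str
    text: str

def get_role_value(role):
    return role.value if hasattr(role, "value") else str(role)

history = [Msg("user", "Hi"), Msg("tool", "{}"), Msg("assistant", "  ")]
payload = [
    {"role": get_role_value(m.role), "content": m.text}
    for m in history
    if get_role_value(m.role) in {"user", "assistant", "system"} and m.text and m.text.strip()
]
assert payload == [{"role": "user", "content": "Hi"}]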
diff --git a/python/packages/mem0/tests/test_mem0_context_provider.py b/python/packages/mem0/tests/test_mem0_context_provider.py
index 0b39c7b043..349fa222c4 100644
--- a/python/packages/mem0/tests/test_mem0_context_provider.py
+++ b/python/packages/mem0/tests/test_mem0_context_provider.py
@@ -36,9 +36,9 @@ def mock_mem0_client() -> AsyncMock:
def sample_messages() -> list[ChatMessage]:
"""Create sample chat messages for testing."""
return [
- ChatMessage("user", ["Hello, how are you?"]),
- ChatMessage("assistant", ["I'm doing well, thank you!"]),
- ChatMessage("system", ["You are a helpful assistant"]),
+ ChatMessage(role="user", text="Hello, how are you?"),
+ ChatMessage(role="assistant", text="I'm doing well, thank you!"),
+ ChatMessage(role="system", text="You are a helpful assistant"),
]
@@ -191,7 +191,7 @@ class TestMem0ProviderMessagesAdding:
async def test_messages_adding_fails_without_filters(self, mock_mem0_client: AsyncMock) -> None:
"""Test that invoked fails when no filters are provided."""
provider = Mem0Provider(mem0_client=mock_mem0_client)
- message = ChatMessage("user", ["Hello!"])
+ message = ChatMessage(role="user", text="Hello!")
with pytest.raises(ServiceInitializationError) as exc_info:
await provider.invoked(message)
@@ -201,7 +201,7 @@ async def test_messages_adding_fails_without_filters(self, mock_mem0_client: Asy
async def test_messages_adding_single_message(self, mock_mem0_client: AsyncMock) -> None:
"""Test adding a single message."""
provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client)
- message = ChatMessage("user", ["Hello!"])
+ message = ChatMessage(role="user", text="Hello!")
await provider.invoked(message)
@@ -288,9 +288,9 @@ async def test_messages_adding_filters_empty_messages(self, mock_mem0_client: As
"""Test that empty or invalid messages are filtered out."""
provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client)
messages = [
- ChatMessage("user", [""]), # Empty text
- ChatMessage("user", [" "]), # Whitespace only
- ChatMessage("user", ["Valid message"]),
+ ChatMessage(role="user", text=""), # Empty text
+ ChatMessage(role="user", text=" "), # Whitespace only
+ ChatMessage(role="user", text="Valid message"),
]
await provider.invoked(messages)
@@ -303,8 +303,8 @@ async def test_messages_adding_skips_when_no_valid_messages(self, mock_mem0_clie
"""Test that mem0 client is not called when no valid messages exist."""
provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client)
messages = [
- ChatMessage("user", [""]),
- ChatMessage("user", [" "]),
+ ChatMessage(role="user", text=""),
+ ChatMessage(role="user", text=" "),
]
await provider.invoked(messages)
@@ -318,7 +318,7 @@ class TestMem0ProviderModelInvoking:
async def test_model_invoking_fails_without_filters(self, mock_mem0_client: AsyncMock) -> None:
"""Test that invoking fails when no filters are provided."""
provider = Mem0Provider(mem0_client=mock_mem0_client)
- message = ChatMessage("user", ["What's the weather?"])
+ message = ChatMessage(role="user", text="What's the weather?")
with pytest.raises(ServiceInitializationError) as exc_info:
await provider.invoking(message)
@@ -328,7 +328,7 @@ async def test_model_invoking_fails_without_filters(self, mock_mem0_client: Asyn
async def test_model_invoking_single_message(self, mock_mem0_client: AsyncMock) -> None:
"""Test invoking with a single message."""
provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client)
- message = ChatMessage("user", ["What's the weather?"])
+ message = ChatMessage(role="user", text="What's the weather?")
# Mock search results
mock_mem0_client.search.return_value = [
@@ -369,7 +369,7 @@ async def test_model_invoking_multiple_messages(
async def test_model_invoking_with_agent_id(self, mock_mem0_client: AsyncMock) -> None:
"""Test invoking with agent_id."""
provider = Mem0Provider(agent_id="agent123", mem0_client=mock_mem0_client)
- message = ChatMessage("user", ["Hello"])
+ message = ChatMessage(role="user", text="Hello")
mock_mem0_client.search.return_value = []
@@ -387,7 +387,7 @@ async def test_model_invoking_with_scope_to_per_operation_thread_id(self, mock_m
mem0_client=mock_mem0_client,
)
provider._per_operation_thread_id = "operation_thread"
- message = ChatMessage("user", ["Hello"])
+ message = ChatMessage(role="user", text="Hello")
mock_mem0_client.search.return_value = []
@@ -399,7 +399,7 @@ async def test_model_invoking_with_scope_to_per_operation_thread_id(self, mock_m
async def test_model_invoking_no_memories_returns_none_instructions(self, mock_mem0_client: AsyncMock) -> None:
"""Test that no memories returns context with None instructions."""
provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client)
- message = ChatMessage("user", ["Hello"])
+ message = ChatMessage(role="user", text="Hello")
mock_mem0_client.search.return_value = []
@@ -437,9 +437,9 @@ async def test_model_invoking_filters_empty_message_text(self, mock_mem0_client:
"""Test that empty message text is filtered out from query."""
provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client)
messages = [
- ChatMessage("user", [""]),
- ChatMessage("user", ["Valid message"]),
- ChatMessage("user", [" "]),
+ ChatMessage(role="user", text=""),
+ ChatMessage(role="user", text="Valid message"),
+ ChatMessage(role="user", text=" "),
]
mock_mem0_client.search.return_value = []
@@ -457,7 +457,7 @@ async def test_model_invoking_custom_context_prompt(self, mock_mem0_client: Asyn
context_prompt=custom_prompt,
mem0_client=mock_mem0_client,
)
- message = ChatMessage("user", ["Hello"])
+ message = ChatMessage(role="user", text="Hello")
mock_mem0_client.search.return_value = [{"memory": "Test memory"}]
diff --git a/python/packages/ollama/agent_framework_ollama/_chat_client.py b/python/packages/ollama/agent_framework_ollama/_chat_client.py
index 2891ab5bcb..aa9a1034b2 100644
--- a/python/packages/ollama/agent_framework_ollama/_chat_client.py
+++ b/python/packages/ollama/agent_framework_ollama/_chat_client.py
@@ -4,28 +4,33 @@
import sys
from collections.abc import (
AsyncIterable,
+ Awaitable,
Callable,
Mapping,
MutableMapping,
- MutableSequence,
Sequence,
)
from itertools import chain
-from typing import Any, ClassVar, Generic
+from typing import Any, ClassVar, Generic, TypedDict
from agent_framework import (
BaseChatClient,
+ ChatAndFunctionMiddlewareTypes,
ChatMessage,
+ ChatMiddlewareLayer,
ChatOptions,
ChatResponse,
ChatResponseUpdate,
Content,
+ FunctionInvocationConfiguration,
+ FunctionInvocationLayer,
FunctionTool,
+ HostedWebSearchTool,
+ ResponseStream,
+ Role,
ToolProtocol,
UsageDetails,
get_logger,
- use_chat_middleware,
- use_function_invocation,
)
from agent_framework._pydantic import AFBaseSettings
from agent_framework.exceptions import (
@@ -33,7 +38,7 @@
ServiceInvalidRequestError,
ServiceResponseException,
)
-from agent_framework.observability import use_instrumentation
+from agent_framework.observability import ChatTelemetryLayer
from ollama import AsyncClient
# Rename imported types to avoid naming conflicts with Agent Framework types
@@ -56,6 +61,7 @@
else:
from typing_extensions import TypedDict # type: ignore # pragma: no cover
+
__all__ = ["OllamaChatClient", "OllamaChatOptions"]
TResponseModel = TypeVar("TResponseModel", bound=BaseModel | None, default=None)
@@ -283,11 +289,13 @@ class OllamaSettings(AFBaseSettings):
logger = get_logger("agent_framework.ollama")
-@use_function_invocation
-@use_instrumentation
-@use_chat_middleware
-class OllamaChatClient(BaseChatClient[TOllamaChatOptions], Generic[TOllamaChatOptions]):
- """Ollama Chat completion class."""
+class OllamaChatClient(
+ ChatMiddlewareLayer[TOllamaChatOptions],
+ FunctionInvocationLayer[TOllamaChatOptions],
+ ChatTelemetryLayer[TOllamaChatOptions],
+ BaseChatClient[TOllamaChatOptions],
+):
+ """Ollama Chat completion class with middleware, telemetry, and function invocation support."""
OTEL_PROVIDER_NAME: ClassVar[str] = "ollama"
@@ -297,6 +305,8 @@ def __init__(
host: str | None = None,
client: AsyncClient | None = None,
model_id: str | None = None,
+ middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None,
+ function_invocation_configuration: FunctionInvocationConfiguration | None = None,
env_file_path: str | None = None,
env_file_encoding: str | None = None,
**kwargs: Any,
@@ -308,6 +318,8 @@ def __init__(
Can be set via the OLLAMA_HOST env variable.
client: An optional Ollama Client instance. If not provided, a new instance will be created.
model_id: The Ollama chat model ID to use. Can be set via the OLLAMA_MODEL_ID env variable.
+ middleware: Optional middleware to apply to the client.
+ function_invocation_configuration: Optional function invocation configuration override.
env_file_path: An optional path to a dotenv (.env) file to load environment variables from.
env_file_encoding: The encoding to use when reading the dotenv (.env) file. Defaults to 'utf-8'.
**kwargs: Additional keyword arguments passed to BaseChatClient.
@@ -332,58 +344,59 @@ def __init__(
# Save Host URL for serialization with to_dict()
self.host = str(self.client._client.base_url)
- super().__init__(**kwargs)
-
- @override
- async def _inner_get_response(
- self,
- *,
- messages: MutableSequence[ChatMessage],
- options: dict[str, Any],
- **kwargs: Any,
- ) -> ChatResponse:
- # prepare
- options_dict = self._prepare_options(messages, options)
-
- try:
- # execute
- response: OllamaChatResponse = await self.client.chat( # type: ignore[misc]
- stream=False,
- **options_dict,
- **kwargs,
- )
- except Exception as ex:
- raise ServiceResponseException(f"Ollama chat request failed : {ex}", ex) from ex
-
- # process
- return self._parse_response_from_ollama(response)
+ super().__init__(
+ middleware=middleware,
+ function_invocation_configuration=function_invocation_configuration,
+ **kwargs,
+ )
+ self.middleware = list(self.chat_middleware)
@override
- async def _inner_get_streaming_response(
+ def _inner_get_response(
self,
*,
- messages: MutableSequence[ChatMessage],
- options: dict[str, Any],
+ messages: Sequence[ChatMessage],
+ options: Mapping[str, Any],
+ stream: bool = False,
**kwargs: Any,
- ) -> AsyncIterable[ChatResponseUpdate]:
- # prepare
- options_dict = self._prepare_options(messages, options)
-
- try:
- # execute
- response_object: AsyncIterable[OllamaChatResponse] = await self.client.chat( # type: ignore[misc]
- stream=True,
- **options_dict,
- **kwargs,
- )
- except Exception as ex:
- raise ServiceResponseException(f"Ollama streaming chat request failed : {ex}", ex) from ex
-
- # process
- async for part in response_object:
- yield self._parse_streaming_response_from_ollama(part)
-
- def _prepare_options(self, messages: MutableSequence[ChatMessage], options: dict[str, Any]) -> dict[str, Any]:
+ ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]:
+ if stream:
+ # Streaming mode
+ async def _stream() -> AsyncIterable[ChatResponseUpdate]:
+ validated_options = await self._validate_options(options)
+ options_dict = self._prepare_options(messages, validated_options)
+ try:
+ response_object: AsyncIterable[OllamaChatResponse] = await self.client.chat( # type: ignore[misc]
+ stream=True,
+ **options_dict,
+ **kwargs,
+ )
+ except Exception as ex:
+ raise ServiceResponseException(f"Ollama streaming chat request failed : {ex}", ex) from ex
+
+ async for part in response_object:
+ yield self._parse_streaming_response_from_ollama(part)
+
+ return self._build_response_stream(_stream(), response_format=options.get("response_format"))
+
+ # Non-streaming mode
+ async def _get_response() -> ChatResponse:
+ validated_options = await self._validate_options(options)
+ options_dict = self._prepare_options(messages, validated_options)
+ try:
+ response: OllamaChatResponse = await self.client.chat( # type: ignore[misc]
+ stream=False,
+ **options_dict,
+ **kwargs,
+ )
+ except Exception as ex:
+ raise ServiceResponseException(f"Ollama chat request failed : {ex}", ex) from ex
+
+ return self._parse_response_from_ollama(response)
+
+ return _get_response()
+
+ def _prepare_options(self, messages: Sequence[ChatMessage], options: Mapping[str, Any]) -> dict[str, Any]:
# Handle instructions by prepending to messages as system message
instructions = options.get("instructions")
if instructions:
@@ -429,24 +442,24 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], options: dict
# tools
tools = options.get("tools")
- if tools and (prepared_tools := self._prepare_tools_for_ollama(tools)):
+ if tools is not None and (prepared_tools := self._prepare_tools_for_ollama(tools)):
run_options["tools"] = prepared_tools
return run_options
- def _prepare_messages_for_ollama(self, messages: MutableSequence[ChatMessage]) -> list[OllamaMessage]:
+ def _prepare_messages_for_ollama(self, messages: Sequence[ChatMessage]) -> list[OllamaMessage]:
ollama_messages = [self._prepare_message_for_ollama(msg) for msg in messages]
# Flatten the list of lists into a single list
return list(chain.from_iterable(ollama_messages))
def _prepare_message_for_ollama(self, message: ChatMessage) -> list[OllamaMessage]:
message_converters: dict[str, Callable[[ChatMessage], list[OllamaMessage]]] = {
- "system": self._format_system_message,
- "user": self._format_user_message,
- "assistant": self._format_assistant_message,
- "tool": self._format_tool_message,
+ Role.SYSTEM.value: self._format_system_message,
+ Role.USER.value: self._format_user_message,
+ Role.ASSISTANT.value: self._format_assistant_message,
+ Role.TOOL.value: self._format_tool_message,
}
- return message_converters[message.role](message)
+ return message_converters[message.role.value](message)
def _format_system_message(self, message: ChatMessage) -> list[OllamaMessage]:
return [OllamaMessage(role="system", content=message.text)]
@@ -515,8 +528,8 @@ def _parse_streaming_response_from_ollama(self, response: OllamaChatResponse) ->
contents = self._parse_contents_from_ollama(response)
return ChatResponseUpdate(
contents=contents,
- role="assistant",
- model_id=response.model,
+ role=Role.ASSISTANT,
+ ai_model_id=response.model,
created_at=response.created_at,
)
@@ -524,7 +537,7 @@ def _parse_response_from_ollama(self, response: OllamaChatResponse) -> ChatRespo
contents = self._parse_contents_from_ollama(response)
return ChatResponse(
- messages=[ChatMessage("assistant", contents)],
+ messages=[ChatMessage(role=Role.ASSISTANT, contents=contents)],
model_id=response.model,
created_at=response.created_at,
usage_details=UsageDetails(
@@ -552,6 +565,8 @@ def _prepare_tools_for_ollama(self, tools: list[ToolProtocol | MutableMapping[st
match tool:
case FunctionTool():
chat_tools.append(tool.to_json_schema_spec())
+ case HostedWebSearchTool():
+ raise ServiceInvalidRequestError("HostedWebSearchTool is not supported by the Ollama client.")
case _:
raise ServiceInvalidRequestError(
"Unsupported tool type '"
diff --git a/python/packages/ollama/tests/test_ollama_chat_client.py b/python/packages/ollama/tests/test_ollama_chat_client.py
index 9658ba7c6e..efe6d70890 100644
--- a/python/packages/ollama/tests/test_ollama_chat_client.py
+++ b/python/packages/ollama/tests/test_ollama_chat_client.py
@@ -261,7 +261,7 @@ async def test_cmc_streaming(
chat_history.append(ChatMessage(text="hello world", role="user"))
ollama_client = OllamaChatClient()
- result = ollama_client.get_streaming_response(messages=chat_history)
+ result = ollama_client.get_response(messages=chat_history, stream=True)
async for chunk in result:
assert chunk.text == "test"
@@ -278,7 +278,7 @@ async def test_cmc_streaming_reasoning(
chat_history.append(ChatMessage(text="hello world", role="user"))
ollama_client = OllamaChatClient()
- result = ollama_client.get_streaming_response(messages=chat_history)
+ result = ollama_client.get_response(messages=chat_history, stream=True)
async for chunk in result:
reasoning = "".join(c.text for c in chunk.contents if c.type == "text_reasoning")
@@ -298,7 +298,7 @@ async def test_cmc_streaming_chat_failure(
ollama_client = OllamaChatClient()
with pytest.raises(ServiceResponseException) as exc_info:
- async for _ in ollama_client.get_streaming_response(messages=chat_history):
+ async for _ in ollama_client.get_response(messages=chat_history, stream=True):
pass
assert "Ollama streaming chat request failed" in str(exc_info.value)
@@ -321,7 +321,7 @@ async def test_cmc_streaming_with_tool_call(
chat_history.append(ChatMessage(text="hello world", role="user"))
ollama_client = OllamaChatClient()
- result = ollama_client.get_streaming_response(messages=chat_history, options={"tools": [hello_world]})
+ result = ollama_client.get_response(messages=chat_history, stream=True, options={"tools": [hello_world]})
chunks: list[ChatResponseUpdate] = []
async for chunk in result:
@@ -463,8 +463,8 @@ async def test_cmc_streaming_integration_with_tool_call(
chat_history.append(ChatMessage(text="Call the hello world function and repeat what it says", role="user"))
ollama_client = OllamaChatClient()
- result: AsyncIterable[ChatResponseUpdate] = ollama_client.get_streaming_response(
- messages=chat_history, options={"tools": [hello_world]}
+ result: AsyncIterable[ChatResponseUpdate] = ollama_client.get_response(
+ messages=chat_history, stream=True, options={"tools": [hello_world]}
)
chunks: list[ChatResponseUpdate] = []
@@ -488,7 +488,7 @@ async def test_cmc_streaming_integration_with_chat_completion(
chat_history.append(ChatMessage(text="Say Hello World", role="user"))
ollama_client = OllamaChatClient()
- result: AsyncIterable[ChatResponseUpdate] = ollama_client.get_streaming_response(messages=chat_history)
+ result: AsyncIterable[ChatResponseUpdate] = ollama_client.get_response(messages=chat_history, stream=True)
full_text = ""
async for chunk in result:
diff --git a/python/packages/purview/agent_framework_purview/_middleware.py b/python/packages/purview/agent_framework_purview/_middleware.py
index a0cce1bd55..2aabd5a57b 100644
--- a/python/packages/purview/agent_framework_purview/_middleware.py
+++ b/python/packages/purview/agent_framework_purview/_middleware.py
@@ -2,7 +2,7 @@
from collections.abc import Awaitable, Callable
-from agent_framework import AgentMiddleware, AgentRunContext, ChatContext, ChatMiddleware
+from agent_framework import AgentMiddleware, AgentRunContext, ChatContext, ChatMiddleware, MiddlewareTermination
from agent_framework._logging import get_logger
from azure.core.credentials import TokenCredential
from azure.core.credentials_async import AsyncTokenCredential
@@ -60,10 +60,11 @@ async def process(
from agent_framework import AgentResponse, ChatMessage
context.result = AgentResponse(
- messages=[ChatMessage("system", [self._settings.blocked_prompt_message])]
+ messages=[ChatMessage(role="system", text=self._settings.blocked_prompt_message)]
)
- context.terminate = True
- return
+ raise MiddlewareTermination
+ except MiddlewareTermination:
+ raise
except PurviewPaymentRequiredError as ex:
logger.error(f"Purview payment required error in policy pre-check: {ex}")
if not self._settings.ignore_payment_required:
@@ -78,7 +79,7 @@ async def process(
try:
# Post (response) check only if we have a normal AgentResponse
# Use the same user_id from the request for the response evaluation
- if context.result and not context.is_streaming:
+ if context.result and not context.stream:
should_block_response, _ = await self._processor.process_messages(
context.result.messages, # type: ignore[union-attr]
Activity.UPLOAD_TEXT,
@@ -88,7 +89,7 @@ async def process(
from agent_framework import AgentResponse, ChatMessage
context.result = AgentResponse(
- messages=[ChatMessage("system", [self._settings.blocked_response_message])]
+ messages=[ChatMessage(role="system", text=self._settings.blocked_response_message)]
)
else:
# Streaming responses are not supported for post-checks
@@ -149,10 +150,11 @@ async def process(
if should_block_prompt:
from agent_framework import ChatMessage, ChatResponse
- blocked_message = ChatMessage("system", [self._settings.blocked_prompt_message])
+ blocked_message = ChatMessage(role="system", text=self._settings.blocked_prompt_message)
context.result = ChatResponse(messages=[blocked_message])
- context.terminate = True
- return
+ raise MiddlewareTermination
+ except MiddlewareTermination:
+ raise
except PurviewPaymentRequiredError as ex:
logger.error(f"Purview payment required error in policy pre-check: {ex}")
if not self._settings.ignore_payment_required:
@@ -167,7 +169,7 @@ async def process(
try:
# Post (response) evaluation only if non-streaming and we have messages result shape
# Use the same user_id from the request for the response evaluation
- if context.result and not context.is_streaming:
+ if context.result and not context.stream:
result_obj = context.result
messages = getattr(result_obj, "messages", None)
if messages:
@@ -177,7 +179,7 @@ async def process(
if should_block_response:
from agent_framework import ChatMessage, ChatResponse
- blocked_message = ChatMessage("system", [self._settings.blocked_response_message])
+ blocked_message = ChatMessage(role="system", text=self._settings.blocked_response_message)
context.result = ChatResponse(messages=[blocked_message])
else:
logger.debug("Streaming responses are not supported for Purview policy post-checks")
diff --git a/python/packages/purview/tests/test_chat_middleware.py b/python/packages/purview/tests/test_chat_middleware.py
index 763a54ac67..4befb3a738 100644
--- a/python/packages/purview/tests/test_chat_middleware.py
+++ b/python/packages/purview/tests/test_chat_middleware.py
@@ -5,7 +5,7 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from agent_framework import ChatContext, ChatMessage
+from agent_framework import ChatContext, ChatMessage, MiddlewareTermination, Role
from azure.core.credentials import AccessToken
from agent_framework_purview import PurviewChatPolicyMiddleware, PurviewSettings
@@ -36,7 +36,9 @@ def chat_context(self) -> ChatContext:
chat_client = DummyChatClient()
chat_options = MagicMock()
chat_options.model = "test-model"
- return ChatContext(chat_client=chat_client, messages=[ChatMessage("user", ["Hello"])], options=chat_options)
+ return ChatContext(
+ chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options
+ )
async def test_initialization(self, middleware: PurviewChatPolicyMiddleware) -> None:
assert middleware._client is not None
@@ -54,14 +56,14 @@ async def mock_next(ctx: ChatContext) -> None:
class Result:
def __init__(self):
- self.messages = [ChatMessage("assistant", ["Hi there"])]
+ self.messages = [ChatMessage(role=Role.ASSISTANT, text="Hi there")]
ctx.result = Result()
await middleware.process(chat_context, mock_next)
assert next_called
assert mock_proc.call_count == 2
- assert chat_context.result.messages[0].role == "assistant"
+ assert chat_context.result.messages[0].role == Role.ASSISTANT
async def test_blocks_prompt(self, middleware: PurviewChatPolicyMiddleware, chat_context: ChatContext) -> None:
with patch.object(middleware._processor, "process_messages", return_value=(True, "user-123")):
@@ -69,12 +71,12 @@ async def test_blocks_prompt(self, middleware: PurviewChatPolicyMiddleware, chat
async def mock_next(ctx: ChatContext) -> None: # should not run
raise AssertionError("next should not be called when prompt blocked")
- await middleware.process(chat_context, mock_next)
- assert chat_context.terminate
+ with pytest.raises(MiddlewareTermination):
+ await middleware.process(chat_context, mock_next)
assert chat_context.result
assert hasattr(chat_context.result, "messages")
msg = chat_context.result.messages[0]
- assert msg.role in ("system", "system")
+ assert msg.role in ("system", Role.SYSTEM)
assert "blocked" in msg.text.lower()
async def test_blocks_response(self, middleware: PurviewChatPolicyMiddleware, chat_context: ChatContext) -> None:
@@ -90,7 +92,7 @@ async def side_effect(messages, activity, user_id=None):
async def mock_next(ctx: ChatContext) -> None:
class Result:
def __init__(self):
- self.messages = [ChatMessage("assistant", ["Sensitive output"])] # pragma: no cover
+ self.messages = [ChatMessage(role=Role.ASSISTANT, text="Sensitive output")] # pragma: no cover
ctx.result = Result()
@@ -98,7 +100,7 @@ def __init__(self):
assert call_state["count"] == 2
msgs = getattr(chat_context.result, "messages", None) or chat_context.result
first_msg = msgs[0]
- assert first_msg.role in ("system", "system")
+ assert first_msg.role in ("system", Role.SYSTEM)
assert "blocked" in first_msg.text.lower()
async def test_streaming_skips_post_check(self, middleware: PurviewChatPolicyMiddleware) -> None:
@@ -107,9 +109,9 @@ async def test_streaming_skips_post_check(self, middleware: PurviewChatPolicyMid
chat_options.model = "test-model"
streaming_context = ChatContext(
chat_client=chat_client,
- messages=[ChatMessage("user", ["Hello"])],
+ messages=[ChatMessage(role=Role.USER, text="Hello")],
options=chat_options,
- is_streaming=True,
+ stream=True,
)
with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc:
@@ -139,7 +141,7 @@ async def mock_process_messages(*args, **kwargs):
async def mock_next(ctx: ChatContext) -> None:
result = MagicMock()
- result.messages = [ChatMessage("assistant", ["Response"])]
+ result.messages = [ChatMessage(role=Role.ASSISTANT, text="Response")]
ctx.result = result
await middleware.process(chat_context, mock_next)
@@ -163,7 +165,7 @@ async def mock_process_messages(messages, activity, user_id=None):
async def mock_next(ctx: ChatContext) -> None:
result = MagicMock()
- result.messages = [ChatMessage("assistant", ["Response"])]
+ result.messages = [ChatMessage(role=Role.ASSISTANT, text="Response")]
ctx.result = result
await middleware.process(chat_context, mock_next)
@@ -186,7 +188,9 @@ async def test_chat_middleware_handles_payment_required_pre_check(self, mock_cre
chat_client = DummyChatClient()
chat_options = MagicMock()
chat_options.model = "test-model"
- context = ChatContext(chat_client=chat_client, messages=[ChatMessage("user", ["Hello"])], options=chat_options)
+ context = ChatContext(
+ chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options
+ )
async def mock_process_messages(*args, **kwargs):
raise PurviewPaymentRequiredError("Payment required")
@@ -210,7 +214,9 @@ async def test_chat_middleware_handles_payment_required_post_check(self, mock_cr
chat_client = DummyChatClient()
chat_options = MagicMock()
chat_options.model = "test-model"
- context = ChatContext(chat_client=chat_client, messages=[ChatMessage("user", ["Hello"])], options=chat_options)
+ context = ChatContext(
+ chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options
+ )
call_count = 0
@@ -225,7 +231,7 @@ async def side_effect(*args, **kwargs):
async def mock_next(ctx: ChatContext) -> None:
result = MagicMock()
- result.messages = [ChatMessage("assistant", ["OK"])]
+ result.messages = [ChatMessage(role=Role.ASSISTANT, text="OK")]
ctx.result = result
with pytest.raises(PurviewPaymentRequiredError):
@@ -241,7 +247,9 @@ async def test_chat_middleware_ignores_payment_required_when_configured(self, mo
chat_client = DummyChatClient()
chat_options = MagicMock()
chat_options.model = "test-model"
- context = ChatContext(chat_client=chat_client, messages=[ChatMessage("user", ["Hello"])], options=chat_options)
+ context = ChatContext(
+ chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options
+ )
async def mock_process_messages(*args, **kwargs):
raise PurviewPaymentRequiredError("Payment required")
@@ -250,7 +258,7 @@ async def mock_process_messages(*args, **kwargs):
async def mock_next(ctx: ChatContext) -> None:
result = MagicMock()
- result.messages = [ChatMessage("assistant", ["Response"])]
+ result.messages = [ChatMessage(role=Role.ASSISTANT, text="Response")]
context.result = result
# Should not raise, just log
@@ -281,7 +289,9 @@ async def test_chat_middleware_with_ignore_exceptions(self, mock_credential: Asy
chat_client = DummyChatClient()
chat_options = MagicMock()
chat_options.model = "test-model"
- context = ChatContext(chat_client=chat_client, messages=[ChatMessage("user", ["Hello"])], options=chat_options)
+ context = ChatContext(
+ chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options
+ )
async def mock_process_messages(*args, **kwargs):
raise ValueError("Some error")
@@ -290,7 +300,7 @@ async def mock_process_messages(*args, **kwargs):
async def mock_next(ctx: ChatContext) -> None:
result = MagicMock()
- result.messages = [ChatMessage("assistant", ["Response"])]
+ result.messages = [ChatMessage(role=Role.ASSISTANT, text="Response")]
context.result = result
# Should not raise, just log
@@ -308,7 +318,9 @@ async def test_chat_middleware_raises_on_pre_check_exception_when_ignore_excepti
chat_client = DummyChatClient()
chat_options = MagicMock()
chat_options.model = "test-model"
- context = ChatContext(chat_client=chat_client, messages=[ChatMessage("user", ["Hello"])], options=chat_options)
+ context = ChatContext(
+ chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options
+ )
with patch.object(middleware._processor, "process_messages", side_effect=ValueError("boom")):
@@ -328,7 +340,9 @@ async def test_chat_middleware_raises_on_post_check_exception_when_ignore_except
chat_client = DummyChatClient()
chat_options = MagicMock()
chat_options.model = "test-model"
- context = ChatContext(chat_client=chat_client, messages=[ChatMessage("user", ["Hello"])], options=chat_options)
+ context = ChatContext(
+ chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options
+ )
call_count = 0
@@ -343,7 +357,7 @@ async def side_effect(*args, **kwargs):
async def mock_next(ctx: ChatContext) -> None:
result = MagicMock()
- result.messages = [ChatMessage("assistant", ["OK"])]
+ result.messages = [ChatMessage(role=Role.ASSISTANT, text="OK")]
ctx.result = result
with pytest.raises(ValueError, match="post"):
diff --git a/python/packages/purview/tests/test_middleware.py b/python/packages/purview/tests/test_middleware.py
index 32f712b0b9..8fda41ff65 100644
--- a/python/packages/purview/tests/test_middleware.py
+++ b/python/packages/purview/tests/test_middleware.py
@@ -5,7 +5,7 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from agent_framework import AgentResponse, AgentRunContext, ChatMessage
+from agent_framework import AgentResponse, AgentRunContext, ChatMessage, MiddlewareTermination, Role
from azure.core.credentials import AccessToken
from agent_framework_purview import PurviewPolicyMiddleware, PurviewSettings
@@ -49,7 +49,7 @@ async def test_middleware_allows_clean_prompt(
self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock
) -> None:
"""Test middleware allows prompt that passes policy check."""
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Hello, how are you?"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Hello, how are you?")])
with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")):
next_called = False
@@ -57,19 +57,20 @@ async def test_middleware_allows_clean_prompt(
async def mock_next(ctx: AgentRunContext) -> None:
nonlocal next_called
next_called = True
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["I'm good, thanks!"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="I'm good, thanks!")])
await middleware.process(context, mock_next)
assert next_called
assert context.result is not None
- assert not context.terminate
async def test_middleware_blocks_prompt_on_policy_violation(
self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock
) -> None:
"""Test middleware blocks prompt that violates policy."""
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Sensitive information"])])
+ context = AgentRunContext(
+ agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Sensitive information")]
+ )
with patch.object(middleware._processor, "process_messages", return_value=(True, "user-123")):
next_called = False
@@ -78,18 +79,18 @@ async def mock_next(ctx: AgentRunContext) -> None:
nonlocal next_called
next_called = True
- await middleware.process(context, mock_next)
+ with pytest.raises(MiddlewareTermination):
+ await middleware.process(context, mock_next)
assert not next_called
assert context.result is not None
- assert context.terminate
assert len(context.result.messages) == 1
- assert context.result.messages[0].role == "system"
+ assert context.result.messages[0].role == Role.SYSTEM
assert "blocked by policy" in context.result.messages[0].text.lower()
async def test_middleware_checks_response(self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock) -> None:
"""Test middleware checks agent response for policy violations."""
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Hello"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Hello")])
call_count = 0
@@ -102,14 +103,16 @@ async def mock_process_messages(messages, activity, user_id=None):
with patch.object(middleware._processor, "process_messages", side_effect=mock_process_messages):
async def mock_next(ctx: AgentRunContext) -> None:
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["Here's some sensitive information"])])
+ ctx.result = AgentResponse(
+ messages=[ChatMessage(role=Role.ASSISTANT, text="Here's some sensitive information")]
+ )
await middleware.process(context, mock_next)
assert call_count == 2
assert context.result is not None
assert len(context.result.messages) == 1
- assert context.result.messages[0].role == "system"
+ assert context.result.messages[0].role == Role.SYSTEM
assert "blocked by policy" in context.result.messages[0].text.lower()
async def test_middleware_handles_result_without_messages(
@@ -119,7 +122,7 @@ async def test_middleware_handles_result_without_messages(
# Set ignore_exceptions to True so AttributeError is caught and logged
middleware._settings.ignore_exceptions = True
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Hello"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Hello")])
with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")):
@@ -136,12 +139,12 @@ async def test_middleware_processor_receives_correct_activity(
"""Test middleware passes correct activity type to processor."""
from agent_framework_purview._models import Activity
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Test"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Test")])
with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_process:
async def mock_next(ctx: AgentRunContext) -> None:
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["Response"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Response")])
await middleware.process(context, mock_next)
@@ -153,13 +156,13 @@ async def test_middleware_streaming_skips_post_check(
self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock
) -> None:
"""Test that streaming results skip post-check evaluation."""
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Hello"])])
- context.is_streaming = True
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Hello")])
+ context.stream = True
with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc:
async def mock_next(ctx: AgentRunContext) -> None:
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["streaming"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="streaming")])
await middleware.process(context, mock_next)
@@ -171,7 +174,7 @@ async def test_middleware_payment_required_in_pre_check_raises_by_default(
"""Test that 402 in pre-check is raised when ignore_payment_required=False."""
from agent_framework_purview._exceptions import PurviewPaymentRequiredError
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Hello"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Hello")])
with patch.object(
middleware._processor,
@@ -191,7 +194,7 @@ async def test_middleware_payment_required_in_post_check_raises_by_default(
"""Test that 402 in post-check is raised when ignore_payment_required=False."""
from agent_framework_purview._exceptions import PurviewPaymentRequiredError
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Hello"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Hello")])
call_count = 0
@@ -205,7 +208,7 @@ async def side_effect(*args, **kwargs):
with patch.object(middleware._processor, "process_messages", side_effect=side_effect):
async def mock_next(ctx: AgentRunContext) -> None:
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["OK"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="OK")])
with pytest.raises(PurviewPaymentRequiredError):
await middleware.process(context, mock_next)
@@ -216,7 +219,7 @@ async def test_middleware_post_check_exception_raises_when_ignore_exceptions_fal
"""Test that post-check exceptions are propagated when ignore_exceptions=False."""
middleware._settings.ignore_exceptions = False
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Hello"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Hello")])
call_count = 0
@@ -230,7 +233,7 @@ async def side_effect(*args, **kwargs):
with patch.object(middleware._processor, "process_messages", side_effect=side_effect):
async def mock_next(ctx: AgentRunContext) -> None:
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["OK"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="OK")])
with pytest.raises(ValueError, match="Post-check blew up"):
await middleware.process(context, mock_next)
@@ -242,21 +245,19 @@ async def test_middleware_handles_pre_check_exception(
# Set ignore_exceptions to True
middleware._settings.ignore_exceptions = True
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Test"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Test")])
with patch.object(
middleware._processor, "process_messages", side_effect=Exception("Pre-check error")
) as mock_process:
async def mock_next(ctx: AgentRunContext) -> None:
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["Response"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Response")])
await middleware.process(context, mock_next)
# Should have been called twice (pre-check raises, then post-check also raises)
assert mock_process.call_count == 2
- # Context should not be terminated
- assert not context.terminate
# Result should be set by mock_next
assert context.result is not None
@@ -267,7 +268,7 @@ async def test_middleware_handles_post_check_exception(
# Set ignore_exceptions to True
middleware._settings.ignore_exceptions = True
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Test"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Test")])
call_count = 0
@@ -281,7 +282,7 @@ async def mock_process_messages(*args, **kwargs):
with patch.object(middleware._processor, "process_messages", side_effect=mock_process_messages):
async def mock_next(ctx: AgentRunContext) -> None:
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["Response"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Response")])
await middleware.process(context, mock_next)
@@ -298,7 +299,7 @@ async def test_middleware_with_ignore_exceptions_true(self, mock_credential: Asy
mock_agent = MagicMock()
mock_agent.name = "test-agent"
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Test"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Test")])
# Mock processor to raise an exception
async def mock_process_messages(*args, **kwargs):
@@ -307,7 +308,7 @@ async def mock_process_messages(*args, **kwargs):
with patch.object(middleware._processor, "process_messages", side_effect=mock_process_messages):
async def mock_next(ctx):
- ctx.result = AgentResponse(messages=[ChatMessage("assistant", ["Response"])])
+ ctx.result = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Response")])
# Should not raise, just log
await middleware.process(context, mock_next)
@@ -322,7 +323,7 @@ async def test_middleware_with_ignore_exceptions_false(self, mock_credential: As
mock_agent = MagicMock()
mock_agent.name = "test-agent"
- context = AgentRunContext(agent=mock_agent, messages=[ChatMessage("user", ["Test"])])
+ context = AgentRunContext(agent=mock_agent, messages=[ChatMessage(role=Role.USER, text="Test")])
# Mock processor to raise an exception
async def mock_process_messages(*args, **kwargs):
diff --git a/python/packages/purview/tests/test_processor.py b/python/packages/purview/tests/test_processor.py
index 3dfd78d981..f122c6e059 100644
--- a/python/packages/purview/tests/test_processor.py
+++ b/python/packages/purview/tests/test_processor.py
@@ -83,8 +83,8 @@ async def test_processor_initialization(
async def test_process_messages_with_defaults(self, processor: ScopedContentProcessor) -> None:
"""Test process_messages with settings that have defaults."""
messages = [
- ChatMessage("user", ["Hello"]),
- ChatMessage("assistant", ["Hi there"]),
+ ChatMessage(role="user", text="Hello"),
+ ChatMessage(role="assistant", text="Hi there"),
]
with patch.object(processor, "_map_messages", return_value=([], None)) as mock_map:
@@ -98,7 +98,7 @@ async def test_process_messages_blocks_content(
self, processor: ScopedContentProcessor, process_content_request_factory
) -> None:
"""Test process_messages returns True when content should be blocked."""
- messages = [ChatMessage("user", ["Sensitive content"])]
+ messages = [ChatMessage(role="user", text="Sensitive content")]
mock_request = process_content_request_factory("Sensitive content")
@@ -139,7 +139,7 @@ async def test_map_messages_without_defaults_gets_token_info(self, mock_client:
"""Test _map_messages gets token info when settings lack some defaults."""
settings = PurviewSettings(app_name="Test App", tenant_id="12345678-1234-1234-1234-123456789012")
processor = ScopedContentProcessor(mock_client, settings)
- messages = [ChatMessage("user", ["Test"], message_id="msg-123")]
+ messages = [ChatMessage(role="user", text="Test", message_id="msg-123")]
requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT)
@@ -156,7 +156,7 @@ async def test_map_messages_raises_on_missing_tenant_id(self, mock_client: Async
return_value={"user_id": "test-user", "client_id": "test-client"}
)
- messages = [ChatMessage("user", ["Test"], message_id="msg-123")]
+ messages = [ChatMessage(role="user", text="Test", message_id="msg-123")]
with pytest.raises(ValueError, match="Tenant id required"):
await processor._map_messages(messages, Activity.UPLOAD_TEXT)
@@ -355,7 +355,7 @@ async def test_map_messages_with_provided_user_id_fallback(self, mock_client: As
)
processor = ScopedContentProcessor(mock_client, settings)
- messages = [ChatMessage("user", ["Test message"])]
+ messages = [ChatMessage(role="user", text="Test message")]
requests, user_id = await processor._map_messages(
messages, Activity.UPLOAD_TEXT, provided_user_id="32345678-1234-1234-1234-123456789012"
@@ -376,7 +376,7 @@ async def test_map_messages_returns_empty_when_no_user_id(self, mock_client: Asy
)
processor = ScopedContentProcessor(mock_client, settings)
- messages = [ChatMessage("user", ["Test message"])]
+ messages = [ChatMessage(role="user", text="Test message")]
requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT)
@@ -479,7 +479,7 @@ async def test_user_id_from_token_when_no_other_source(self, mock_client: AsyncM
settings = PurviewSettings(app_name="Test App") # No tenant_id or app_location
processor = ScopedContentProcessor(mock_client, settings)
- messages = [ChatMessage("user", ["Test"])]
+ messages = [ChatMessage(role="user", text="Test")]
requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT)
@@ -550,7 +550,7 @@ async def test_provided_user_id_used_as_last_resort(
"""Test provided_user_id parameter is used as last resort."""
processor = ScopedContentProcessor(mock_client, settings)
- messages = [ChatMessage("user", ["Test"])]
+ messages = [ChatMessage(role="user", text="Test")]
requests, user_id = await processor._map_messages(
messages, Activity.UPLOAD_TEXT, provided_user_id="44444444-4444-4444-4444-444444444444"
@@ -562,7 +562,7 @@ async def test_invalid_provided_user_id_ignored(self, mock_client: AsyncMock, se
"""Test invalid provided_user_id is ignored."""
processor = ScopedContentProcessor(mock_client, settings)
- messages = [ChatMessage("user", ["Test"])]
+ messages = [ChatMessage(role="user", text="Test")]
requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT, provided_user_id="not-a-guid")
@@ -577,8 +577,8 @@ async def test_multiple_messages_same_user_id(self, mock_client: AsyncMock, sett
ChatMessage(
role="user", text="First", additional_properties={"user_id": "55555555-5555-5555-5555-555555555555"}
),
- ChatMessage("assistant", ["Response"]),
- ChatMessage("user", ["Second"]),
+ ChatMessage(role="assistant", text="Response"),
+ ChatMessage(role="user", text="Second"),
]
requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT)
@@ -594,7 +594,7 @@ async def test_first_valid_user_id_in_messages_is_used(
processor = ScopedContentProcessor(mock_client, settings)
messages = [
- ChatMessage("user", ["First"], author_name="Not a GUID"),
+ ChatMessage(role="user", text="First", author_name="Not a GUID"),
ChatMessage(
role="assistant",
text="Response",
@@ -654,7 +654,7 @@ async def test_protection_scopes_cached_on_first_call(
scope_identifier="scope-123", scopes=[]
)
- messages = [ChatMessage("user", ["Test"])]
+ messages = [ChatMessage(role="user", text="Test")]
await processor.process_messages(messages, Activity.UPLOAD_TEXT, user_id="12345678-1234-1234-1234-123456789012")
@@ -676,7 +676,7 @@ async def test_payment_required_exception_cached_at_tenant_level(
mock_client.get_protection_scopes.side_effect = PurviewPaymentRequiredError("Payment required")
- messages = [ChatMessage("user", ["Test"])]
+ messages = [ChatMessage(role="user", text="Test")]
with pytest.raises(PurviewPaymentRequiredError):
await processor.process_messages(
diff --git a/python/packages/redis/agent_framework_redis/_chat_message_store.py b/python/packages/redis/agent_framework_redis/_chat_message_store.py
index a68bc9f1d8..4b50c63571 100644
--- a/python/packages/redis/agent_framework_redis/_chat_message_store.py
+++ b/python/packages/redis/agent_framework_redis/_chat_message_store.py
@@ -225,7 +225,7 @@ async def add_messages(self, messages: Sequence[ChatMessage]) -> None:
Example:
.. code-block:: python
- messages = [ChatMessage("user", ["Hello"]), ChatMessage("assistant", ["Hi there!"])]
+ messages = [ChatMessage(role="user", text="Hello"), ChatMessage(role="assistant", text="Hi there!")]
await store.add_messages(messages)
"""
if not messages:
diff --git a/python/packages/redis/agent_framework_redis/_provider.py b/python/packages/redis/agent_framework_redis/_provider.py
index ce3090b92a..500d024f4e 100644
--- a/python/packages/redis/agent_framework_redis/_provider.py
+++ b/python/packages/redis/agent_framework_redis/_provider.py
@@ -503,9 +503,10 @@ async def invoked(
messages: list[dict[str, Any]] = []
for message in messages_list:
- if message.role in {"user", "assistant", "system"} and message.text and message.text.strip():
+ role_value = message.role.value if hasattr(message.role, "value") else message.role
+ if role_value in {"user", "assistant", "system"} and message.text and message.text.strip():
shaped: dict[str, Any] = {
- "role": message.role,
+ "role": role_value,
"content": message.text,
"conversation_id": self._conversation_id,
"message_id": message.message_id,
@@ -541,7 +542,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], *
)
return Context(
- messages=[ChatMessage("user", [f"{self.context_prompt}\n{line_separated_memories}"])]
+ messages=[ChatMessage(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")]
if line_separated_memories
else None
)
diff --git a/python/packages/redis/tests/test_redis_chat_message_store.py b/python/packages/redis/tests/test_redis_chat_message_store.py
index 0bbb200dfe..71e6eba155 100644
--- a/python/packages/redis/tests/test_redis_chat_message_store.py
+++ b/python/packages/redis/tests/test_redis_chat_message_store.py
@@ -19,9 +19,9 @@ class TestRedisChatMessageStore:
def sample_messages(self):
"""Sample chat messages for testing."""
return [
- ChatMessage("user", ["Hello"], message_id="msg1"),
- ChatMessage("assistant", ["Hi there!"], message_id="msg2"),
- ChatMessage("user", ["How are you?"], message_id="msg3"),
+ ChatMessage(role="user", text="Hello", message_id="msg1"),
+ ChatMessage(role="assistant", text="Hi there!", message_id="msg2"),
+ ChatMessage(role="user", text="How are you?", message_id="msg3"),
]
@pytest.fixture
@@ -250,7 +250,7 @@ async def test_add_messages_with_max_limit(self, mock_redis_client):
store = RedisChatMessageStore(redis_url="redis://localhost:6379", thread_id="test123", max_messages=3)
store._redis_client = mock_redis_client
- message = ChatMessage("user", ["Test"])
+ message = ChatMessage(role="user", text="Test")
await store.add_messages([message])
# Should trim after adding to keep only last 3 messages
@@ -269,8 +269,8 @@ async def test_list_messages_with_data(self, redis_store, mock_redis_client, sam
"""Test listing messages with data in Redis."""
# Create proper serialized messages using the actual serialization method
test_messages = [
- ChatMessage("user", ["Hello"], message_id="msg1"),
- ChatMessage("assistant", ["Hi there!"], message_id="msg2"),
+ ChatMessage(role="user", text="Hello", message_id="msg1"),
+ ChatMessage(role="assistant", text="Hi there!", message_id="msg2"),
]
serialized_messages = [redis_store._serialize_message(msg) for msg in test_messages]
mock_redis_client.lrange.return_value = serialized_messages
@@ -278,9 +278,9 @@ async def test_list_messages_with_data(self, redis_store, mock_redis_client, sam
messages = await redis_store.list_messages()
assert len(messages) == 2
- assert messages[0].role == "user"
+ assert messages[0].role.value == "user"
assert messages[0].text == "Hello"
- assert messages[1].role == "assistant"
+ assert messages[1].role.value == "assistant"
assert messages[1].text == "Hi there!"
async def test_list_messages_with_initial_messages(self, sample_messages):
@@ -422,7 +422,7 @@ async def test_message_serialization_with_complex_content(self):
serialized = store._serialize_message(message)
deserialized = store._deserialize_message(serialized)
- assert deserialized.role == "assistant"
+ assert deserialized.role.value == "assistant"
assert deserialized.text == "Hello World"
assert deserialized.author_name == "TestBot"
assert deserialized.message_id == "complex_msg"
@@ -444,7 +444,7 @@ async def test_redis_connection_error_handling(self):
store = RedisChatMessageStore(redis_url="redis://localhost:6379", thread_id="test123")
store._redis_client = mock_client
- message = ChatMessage("user", ["Test"])
+ message = ChatMessage(role="user", text="Test")
# Should propagate Redis connection errors
with pytest.raises(Exception, match="Connection failed"):
@@ -485,7 +485,7 @@ async def test_setitem(self, redis_store, mock_redis_client, sample_messages):
mock_redis_client.llen.return_value = 2
mock_redis_client.lset = AsyncMock()
- new_message = ChatMessage("user", ["Updated message"])
+ new_message = ChatMessage(role="user", text="Updated message")
await redis_store.setitem(0, new_message)
mock_redis_client.lset.assert_called_once()
@@ -497,13 +497,13 @@ async def test_setitem_index_error(self, redis_store, mock_redis_client):
"""Test setitem raises IndexError for invalid index."""
mock_redis_client.llen.return_value = 0
- new_message = ChatMessage("user", ["Test"])
+ new_message = ChatMessage(role="user", text="Test")
with pytest.raises(IndexError):
await redis_store.setitem(0, new_message)
async def test_append(self, redis_store, mock_redis_client):
"""Test append method delegates to add_messages."""
- message = ChatMessage("user", ["Appended message"])
+ message = ChatMessage(role="user", text="Appended message")
await redis_store.append(message)
# Should call pipeline operations via add_messages
diff --git a/python/packages/redis/tests/test_redis_provider.py b/python/packages/redis/tests/test_redis_provider.py
index e5db9d25fd..41ce7b37b8 100644
--- a/python/packages/redis/tests/test_redis_provider.py
+++ b/python/packages/redis/tests/test_redis_provider.py
@@ -115,16 +115,16 @@ class TestRedisProviderMessages:
@pytest.fixture
def sample_messages(self) -> list[ChatMessage]:
return [
- ChatMessage("user", ["Hello, how are you?"]),
- ChatMessage("assistant", ["I'm doing well, thank you!"]),
- ChatMessage("system", ["You are a helpful assistant"]),
+ ChatMessage(role="user", text="Hello, how are you?"),
+ ChatMessage(role="assistant", text="I'm doing well, thank you!"),
+ ChatMessage(role="system", text="You are a helpful assistant"),
]
# Writes require at least one scoping filter to avoid unbounded operations
async def test_messages_adding_requires_filters(self, patch_index_from_dict): # noqa: ARG002
provider = RedisProvider()
with pytest.raises(ServiceInitializationError):
- await provider.invoked("thread123", ChatMessage("user", ["Hello"]))
+ await provider.invoked("thread123", ChatMessage(role="user", text="Hello"))
# Captures the per-operation thread id when provided
async def test_thread_created_sets_per_operation_id(self, patch_index_from_dict): # noqa: ARG002
@@ -157,7 +157,7 @@ class TestRedisProviderModelInvoking:
async def test_model_invoking_requires_filters(self, patch_index_from_dict): # noqa: ARG002
provider = RedisProvider()
with pytest.raises(ServiceInitializationError):
- await provider.invoking(ChatMessage("user", ["Hi"]))
+ await provider.invoking(ChatMessage(role="user", text="Hi"))
# Ensures text-only search path is used and context is composed from hits
async def test_textquery_path_and_context_contents(
@@ -168,7 +168,7 @@ async def test_textquery_path_and_context_contents(
provider = RedisProvider(user_id="u1")
# Act
- ctx = await provider.invoking([ChatMessage("user", ["q1"])])
+ ctx = await provider.invoking([ChatMessage(role="user", text="q1")])
# Assert: TextQuery used (not HybridQuery), filter_expression included
assert patch_queries["TextQuery"].call_count == 1
@@ -190,7 +190,7 @@ async def test_model_invoking_empty_results_returns_empty_context(
): # noqa: ARG002
mock_index.query = AsyncMock(return_value=[])
provider = RedisProvider(user_id="u1")
- ctx = await provider.invoking([ChatMessage("user", ["any"])])
+ ctx = await provider.invoking([ChatMessage(role="user", text="any")])
assert ctx.messages == []
# Ensures hybrid vector-text search is used when a vectorizer and vector field are configured
@@ -198,7 +198,7 @@ async def test_hybridquery_path_with_vectorizer(self, mock_index: AsyncMock, pat
mock_index.query = AsyncMock(return_value=[{"content": "Hit"}])
provider = RedisProvider(user_id="u1", redis_vectorizer=CUSTOM_VECTORIZER, vector_field_name="vec")
- ctx = await provider.invoking([ChatMessage("user", ["hello"])])
+ ctx = await provider.invoking([ChatMessage(role="user", text="hello")])
# Assert: HybridQuery used with vector and vector field
assert patch_queries["HybridQuery"].call_count == 1
@@ -240,9 +240,9 @@ async def test_messages_adding_adds_partition_defaults_and_roles(
)
msgs = [
- ChatMessage("user", ["u"]),
- ChatMessage("assistant", ["a"]),
- ChatMessage("system", ["s"]),
+ ChatMessage(role="user", text="u"),
+ ChatMessage(role="assistant", text="a"),
+ ChatMessage(role="system", text="s"),
]
await provider.invoked(msgs)
@@ -265,8 +265,8 @@ async def test_messages_adding_ignores_blank_and_disallowed_roles(
): # noqa: ARG002
provider = RedisProvider(user_id="u1", scope_to_per_operation_thread_id=True)
msgs = [
- ChatMessage("user", [" "]),
- ChatMessage("tool", ["tool output"]),
+ ChatMessage(role="user", text=" "),
+ ChatMessage(role="tool", text="tool output"),
]
await provider.invoked(msgs)
# No valid messages -> no load
@@ -279,8 +279,8 @@ async def test_messages_adding_triggers_index_create_once_when_drop_true(
self, mock_index: AsyncMock, patch_index_from_dict
): # noqa: ARG002
provider = RedisProvider(user_id="u1")
- await provider.invoked(ChatMessage("user", ["m1"]))
- await provider.invoked(ChatMessage("user", ["m2"]))
+ await provider.invoked(ChatMessage(role="user", text="m1"))
+ await provider.invoked(ChatMessage(role="user", text="m2"))
# create only on first call
assert mock_index.create.await_count == 1
@@ -291,7 +291,7 @@ async def test_model_invoking_triggers_create_when_drop_false_and_not_exists(
mock_index.exists = AsyncMock(return_value=False)
provider = RedisProvider(user_id="u1")
mock_index.query = AsyncMock(return_value=[{"content": "C"}])
- await provider.invoking([ChatMessage("user", ["q"])])
+ await provider.invoking([ChatMessage(role="user", text="q")])
assert mock_index.create.await_count == 1
@@ -321,7 +321,7 @@ async def test_messages_adding_populates_vector_field_when_vectorizer_present(
vector_field_name="vec",
)
- await provider.invoked(ChatMessage("user", ["hello"]))
+ await provider.invoked(ChatMessage(role="user", text="hello"))
assert mock_index.load.await_count == 1
(loaded_args, _kwargs) = mock_index.load.call_args
docs = loaded_args[0]
diff --git a/python/pyproject.toml b/python/pyproject.toml
index a14354cbe4..92ccaad945 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -170,13 +170,13 @@ notice-rgx = "^# Copyright \\(c\\) Microsoft\\. All rights reserved\\."
min-file-size = 1
[tool.pytest.ini_options]
-testpaths = 'packages/**/tests'
+testpaths = ['packages/**/tests', 'packages/**/ag_ui_tests']
norecursedirs = '**/lab/**'
addopts = "-ra -q -r fEX"
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
filterwarnings = []
-timeout = 120
+timeout = 60
markers = [
"azure: marks tests as Azure provider specific",
"azure-ai: marks tests as Azure AI provider specific",
@@ -259,7 +259,8 @@ pytest --import-mode=importlib
--ignore-glob=packages/devui/**
-rs
-n logical --dist loadfile --dist worksteal
-packages/**/tests
+ packages/**/tests
+ packages/**/ag_ui_tests
"""
[tool.poe.tasks.all-tests]
@@ -269,7 +270,8 @@ pytest --import-mode=importlib
--ignore-glob=packages/devui/**
-rs
-n logical --dist loadfile --dist worksteal
-packages/**/tests
+ packages/**/tests
+ packages/**/ag_ui_tests
"""
[tool.poe.tasks.venv]
diff --git a/python/samples/README.md b/python/samples/README.md
index a2c539be02..fc64dced52 100644
--- a/python/samples/README.md
+++ b/python/samples/README.md
@@ -95,7 +95,7 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen
| File | Description |
|------|-------------|
| [`getting_started/agents/custom/custom_agent.py`](./getting_started/agents/custom/custom_agent.py) | Custom Agent Implementation Example |
-| [`getting_started/agents/custom/custom_chat_client.py`](./getting_started/agents/custom/custom_chat_client.py) | Custom Chat Client Implementation Example |
+| [`getting_started/chat_client/custom_chat_client.py`](./getting_started/chat_client/custom_chat_client.py) | Custom Chat Client Implementation Example |
### Ollama
diff --git a/python/samples/autogen-migration/README.md b/python/samples/autogen-migration/README.md
index 616d3c345e..509b518f8a 100644
--- a/python/samples/autogen-migration/README.md
+++ b/python/samples/autogen-migration/README.md
@@ -52,7 +52,7 @@ python samples/autogen-migration/orchestrations/04_magentic_one.py
## Tips for Migration
- **Default behavior differences**: AutoGen's `AssistantAgent` is single-turn by default (`max_tool_iterations=1`), while AF's `ChatAgent` is multi-turn and continues tool execution automatically.
-- **Thread management**: AF agents are stateless by default. Use `agent.get_new_thread()` and pass it to `run()`/`run_stream()` to maintain conversation state, similar to AutoGen's conversation context.
+- **Thread management**: AF agents are stateless by default. Use `agent.get_new_thread()` and pass it to `run()` to maintain conversation state, similar to AutoGen's conversation context (see the sketch after this list).
- **Tools**: AutoGen uses `FunctionTool` wrappers; AF uses `@tool` decorators with automatic schema inference.
- **Orchestration patterns**:
- `RoundRobinGroupChat` → `SequentialBuilder` or `WorkflowBuilder`
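+
+A minimal sketch of the thread pattern (assuming `agent` is an already-configured AF agent, as in the samples below):
+
+```python
+thread = agent.get_new_thread()
+await agent.run("My name is Ada.", thread=thread)
+reply = await agent.run("What is my name?", thread=thread)  # same thread carries the history
+```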
diff --git a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py
index 38df1424db..e1d70882cd 100644
--- a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py
+++ b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py
@@ -48,7 +48,7 @@ async def run_autogen() -> None:
# Run the team and display the conversation.
print("[AutoGen] Round-robin conversation:")
- await Console(team.run_stream(task="Create a brief summary about electric vehicles"))
+ await Console(team.run(task="Create a brief summary about electric vehicles", stream=True))
async def run_agent_framework() -> None:
@@ -80,7 +80,7 @@ async def run_agent_framework() -> None:
# Run the workflow
print("[Agent Framework] Sequential conversation:")
current_executor = None
- async for event in workflow.run_stream("Create a brief summary about electric vehicles"):
+ async for event in workflow.run("Create a brief summary about electric vehicles", stream=True):
if isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
@@ -152,7 +152,7 @@ async def check_approval(
# Run the workflow
print("[Agent Framework with Cycle] Cyclic conversation:")
current_executor = None
- async for event in workflow.run_stream("Create a brief summary about electric vehicles"):
+ async for event in workflow.run("Create a brief summary about electric vehicles", stream=True):
if isinstance(event, WorkflowOutputEvent):
print("\n---------- Workflow Output ----------")
print(event.data)
diff --git a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py
index f8c170cbef..69e36f7c17 100644
--- a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py
+++ b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py
@@ -54,7 +54,7 @@ async def run_autogen() -> None:
# Run with a question that requires expert selection
print("[AutoGen] Selector group chat conversation:")
- await Console(team.run_stream(task="How do I connect to a PostgreSQL database using Python?"))
+ await Console(team.run(task="How do I connect to a PostgreSQL database using Python?", stream=True))
async def run_agent_framework() -> None:
@@ -99,7 +99,7 @@ async def run_agent_framework() -> None:
# Run with a question that requires expert selection
print("[Agent Framework] Group chat conversation:")
current_executor = None
- async for event in workflow.run_stream("How do I connect to a PostgreSQL database using Python?"):
+ async for event in workflow.run("How do I connect to a PostgreSQL database using Python?", stream=True):
if isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
diff --git a/python/samples/autogen-migration/orchestrations/03_swarm.py b/python/samples/autogen-migration/orchestrations/03_swarm.py
index 09d8ac0486..59f878b365 100644
--- a/python/samples/autogen-migration/orchestrations/03_swarm.py
+++ b/python/samples/autogen-migration/orchestrations/03_swarm.py
@@ -75,7 +75,7 @@ async def run_autogen() -> None:
# Run with human-in-the-loop pattern
print("[AutoGen] Swarm handoff conversation:")
- task_result = await Console(team.run_stream(task=scripted_responses[response_index]))
+ task_result = await Console(team.run(task=scripted_responses[response_index], stream=True))
last_message = task_result.messages[-1]
response_index += 1
@@ -87,7 +87,7 @@ async def run_autogen() -> None:
):
user_message = scripted_responses[response_index]
task_result = await Console(
- team.run_stream(task=HandoffMessage(source="user", target=last_message.source, content=user_message))
+ team.run(task=HandoffMessage(source="user", target=last_message.source, content=user_message), stream=True)
)
last_message = task_result.messages[-1]
response_index += 1
@@ -161,7 +161,7 @@ async def run_agent_framework() -> None:
stream_line_open = False
pending_requests: list[RequestInfoEvent] = []
- async for event in workflow.run_stream(scripted_responses[0]):
+ async for event in workflow.run(scripted_responses[0], stream=True):
if isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py
index 30ccd0aa01..1bbebe4b67 100644
--- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py
+++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py
@@ -62,7 +62,7 @@ async def run_autogen() -> None:
# Run complex task and display the conversation
print("[AutoGen] Magentic One conversation:")
- await Console(team.run_stream(task="Research Python async patterns and write a simple example"))
+ await Console(team.run(task="Research Python async patterns and write a simple example", stream=True))
async def run_agent_framework() -> None:
@@ -112,7 +112,7 @@ async def run_agent_framework() -> None:
last_message_id: str | None = None
output_event: WorkflowOutputEvent | None = None
print("[Agent Framework] Magentic conversation:")
- async for event in workflow.run_stream("Research Python async patterns and write a simple example"):
+ async for event in workflow.run("Research Python async patterns and write a simple example", stream=True):
if isinstance(event, AgentRunUpdateEvent):
message_id = event.data.message_id
if message_id != last_message_id:
diff --git a/python/samples/autogen-migration/single_agent/03_assistant_agent_thread_and_stream.py b/python/samples/autogen-migration/single_agent/03_assistant_agent_thread_and_stream.py
index c2d79f4b86..8cb516fe85 100644
--- a/python/samples/autogen-migration/single_agent/03_assistant_agent_thread_and_stream.py
+++ b/python/samples/autogen-migration/single_agent/03_assistant_agent_thread_and_stream.py
@@ -32,7 +32,7 @@ async def run_autogen() -> None:
print("\n[AutoGen] Streaming response:")
# Stream response with Console for token streaming
- await Console(agent.run_stream(task="Count from 1 to 5"))
+ await Console(agent.run(task="Count from 1 to 5", stream=True))
async def run_agent_framework() -> None:
@@ -60,7 +60,7 @@ async def run_agent_framework() -> None:
print("\n[Agent Framework] Streaming response:")
# Stream response
print(" ", end="")
- async for chunk in agent.run_stream("Count from 1 to 5"):
+ async for chunk in agent.run("Count from 1 to 5", thread=thread, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print()
diff --git a/python/samples/autogen-migration/single_agent/04_agent_as_tool.py b/python/samples/autogen-migration/single_agent/04_agent_as_tool.py
index 014b7b8adf..52edc1eec7 100644
--- a/python/samples/autogen-migration/single_agent/04_agent_as_tool.py
+++ b/python/samples/autogen-migration/single_agent/04_agent_as_tool.py
@@ -43,7 +43,7 @@ async def run_autogen() -> None:
# Run coordinator with streaming - it will delegate to writer
print("[AutoGen]")
- await Console(coordinator.run_stream(task="Create a tagline for a coffee shop"))
+ await Console(coordinator.run(task="Create a tagline for a coffee shop", stream=True))
async def run_agent_framework() -> None:
@@ -80,7 +80,7 @@ async def run_agent_framework() -> None:
# Track accumulated function calls (they stream in incrementally)
accumulated_calls: dict[str, FunctionCallContent] = {}
- async for chunk in coordinator.run_stream("Create a tagline for a coffee shop"):
+ async for chunk in coordinator.run("Create a tagline for a coffee shop", stream=True):
# Stream text tokens
if chunk.text:
print(chunk.text, end="", flush=True)
diff --git a/python/samples/concepts/README.md b/python/samples/concepts/README.md
new file mode 100644
index 0000000000..8e3c0282fa
--- /dev/null
+++ b/python/samples/concepts/README.md
@@ -0,0 +1,10 @@
+# Concept Samples
+
+This folder contains samples that dive deep into specific Agent Framework concepts.
+
+## Samples
+
+| Sample | Description |
+|--------|-------------|
+| [response_stream.py](response_stream.py) | Deep dive into `ResponseStream` - the streaming abstraction for AI responses. Covers the four hook types (transform hooks, cleanup hooks, finalizer, result hooks), two consumption patterns (iteration vs direct finalization), and the `wrap()` API for layering streams without double-consumption. |
+| [typed_options.py](typed_options.py) | Demonstrates TypedDict-based chat options for type-safe configuration with IDE autocomplete support. |
diff --git a/python/samples/concepts/response_stream.py b/python/samples/concepts/response_stream.py
new file mode 100644
index 0000000000..98d5169760
--- /dev/null
+++ b/python/samples/concepts/response_stream.py
@@ -0,0 +1,360 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+from collections.abc import AsyncIterable, Sequence
+
+from agent_framework import ChatResponse, ChatResponseUpdate, Content, ResponseStream, Role
+
+"""ResponseStream: A Deep Dive
+
+This sample explores the ResponseStream class - a powerful abstraction for working with
+streaming responses in the Agent Framework.
+
+=== Why ResponseStream Exists ===
+
+When working with AI models, responses can be delivered in two ways:
+1. **Non-streaming**: Wait for the complete response, then return it all at once
+2. **Streaming**: Receive incremental updates as they're generated
+
+Streaming provides a better user experience (faster time-to-first-token, progressive rendering)
+but introduces complexity:
+- How do you process updates as they arrive?
+- How do you also get a final, complete response?
+- How do you ensure the underlying stream is only consumed once?
+- How do you add custom logic (hooks) at different stages?
+
+ResponseStream solves all these problems by wrapping an async iterable and providing:
+- Multiple consumption patterns (iteration OR direct finalization)
+- Hook points for transformation, cleanup, finalization, and result processing
+- The `wrap()` API to layer behavior without double-consuming the stream
+
+=== The Four Hook Types ===
+
+ResponseStream provides four ways to inject custom logic. All can be passed via constructor
+or added later via fluent methods:
+
+1. **Transform Hooks** (`transform_hooks=[]` or `.with_transform_hook()`)
+ - Called for EACH update as it's yielded during iteration
+ - Can transform updates before they're returned to the consumer
+ - Multiple hooks are called in order, each receiving the previous hook's output
+ - Only triggered during iteration (not when calling get_final_response directly)
+
+2. **Cleanup Hooks** (`cleanup_hooks=[]` or `.with_cleanup_hook()`)
+ - Called ONCE when iteration completes (stream fully consumed), BEFORE finalizer
+ - Used for cleanup: closing connections, releasing resources, logging
+ - Cannot modify the stream or response
+ - Triggered regardless of how the stream ends (normal completion or exception)
+
+3. **Finalizer** (`finalizer=` constructor parameter)
+ - Called ONCE when `get_final_response()` is invoked
+ - Receives the list of collected updates and converts to the final type
+ - There is only ONE finalizer per stream (set at construction)
+
+4. **Result Hooks** (`result_hooks=[]` or `.with_result_hook()`)
+ - Called ONCE after the finalizer produces its result
+ - Transform the final response before returning
+ - Multiple result hooks are called in order, each receiving the previous result
+ - Can return None to keep the previous value unchanged
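+
+All four can be combined on one stream. As a minimal sketch (`my_hook` is a hypothetical
+update -> update callable; `generate_updates` and `combine_updates` are defined in `main()`
+below, and the fluent methods are assumed to return the stream for chaining):
+
+```python
+# Constructor-style registration
+stream = ResponseStream(generate_updates(), finalizer=combine_updates, transform_hooks=[my_hook])
+
+# Equivalent fluent registration
+stream = ResponseStream(generate_updates(), finalizer=combine_updates).with_transform_hook(my_hook)
+```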
+
+=== Consumption Patterns ===
+
+**Pattern 1: Async Iteration**
+```python
+async for update in response_stream:
+ print(update.text) # Process each update
+# Stream is now consumed; updates are stored internally
+```
+- Transform hooks are called for each yielded item
+- Cleanup hooks are called after the last item
+- The stream collects all updates internally for later finalization
+- Does not run the finalizer automatically
+
+**Pattern 2: Direct Finalization**
+```python
+final = await response_stream.get_final_response()
+```
+- If the stream hasn't been iterated, it auto-iterates (consuming all updates)
+- The finalizer converts collected updates to a final response
+- Result hooks transform the response
+- You get the complete response without ever seeing individual updates
+
+**Pattern 3: Combined Usage**
+
+When you first iterate the stream and then call `get_final_response()`, the following occurs:
+- Iteration yields updates with transform hooks applied
+- Cleanup hooks run after iteration completes
+- Calling `get_final_response()` uses the already collected updates to produce the final response
+- Note that it does not re-iterate the stream since it's already been consumed
+
+```python
+async for update in response_stream:
+ print(update.text) # See each update
+final = await response_stream.get_final_response() # Get the aggregated result
+```
+
+=== Chaining with .map() and .with_finalizer() ===
+
+When building a ChatAgent on top of a ChatClient, we face a challenge:
+- The ChatClient returns a ResponseStream[ChatResponseUpdate, ChatResponse]
+- The ChatAgent needs to return a ResponseStream[AgentResponseUpdate, AgentResponse]
+- We can't iterate the ChatClient's stream twice!
+
+The `.map()` and `.with_finalizer()` methods solve this by creating new ResponseStreams that:
+- Delegate iteration to the inner stream (only consuming it once)
+- Maintain their OWN separate transform hooks, result hooks, and cleanup hooks
+- Allow type-safe transformation of updates and final responses
+
+**`.map(transform)`**: Creates a new stream that transforms each update.
+- Returns a new ResponseStream with the transformed update type
+- Falls back to the inner stream's finalizer if no new finalizer is set
+
+**`.with_finalizer(finalizer)`**: Creates a new stream with a different finalizer.
+- Returns a new ResponseStream with the new final type
+- The inner stream's finalizer and result_hooks ARE still called (see below)
+
+**IMPORTANT**: When `get_final_response()` is called on a mapped or wrapped stream:
+1. The inner stream's finalizer runs first (on the original updates)
+2. The inner stream's result_hooks run (on the inner final result)
+3. The outer stream's finalizer runs (on the transformed updates)
+4. The outer stream's result_hooks run (on the outer final result)
+
+This ensures that post-processing hooks registered on the inner stream (e.g., context
+provider notifications, telemetry, thread updates) are still executed even when the
+stream is wrapped/mapped.
+
+```python
+# ChatAgent does something like this internally:
+chat_stream = chat_client.get_response(messages, stream=True)
+agent_stream = (
+ chat_stream
+ .map(_to_agent_update, _to_agent_response)
+ .with_result_hook(_notify_thread) # Outer hook runs AFTER inner hooks
+)
+```
+
+This ensures:
+- The underlying ChatClient stream is only consumed once
+- The agent can add its own transform hooks, result hooks, and cleanup logic
+- Each layer (ChatClient, ChatAgent, middleware) can add independent behavior
+- Inner stream post-processing (like context provider notification) still runs
+- Types flow naturally through the chain
+"""
+
+
+async def main() -> None:
+ """Demonstrate the various ResponseStream patterns and capabilities."""
+
+ # =========================================================================
+ # Example 1: Basic ResponseStream with iteration
+ # =========================================================================
+ print("=== Example 1: Basic Iteration ===\n")
+
+ async def generate_updates() -> AsyncIterable[ChatResponseUpdate]:
+ """Simulate a streaming response from an AI model."""
+ words = ["Hello", " ", "from", " ", "the", " ", "streaming", " ", "response", "!"]
+ for word in words:
+ await asyncio.sleep(0.05) # Simulate network delay
+ yield ChatResponseUpdate(contents=[Content.from_text(word)], role=Role.ASSISTANT)
+
+ def combine_updates(updates: Sequence[ChatResponseUpdate]) -> ChatResponse:
+ """Finalizer that combines all updates into a single response."""
+ return ChatResponse.from_chat_response_updates(updates)
+
+ stream = ResponseStream(generate_updates(), finalizer=combine_updates)
+
+ print("Iterating through updates:")
+ async for update in stream:
+ print(f" Update: '{update.text}'")
+
+ # After iteration, we can still get the final response
+ final = await stream.get_final_response()
+ print(f"\nFinal response: '{final.text}'")
+
+ # =========================================================================
+ # Example 2: Using get_final_response() without iteration
+ # =========================================================================
+ print("\n=== Example 2: Direct Finalization (No Iteration) ===\n")
+
+ # Create a fresh stream (streams can only be consumed once)
+ stream2 = ResponseStream(generate_updates(), finalizer=combine_updates)
+
+ # Skip iteration entirely - get_final_response() auto-consumes the stream
+ final2 = await stream2.get_final_response()
+ print(f"Got final response directly: '{final2.text}'")
+ print(f"Number of updates collected internally: {len(stream2.updates)}")
+
+ # =========================================================================
+ # Example 3: Transform hooks - transform updates during iteration
+ # =========================================================================
+ print("\n=== Example 3: Transform Hooks ===\n")
+
+ update_count = {"value": 0}
+
+ def counting_hook(update: ChatResponseUpdate) -> ChatResponseUpdate:
+ """Hook that counts and annotates each update."""
+ update_count["value"] += 1
+ # Return the update (or a modified version)
+ return update
+
+ def uppercase_hook(update: ChatResponseUpdate) -> ChatResponseUpdate:
+ """Hook that converts text to uppercase."""
+ if update.text:
+ return ChatResponseUpdate(
+ contents=[Content.from_text(update.text.upper())], role=update.role, response_id=update.response_id
+ )
+ return update
+
+ # Pass transform_hooks directly to constructor
+ stream3 = ResponseStream(
+ generate_updates(),
+ finalizer=combine_updates,
+ transform_hooks=[counting_hook, uppercase_hook], # First counts, then uppercases
+ )
+
+ print("Iterating with hooks applied:")
+ async for update in stream3:
+ print(f" Received: '{update.text}'") # Will be uppercase
+
+ print(f"\nTotal updates processed: {update_count['value']}")
+
+ # =========================================================================
+ # Example 4: Cleanup hooks - cleanup after stream consumption
+ # =========================================================================
+ print("\n=== Example 4: Cleanup Hooks ===\n")
+
+ cleanup_performed = {"value": False}
+
+ async def cleanup_hook() -> None:
+ """Cleanup hook for releasing resources after stream consumption."""
+ print(" [Cleanup] Cleaning up resources...")
+ cleanup_performed["value"] = True
+
+ # Pass cleanup_hooks directly to constructor
+ stream4 = ResponseStream(
+ generate_updates(),
+ finalizer=combine_updates,
+ cleanup_hooks=[cleanup_hook],
+ )
+
+ print("Starting iteration (cleanup happens after):")
+ async for update in stream4:
+ pass # Just consume the stream
+ print(f"Cleanup was performed: {cleanup_performed['value']}")
+
+ # =========================================================================
+ # Example 5: Result hooks - transform the final response
+ # =========================================================================
+ print("\n=== Example 5: Result Hooks ===\n")
+
+ def add_metadata_hook(response: ChatResponse) -> ChatResponse:
+ """Result hook that adds metadata to the response."""
+ response.additional_properties["processed"] = True
+ response.additional_properties["word_count"] = len((response.text or "").split())
+ return response
+
+ def wrap_in_quotes_hook(response: ChatResponse) -> ChatResponse:
+ """Result hook that wraps the response text in quotes."""
+ if response.text:
+ return ChatResponse(
+ messages=f'"{response.text}"',
+ role=Role.ASSISTANT,
+ additional_properties=response.additional_properties,
+ )
+ return response
+
+ # Finalizer converts updates to response, then result hooks transform it
+ stream5 = ResponseStream(
+ generate_updates(),
+ finalizer=combine_updates,
+ result_hooks=[add_metadata_hook, wrap_in_quotes_hook], # First adds metadata, then wraps in quotes
+ )
+
+ final5 = await stream5.get_final_response()
+ print(f"Final text: {final5.text}")
+ print(f"Metadata: {final5.additional_properties}")
+
+ # =========================================================================
+ # Example 6: The wrap() API - layering without double-consumption
+ # =========================================================================
+ print("\n=== Example 6: wrap() API for Layering ===\n")
+
+ # Simulate what ChatClient returns
+ inner_stream = ResponseStream(generate_updates(), finalizer=combine_updates)
+
+    # Simulate what ChatAgent does: wrap the inner stream
+    def to_agent_format(update: ChatResponseUpdate) -> ChatResponseUpdate:
+        """Map ChatResponseUpdate to agent format (simulated transformation)."""
+        # In real code, this would convert to AgentResponseUpdate
+        return ChatResponseUpdate(
+            contents=[Content.from_text(f"[AGENT] {update.text}")], role=update.role, response_id=update.response_id
+        )
+
+    def to_agent_response(updates: Sequence[ChatResponseUpdate]) -> ChatResponse:
+        """Finalizer that converts updates to agent response (simulated)."""
+        # In real code, this would create an AgentResponse
+        text = "".join(u.text or "" for u in updates)
+        return ChatResponse(
+            text=f"[AGENT FINAL] {text}",
+            role=Role.ASSISTANT,
+            additional_properties={"layer": "agent"},
+        )
+
+    # .map() creates a new stream that:
+    # 1. Delegates iteration to inner_stream (only consuming it once)
+    # 2. Transforms each update via the transform function
+    # 3. Uses the provided finalizer (required since update type may change)
+    outer_stream = inner_stream.map(to_agent_format, to_agent_response)
+
+    print("Iterating the mapped stream:")
+    async for update in outer_stream:
+        print(f" {update.text}")
+
+    final_outer = await outer_stream.get_final_response()
+    print(f"\nMapped final: {final_outer.text}")
+    print(f"Mapped metadata: {final_outer.additional_properties}")
+
+    # Important: the inner stream was only consumed once!
+    print(f"Inner stream consumed: {inner_stream._consumed}")
+
+    # =========================================================================
+    # Example 7: Combining all patterns
+    # =========================================================================
+    print("\n=== Example 7: Full Integration ===\n")
+
+    stats = {"updates": 0, "characters": 0}
+
+    def track_stats(update: ChatResponseUpdate) -> ChatResponseUpdate:
+        """Track statistics as updates flow through."""
+        stats["updates"] += 1
+        stats["characters"] += len(update.text or "")
+        return update
+
+    def log_cleanup() -> None:
+        """Log when stream consumption completes."""
+        print(f" [Cleanup] Stream complete: {stats['updates']} updates, {stats['characters']} chars")
+
+    def add_stats_to_response(response: ChatResponse) -> ChatResponse:
+        """Result hook to include the statistics in the final response."""
+        response.additional_properties["stats"] = stats.copy()
+        return response
+
+    # All hooks can be passed via constructor
+    full_stream = ResponseStream(
+        generate_updates(),
+        finalizer=combine_updates,
+        transform_hooks=[track_stats],
+        result_hooks=[add_stats_to_response],
+        cleanup_hooks=[log_cleanup],
+    )
+
+    print("Processing with all hooks active:")
+    async for update in full_stream:
+        print(f" -> '{update.text}'")
+
+    final_full = await full_stream.get_final_response()
+    print(f"\nFinal: '{final_full.text}'")
+    print(f"Stats: {final_full.additional_properties['stats']}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/concepts/tools/README.md b/python/samples/concepts/tools/README.md
new file mode 100644
index 0000000000..3a270b25aa
--- /dev/null
+++ b/python/samples/concepts/tools/README.md
@@ -0,0 +1,499 @@
+# Tools and Middleware: Request Flow Architecture
+
+This document describes the complete request flow when using an Agent with middleware and tools, from the initial `Agent.run()` call through middleware layers, function invocation, and back to the caller.
+
+## Overview
+
+The Agent Framework uses a layered architecture with three distinct middleware/processing layers:
+
+1. **Agent Middleware Layer** - Wraps the entire agent execution
+2. **Chat Middleware Layer** - Wraps calls to the chat client
+3. **Function Middleware Layer** - Wraps individual tool/function invocations
+
+Each layer provides interception points where you can modify inputs, inspect outputs, or alter behavior.
+
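+As a minimal sketch of how the pieces fit together (import paths and the `as_agent()`/`instructions` arguments follow the samples in this repository, so treat the exact signatures as assumptions), middleware for different layers can be passed to a single `run()` call; `categorize_middleware()` then routes each entry to its layer:
+
+```python
+import asyncio
+
+from agent_framework import FunctionInvocationContext, chat_middleware
+from agent_framework.openai import OpenAIChatClient
+
+
+@chat_middleware
+async def log_chat(context, next):
+    # Chat layer: wraps every call to the chat client
+    print(f"[chat] {len(context.messages)} message(s) outbound")
+    await next(context)
+
+
+async def log_function(context: FunctionInvocationContext, next) -> None:
+    # Function layer: wraps each individual tool invocation
+    print(f"[function] invoking {context.function.name}")
+    await next(context)
+
+
+async def main() -> None:
+    agent = OpenAIChatClient().as_agent(instructions="You are a helpful assistant.")
+    # Both entries go into one list; the framework dispatches them by type
+    response = await agent.run("Hello!", middleware=[log_chat, log_function])
+    print(response.text)
+
+
+asyncio.run(main())
+```
+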
+## Flow Diagram
+
+```mermaid
+sequenceDiagram
+    participant User
+    participant Agent as Agent.run()
+    participant AML as AgentMiddlewareLayer
+    participant AMP as AgentMiddlewarePipeline
+    participant RawAgent as RawChatAgent.run()
+    participant CML as ChatMiddlewareLayer
+    participant CMP as ChatMiddlewarePipeline
+    participant FIL as FunctionInvocationLayer
+    participant Client as BaseChatClient._inner_get_response()
+    participant LLM as LLM Service
+    participant FMP as FunctionMiddlewarePipeline
+    participant Tool as FunctionTool.invoke()
+
+    User->>Agent: run(messages, thread, options, middleware)
+
+    Note over Agent,AML: Agent Middleware Layer
+    Agent->>AML: run() with middleware param
+    AML->>AML: categorize_middleware() → split by type
+    AML->>AMP: execute(AgentRunContext)
+
+    loop Agent Middleware Chain
+        AMP->>AMP: middleware[i].process(context, next)
+        Note right of AMP: Can modify: messages, options, thread
+    end
+
+    AMP->>RawAgent: run() via final_handler
+
+    alt Non-Streaming (stream=False)
+        RawAgent->>RawAgent: _prepare_run_context() [async]
+        Note right of RawAgent: Builds: thread_messages, chat_options, tools
+        RawAgent->>CML: chat_client.get_response(stream=False)
+    else Streaming (stream=True)
+        RawAgent->>RawAgent: ResponseStream.from_awaitable()
+        Note right of RawAgent: Defers async prep to stream consumption
+        RawAgent-->>User: Returns ResponseStream immediately
+        Note over RawAgent,CML: Async work happens on iteration
+        RawAgent->>RawAgent: _prepare_run_context() [deferred]
+        RawAgent->>CML: chat_client.get_response(stream=True)
+    end
+
+    Note over CML,CMP: Chat Middleware Layer
+    CML->>CMP: execute(ChatContext)
+
+    loop Chat Middleware Chain
+        CMP->>CMP: middleware[i].process(context, next)
+        Note right of CMP: Can modify: messages, options
+    end
+
+    CMP->>FIL: get_response() via final_handler
+
+    Note over FIL,Tool: Function Invocation Loop
+    loop Max Iterations (default: 40)
+        FIL->>Client: _inner_get_response(messages, options)
+        Client->>LLM: API Call
+        LLM-->>Client: Response (may include tool_calls)
+        Client-->>FIL: ChatResponse
+
+        alt Response has function_calls
+            FIL->>FIL: _extract_function_calls()
+            FIL->>FIL: _try_execute_function_calls()
+
+            Note over FIL,Tool: Function Middleware Layer
+            loop For each function_call
+                FIL->>FMP: execute(FunctionInvocationContext)
+                loop Function Middleware Chain
+                    FMP->>FMP: middleware[i].process(context, next)
+                    Note right of FMP: Can modify: arguments
+                end
+                FMP->>Tool: invoke(arguments)
+                Tool-->>FMP: result
+                FMP-->>FIL: Content.from_function_result()
+            end
+
+            FIL->>FIL: Append tool results to messages
+
+            alt tool_choice == "required"
+                Note right of FIL: Return immediately with function call + result
+                FIL-->>CMP: ChatResponse
+            else tool_choice == "auto" or other
+                Note right of FIL: Continue loop for text response
+            end
+        else No function_calls
+            FIL-->>CMP: ChatResponse
+        end
+    end
+
+    CMP-->>CML: ChatResponse
+    Note right of CMP: Can observe/modify result
+
+    CML-->>RawAgent: ChatResponse / ResponseStream
+
+    alt Non-Streaming
+        RawAgent->>RawAgent: _finalize_response_and_update_thread()
+    else Streaming
+        Note right of RawAgent: .map() transforms updates
+        Note right of RawAgent: .with_result_hook() runs post-processing
+    end
+
+    RawAgent-->>AMP: AgentResponse / ResponseStream
+    Note right of AMP: Can observe/modify result
+    AMP-->>AML: AgentResponse
+    AML-->>Agent: AgentResponse
+    Agent-->>User: AgentResponse / ResponseStream
+```
+
+## Layer Details
+
+### 1. Agent Middleware Layer (`AgentMiddlewareLayer`)
+
+**Entry Point:** `Agent.run(messages, thread, options, middleware)`
+
+**Context Object:** `AgentRunContext`
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `agent` | `AgentProtocol` | The agent being invoked |
+| `messages` | `list[ChatMessage]` | Input messages (mutable) |
+| `thread` | `AgentThread \| None` | Conversation thread |
+| `options` | `Mapping[str, Any]` | Chat options dict |
+| `stream` | `bool` | Whether streaming is enabled |
+| `metadata` | `dict` | Shared data between middleware |
+| `result` | `AgentResponse \| None` | Set after `next()` is called |
+| `kwargs` | `Mapping[str, Any]` | Additional run arguments |
+
+**Key Operations:**
+1. `categorize_middleware()` separates middleware by type (agent, chat, function)
+2. Chat and function middleware are forwarded to `chat_client`
+3. `AgentMiddlewarePipeline.execute()` runs the agent middleware chain
+4. Final handler calls `RawChatAgent.run()`
+
+**What Can Be Modified:**
+- `context.messages` - Add, remove, or modify input messages
+- `context.options` - Change model parameters, temperature, etc.
+- `context.thread` - Replace or modify the thread
+- `context.result` - Override the final response (after `next()`)
+
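+For example, an agent middleware might filter the mutable input messages before the run and stash data afterwards (a sketch; `AgentMiddleware` and the `AgentRunContext` fields are as documented above):
+
+```python
+class FilteringAgentMiddleware(AgentMiddleware):
+    """Drops input messages containing a blocked phrase."""
+
+    async def process(self, context: AgentRunContext, next):
+        # Pre-processing: context.messages is mutable, so filter in place
+        context.messages[:] = [
+            m for m in context.messages if not (m.text and "forbidden" in m.text.lower())
+        ]
+        await next(context)
+        # Post-processing: context.result now holds the AgentResponse
+        if context.result is not None:
+            context.metadata["surviving_messages"] = len(context.messages)
+```
+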
+### 2. Chat Middleware Layer (`ChatMiddlewareLayer`)
+
+**Entry Point:** `chat_client.get_response(messages, options)`
+
+**Context Object:** `ChatContext`
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `chat_client` | `ChatClientProtocol` | The chat client |
+| `messages` | `Sequence[ChatMessage]` | Messages to send |
+| `options` | `Mapping[str, Any]` | Chat options |
+| `stream` | `bool` | Whether streaming |
+| `metadata` | `dict` | Shared data between middleware |
+| `result` | `ChatResponse \| None` | Set after `next()` is called |
+| `kwargs` | `Mapping[str, Any]` | Additional arguments |
+
+**Key Operations:**
+1. `ChatMiddlewarePipeline.execute()` runs the chat middleware chain
+2. Final handler calls `FunctionInvocationLayer.get_response()`
+3. Stream hooks can be registered for streaming responses
+
+**What Can Be Modified:**
+- `context.messages` - Inject system prompts, filter content
+- `context.options` - Change model, temperature, tool_choice
+- `context.result` - Override the response (after `next()`)
+
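+For example, a chat middleware can prepend a system prompt before the request reaches the client (a sketch; it assumes a `Role.SYSTEM` value alongside the `Role.USER`/`Role.ASSISTANT` values used in the samples):
+
+```python
+@chat_middleware
+async def inject_system_prompt(context: ChatContext, next):
+    # Prepend a system message for this request only
+    system = ChatMessage(role=Role.SYSTEM, text="Answer in at most one sentence.")
+    context.messages = [system, *context.messages]
+    await next(context)
+```
+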
+### 3. Function Invocation Layer (`FunctionInvocationLayer`)
+
+**Entry Point:** `FunctionInvocationLayer.get_response()`
+
+This layer manages the tool execution loop:
+
+1. **Calls** `BaseChatClient._inner_get_response()` to get LLM response
+2. **Extracts** function calls from the response
+3. **Executes** functions through the Function Middleware Pipeline
+4. **Appends** results to messages and loops back to step 1
+
+**Configuration:** `FunctionInvocationConfiguration`
+
+| Setting | Default | Description |
+|---------|---------|-------------|
+| `enabled` | `True` | Enable auto-invocation |
+| `max_iterations` | `40` | Maximum tool execution loops |
+| `max_consecutive_errors_per_request` | `3` | Error threshold before stopping |
+| `terminate_on_unknown_calls` | `False` | Raise error for unknown tools |
+| `additional_tools` | `[]` | Extra tools to register |
+| `include_detailed_errors` | `False` | Include exceptions in results |
+
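+How this configuration is attached to a chat client is not shown here, so the sketch below only constructs it using the settings documented in the table (assuming keyword-argument construction):
+
+```python
+config = FunctionInvocationConfiguration(
+    max_iterations=10,  # Stop the tool loop earlier than the default 40
+    max_consecutive_errors_per_request=1,
+    include_detailed_errors=True,  # Surface exception details in function results
+)
+```
+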
+**`tool_choice` Behavior:**
+
+The `tool_choice` option controls how the model uses available tools:
+
+| Value | Behavior |
+|-------|----------|
+| `"auto"` | Model decides whether to call a tool or respond with text. After tool execution, the loop continues to get a text response. |
+| `"none"` | Model is prevented from calling tools, will only respond with text. |
+| `"required"` | Model **must** call a tool. After tool execution, returns immediately with the function call and result—**no additional model call** is made. |
+| `{"mode": "required", "required_function_name": "fn"}` | Model must call the specified function. Same return behavior as `"required"`. |
+
+**Why `tool_choice="required"` returns immediately:**
+
+When you set `tool_choice="required"`, your intent is to force one or more tool calls (not all models support multiple forced calls, whether targeted by name or requested via `required` without a name). The framework respects this by:
+1. Getting the model's function call(s)
+2. Executing the tool(s)
+3. Returning the response(s) with both the function call message(s) and the function result(s)
+
+This avoids an infinite loop (model forced to call tools → executes → model forced to call tools again) and gives you direct access to the tool result.
+
+```python
+# With tool_choice="required", response contains function call + result only
+response = await client.get_response(
+    "What's the weather?",
+    options={"tool_choice": "required", "tools": [get_weather]}
+)
+
+# response.messages contains:
+# [0] Assistant message with function_call content
+# [1] Tool message with function_result content
+# (No text response from model)
+
+# To get a text response after tool execution, use tool_choice="auto"
+response = await client.get_response(
+    "What's the weather?",
+    options={"tool_choice": "auto", "tools": [get_weather]}
+)
+# response.text contains the model's interpretation of the weather data
+```
+
+### 4. Function Middleware Layer (`FunctionMiddlewarePipeline`)
+
+**Entry Point:** Called per function invocation within `_auto_invoke_function()`
+
+**Context Object:** `FunctionInvocationContext`
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `function` | `FunctionTool` | The function being invoked |
+| `arguments` | `BaseModel` | Validated Pydantic arguments |
+| `metadata` | `dict` | Shared data between middleware |
+| `result` | `Any` | Set after `next()` is called |
+| `kwargs` | `Mapping[str, Any]` | Runtime kwargs |
+
+**What Can Be Modified:**
+- `context.arguments` - Modify validated arguments before execution
+- `context.result` - Override the function result (after `next()`)
+- Raise `MiddlewareTermination` to skip execution and terminate the function invocation loop
+
+**Special Behavior:** When `MiddlewareTermination` is raised in function middleware, it signals that the function invocation loop should exit **without making another LLM call**. This is useful when middleware determines that no further processing is needed (e.g., a termination condition is met).
+
+```python
+class TerminatingMiddleware(FunctionMiddleware):
+    async def process(self, context: FunctionInvocationContext, next):
+        if self.should_terminate(context):
+            context.result = "terminated by middleware"
+            raise MiddlewareTermination  # Exit function invocation loop
+        await next(context)
+```
+
+## Arguments Added/Altered at Each Layer
+
+### Agent Layer → Chat Layer
+
+```python
+# RawChatAgent._prepare_run_context() builds:
+{
+ "thread": AgentThread, # Validated/created thread
+ "input_messages": [...], # Normalized input messages
+ "thread_messages": [...], # Messages from thread + context + input
+ "agent_name": "...", # Agent name for attribution
+ "chat_options": {
+ "model_id": "...",
+ "conversation_id": "...", # From thread.service_thread_id
+ "tools": [...], # Normalized tools + MCP tools
+ "temperature": ...,
+ "max_tokens": ...,
+ # ... other options
+ },
+ "filtered_kwargs": {...}, # kwargs minus 'chat_options'
+ "finalize_kwargs": {...}, # kwargs with 'thread' added
+}
+```
+
+### Chat Layer → Function Layer
+
+```python
+# Passed through to FunctionInvocationLayer:
+{
+ "messages": [...], # Prepared messages
+ "options": {...}, # Mutable copy of chat_options
+ "function_middleware": [...], # Function middleware from kwargs
+}
+```
+
+### Function Layer → Tool Invocation
+
+```python
+# FunctionInvocationContext receives:
+{
+ "function": FunctionTool, # The tool to invoke
+ "arguments": BaseModel, # Validated from function_call.arguments
+ "kwargs": {
+ # Runtime kwargs (filtered, no conversation_id)
+ },
+}
+```
+
+### Tool Result → Back Up
+
+```python
+# Content.from_function_result() creates:
+{
+ "type": "function_result",
+ "call_id": "...", # From function_call.call_id
+ "result": ..., # Serialized tool output
+ "exception": "..." | None, # Error message if failed
+}
+```
+
+## Middleware Control Flow
+
+There are three ways to exit a middleware's `process()` method:
+
+### 1. Return Normally (with or without calling `next`)
+
+Returns control to the upstream middleware, allowing its post-processing code to run.
+
+```python
+class CachingMiddleware(FunctionMiddleware):
+    async def process(self, context: FunctionInvocationContext, next):
+        # Option A: Return early WITHOUT calling next (skip downstream)
+        if cached := self.cache.get(context.function.name):
+            context.result = cached
+            return  # Upstream post-processing still runs
+
+        # Option B: Call next, then return normally
+        await next(context)
+        self.cache[context.function.name] = context.result
+        return  # Normal completion
+```
+
+### 2. Raise `MiddlewareTermination`
+
+Immediately exits the entire middleware chain. Upstream middleware's post-processing code is **skipped**.
+
+```python
+class BlockedFunctionMiddleware(FunctionMiddleware):
+    async def process(self, context: FunctionInvocationContext, next):
+        if context.function.name in self.blocked_functions:
+            context.result = "Function blocked by policy"
+            raise MiddlewareTermination("Blocked")  # Skips ALL post-processing
+        await next(context)
+```
+
+### 3. Raise Any Other Exception
+
+Bubbles up to the caller. The middleware chain is aborted and the exception propagates.
+
+```python
+class ValidationMiddleware(FunctionMiddleware):
+    async def process(self, context: FunctionInvocationContext, next):
+        if not self.is_valid(context.arguments):
+            raise ValueError("Invalid arguments")  # Bubbles up to user
+        await next(context)
+```
+
+## `return` vs `raise MiddlewareTermination`
+
+The key difference is what happens to **upstream middleware's post-processing**:
+
+```python
+class MiddlewareA(AgentMiddleware):
+    async def process(self, context, next):
+        print("A: before")
+        await next(context)
+        print("A: after")  # Does this run?
+
+class MiddlewareB(AgentMiddleware):
+    async def process(self, context, next):
+        print("B: before")
+        context.result = "early result"
+        # Choose one:
+        return  # Option 1
+        # raise MiddlewareTermination()  # Option 2
+```
+
+With middleware registered as `[MiddlewareA, MiddlewareB]`:
+
+| Exit Method | Output |
+|-------------|--------|
+| `return` | `A: before` → `B: before` → `A: after` |
+| `raise MiddlewareTermination` | `A: before` → `B: before` (no `A: after`) |
+
+**Use `return`** when you want upstream middleware to still process the result (e.g., logging, metrics).
+
+**Use `raise MiddlewareTermination`** when you want to completely bypass all remaining processing (e.g., blocking a request, returning cached response without any modification).
+
+## Calling `next()` or Not
+
+The decision to call `next(context)` determines whether downstream middleware and the actual operation execute:
+
+### Without calling `next()` - Skip downstream
+
+```python
+async def process(self, context, next):
+    context.result = "replacement result"
+    return  # Downstream middleware and actual execution are SKIPPED
+```
+
+- Downstream middleware: ❌ NOT executed
+- Actual operation (LLM call, function invocation): ❌ NOT executed
+- Upstream middleware post-processing: ✅ Still runs (unless `MiddlewareTermination` raised)
+- Result: Whatever you set in `context.result`
+
+### With calling `next()` - Full execution
+
+```python
+async def process(self, context, next):
+    # Pre-processing
+    await next(context)  # Execute downstream + actual operation
+    # Post-processing (context.result now contains real result)
+    return
+```
+
+- Downstream middleware: ✅ Executed
+- Actual operation: ✅ Executed
+- Upstream middleware post-processing: ✅ Runs
+- Result: The actual result (possibly modified in post-processing)
+
+### Summary Table
+
+| Exit Method | Call `next()`? | Downstream Executes? | Actual Op Executes? | Upstream Post-Processing? |
+|-------------|----------------|---------------------|---------------------|--------------------------|
+| `return` (or implicit) | Yes | ✅ | ✅ | ✅ Yes |
+| `return` | No | ❌ | ❌ | ✅ Yes |
+| `raise MiddlewareTermination` | No | ❌ | ❌ | ❌ No |
+| `raise MiddlewareTermination` | Yes | ✅ | ✅ | ❌ No |
+| `raise OtherException` | Either | Depends | Depends | ❌ No (exception propagates) |
+
+> **Note:** The first row (`return` after calling `next()`) is the default behavior. Python functions implicitly return `None` at the end, so simply calling `await next(context)` without an explicit `return` statement achieves this pattern.
+
+## Streaming vs Non-Streaming
+
+The `run()` method handles streaming and non-streaming differently:
+
+### Non-Streaming (`stream=False`)
+
+Returns `Awaitable[AgentResponse]`:
+
+```python
+async def _run_non_streaming():
+    ctx = await self._prepare_run_context(...)  # Async preparation
+    response = await self.chat_client.get_response(stream=False, ...)
+    await self._finalize_response_and_update_thread(...)
+    return AgentResponse(...)
+```
+
+### Streaming (`stream=True`)
+
+Returns `ResponseStream[AgentResponseUpdate, AgentResponse]` **synchronously**:
+
+```python
+# Async preparation is deferred using ResponseStream.from_awaitable()
+async def _get_stream():
+    ctx = await self._prepare_run_context(...)  # Deferred until iteration
+    return self.chat_client.get_response(stream=True, ...)
+
+return (
+    ResponseStream.from_awaitable(_get_stream())
+    .map(
+        transform=map_chat_to_agent_update,  # Transform each update
+        finalizer=self._finalize_response_updates,  # Build final response
+    )
+    .with_result_hook(_post_hook)  # Post-processing after finalization
+)
+```
+
+Key points:
+- `ResponseStream.from_awaitable()` wraps an awaitable (here, the coroutine returned by `_get_stream()`), deferring execution until the stream is consumed
+- `.map()` transforms `ChatResponseUpdate` → `AgentResponseUpdate` and provides the finalizer
+- `.with_result_hook()` runs after finalization (e.g., notify thread of new messages)
+
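+From the caller's side (mirroring the streaming samples in this repository), the same stream object is iterated for updates and can then be asked for the finalized response:
+
+```python
+stream = agent.run("Summarize the report.", stream=True)
+
+async for update in stream:  # Iteration triggers the deferred preparation
+    print(update.text or "", end="", flush=True)
+
+final = await stream.get_final_response()  # AgentResponse built by the finalizer
+```
+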
+## See Also
+
+- [Middleware Samples](../../getting_started/middleware/) - Examples of custom middleware
+- [Function Tool Samples](../../getting_started/tools/) - Creating and using tools
diff --git a/python/samples/getting_started/chat_client/typed_options.py b/python/samples/concepts/typed_options.py
similarity index 100%
rename from python/samples/getting_started/chat_client/typed_options.py
rename to python/samples/concepts/typed_options.py
diff --git a/python/samples/demos/chatkit-integration/README.md b/python/samples/demos/chatkit-integration/README.md
index 688d24aebf..9636c4b190 100644
--- a/python/samples/demos/chatkit-integration/README.md
+++ b/python/samples/demos/chatkit-integration/README.md
@@ -118,7 +118,7 @@ agent_messages = await converter.to_agent_input(user_message_item)
# Running agent and streaming back to ChatKit
async for event in stream_agent_response(
- self.weather_agent.run_stream(agent_messages),
+ self.weather_agent.run(agent_messages, stream=True),
thread_id=thread.id,
):
yield event
diff --git a/python/samples/demos/chatkit-integration/app.py b/python/samples/demos/chatkit-integration/app.py
index 11b3140769..84ac060033 100644
--- a/python/samples/demos/chatkit-integration/app.py
+++ b/python/samples/demos/chatkit-integration/app.py
@@ -18,7 +18,7 @@
import uvicorn
# Agent Framework imports
-from agent_framework import AgentResponseUpdate, ChatAgent, ChatMessage, FunctionResultContent, tool
+from agent_framework import AgentResponseUpdate, ChatAgent, ChatMessage, FunctionResultContent, Role, tool
from agent_framework.azure import AzureOpenAIChatClient
# Agent Framework ChatKit integration
@@ -281,7 +281,7 @@ async def _update_thread_title(
title_prompt = [
ChatMessage(
- role="user",
+ role=Role.USER,
text=(
f"Generate a very short, concise title (max 40 characters) for a conversation "
f"that starts with:\n\n{conversation_context}\n\n"
@@ -366,7 +366,7 @@ async def respond(
logger.info(f"Running agent with {len(agent_messages)} message(s)")
# Run the Agent Framework agent with streaming
- agent_stream = self.weather_agent.run_stream(agent_messages)
+ agent_stream = self.weather_agent.run(agent_messages, stream=True)
# Create an intercepting stream that extracts function results while passing through updates
async def intercept_stream() -> AsyncIterator[AgentResponseUpdate]:
@@ -458,12 +458,12 @@ async def action(
weather_data: WeatherData | None = None
# Create an agent message asking about the weather
- agent_messages = [ChatMessage("user", [f"What's the weather in {city_label}?"])]
+ agent_messages = [ChatMessage(role=Role.USER, text=f"What's the weather in {city_label}?")]
logger.debug(f"Processing weather query: {agent_messages[0].text}")
# Run the Agent Framework agent with streaming
- agent_stream = self.weather_agent.run_stream(agent_messages)
+ agent_stream = self.weather_agent.run(agent_messages, stream=True)
# Create an intercepting stream that extracts function results while passing through updates
async def intercept_stream() -> AsyncIterator[AgentResponseUpdate]:
diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py
index 665be0667e..e32916a864 100644
--- a/python/samples/demos/workflow_evaluation/create_workflow.py
+++ b/python/samples/demos/workflow_evaluation/create_workflow.py
@@ -189,7 +189,7 @@ async def _run_workflow_with_client(query: str, chat_client: AzureAIClient) -> d
workflow, agent_map = await _create_workflow(chat_client.project_client, chat_client.credential)
# Process workflow events
- events = workflow.run_stream(query)
+ events = workflow.run(query, stream=True)
workflow_output = await _process_workflow_events(events, conversation_ids, response_ids)
return {
diff --git a/python/samples/getting_started/agents/anthropic/anthropic_advanced.py b/python/samples/getting_started/agents/anthropic/anthropic_advanced.py
index 7ba38d12b7..4737903ca5 100644
--- a/python/samples/getting_started/agents/anthropic/anthropic_advanced.py
+++ b/python/samples/getting_started/agents/anthropic/anthropic_advanced.py
@@ -38,7 +38,7 @@ async def main() -> None:
query = "Can you compare Python decorators with C# attributes?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
for content in chunk.contents:
if isinstance(content, TextReasoningContent):
print(f"\033[32m{content.text}\033[0m", end="", flush=True)
diff --git a/python/samples/getting_started/agents/anthropic/anthropic_basic.py b/python/samples/getting_started/agents/anthropic/anthropic_basic.py
index 18a49d5e88..1600d725b6 100644
--- a/python/samples/getting_started/agents/anthropic/anthropic_basic.py
+++ b/python/samples/getting_started/agents/anthropic/anthropic_basic.py
@@ -55,7 +55,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland and in Paris?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/anthropic/anthropic_foundry.py b/python/samples/getting_started/agents/anthropic/anthropic_foundry.py
index 728e4915c3..ac7c9ac95d 100644
--- a/python/samples/getting_started/agents/anthropic/anthropic_foundry.py
+++ b/python/samples/getting_started/agents/anthropic/anthropic_foundry.py
@@ -49,7 +49,7 @@ async def main() -> None:
query = "Can you compare Python decorators with C# attributes?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
for content in chunk.contents:
if isinstance(content, TextReasoningContent):
print(f"\033[32m{content.text}\033[0m", end="", flush=True)
diff --git a/python/samples/getting_started/agents/anthropic/anthropic_skills.py b/python/samples/getting_started/agents/anthropic/anthropic_skills.py
index 009f485761..fa420269c0 100644
--- a/python/samples/getting_started/agents/anthropic/anthropic_skills.py
+++ b/python/samples/getting_started/agents/anthropic/anthropic_skills.py
@@ -53,7 +53,7 @@ async def main() -> None:
print(f"User: {query}")
print("Agent: ", end="", flush=True)
files: list[HostedFileContent] = []
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
for content in chunk.contents:
match content.type:
case "text":
diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py b/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py
index 77465c3c52..d9a80a3732 100644
--- a/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py
+++ b/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py
@@ -68,7 +68,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Tokyo?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_agent_as_tool.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_agent_as_tool.py
index 041f632d2f..b336e02d9d 100644
--- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_agent_as_tool.py
+++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_agent_as_tool.py
@@ -22,7 +22,7 @@ async def logging_middleware(
context: FunctionInvocationContext,
next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
- """Middleware that logs tool invocations to show the delegation flow."""
+ """MiddlewareTypes that logs tool invocations to show the delegation flow."""
print(f"[Calling tool: {context.function.name}]")
print(f"[Request: {context.arguments}]")
diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py
index 72e290e1b4..7e2b13635f 100644
--- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py
+++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py
@@ -11,7 +11,7 @@
Content,
HostedCodeInterpreterTool,
HostedFileContent,
- tool,
+ TextContent,
)
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
@@ -178,7 +178,7 @@ async def streaming_example() -> None:
file_contents_found: list[HostedFileContent] = []
text_chunks: list[str] = []
- async for update in agent.run_stream(QUERY):
+ async for update in agent.run(QUERY, stream=True):
if isinstance(update, AgentResponseUpdate):
for content in update.contents:
if content.type == "text":
diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py
index 3e2b520ede..b0c83dc206 100644
--- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py
+++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py
@@ -78,7 +78,7 @@ async def streaming_example() -> None:
text_chunks: list[str] = []
file_ids_found: list[str] = []
- async for update in agent.run_stream(QUERY):
+ async for update in agent.run(QUERY, stream=True):
if isinstance(update, AgentResponseUpdate):
for content in update.contents:
if content.type == "text":
diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_reasoning.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_reasoning.py
index 0cb6955620..06da57ea60 100644
--- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_reasoning.py
+++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_reasoning.py
@@ -68,7 +68,7 @@ async def streaming_example() -> None:
shown_reasoning_label = False
shown_text_label = False
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
for content in chunk.contents:
if content.type == "text_reasoning":
if not shown_reasoning_label:
diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py
index e06232cf56..34bd782a9b 100644
--- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py
+++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py
@@ -66,7 +66,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py
index 52da0c450c..20ccfe8de6 100644
--- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py
+++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py
@@ -87,7 +87,7 @@ async def main() -> None:
print("Agent: ", end="", flush=True)
# Stream the response and collect citations
citations: list[Annotation] = []
- async for chunk in agent.run_stream(user_input):
+ async for chunk in agent.run(user_input, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
# Collect citations from Azure AI Search responses
diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding_citations.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding_citations.py
index b1483b141b..fd1f321741 100644
--- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding_citations.py
+++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding_citations.py
@@ -58,7 +58,7 @@ async def main() -> None:
# Stream the response and collect citations
citations: list[Annotation] = []
- async for chunk in agent.run_stream(user_input):
+ async for chunk in agent.run(user_input, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py
index 665c707adc..385ca4dc92 100644
--- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py
+++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py
@@ -4,7 +4,6 @@
import os
from agent_framework import (
- AgentResponseUpdate,
HostedCodeInterpreterTool,
HostedFileContent,
)
@@ -60,10 +59,7 @@ async def main() -> None:
# Collect file_ids from the response
file_ids: list[str] = []
- async for chunk in agent.run_stream(query):
- if not isinstance(chunk, AgentResponseUpdate):
- continue
-
+ async for chunk in agent.run(query, stream=True):
for content in chunk.contents:
if content.type == "text":
print(content.text, end="", flush=True)
diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py
index 243ba55bf3..2bc74ef83c 100644
--- a/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py
+++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py
@@ -58,7 +58,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py
index b37af8f8de..3445bbcbc0 100644
--- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py
+++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py
@@ -55,7 +55,7 @@ async def main() -> None:
print(f"User: {query}")
print("Agent: ", end="", flush=True)
generated_code = ""
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
code_interpreter_chunk = get_code_interpreter_chunk(chunk)
diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py
index feb2ab5f89..e1e9fab2f5 100644
--- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py
+++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py
@@ -60,7 +60,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py
index af79b0465c..de20e03c4a 100644
--- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py
+++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py
@@ -58,7 +58,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py
index 7d346c8fc8..ec96a10dcd 100644
--- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py
+++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py
@@ -30,10 +30,10 @@ async def handle_approvals_without_thread(query: str, agent: "AgentProtocol"):
f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}"
f" with arguments: {user_input_needed.function_call.arguments}"
)
- new_inputs.append(ChatMessage("assistant", [user_input_needed]))
+ new_inputs.append(ChatMessage(role="assistant", contents=[user_input_needed]))
user_approval = input("Approve function call? (y/n): ")
new_inputs.append(
- ChatMessage("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")])
+ ChatMessage(role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")])
)
result = await agent.run(new_inputs)
@@ -71,8 +71,8 @@ async def handle_approvals_with_thread_streaming(query: str, agent: "AgentProtoc
new_input_added = True
while new_input_added:
new_input_added = False
- new_input.append(ChatMessage("user", [query]))
- async for update in agent.run_stream(new_input, thread=thread, store=True):
+ new_input.append(ChatMessage(role="user", text=query))
+ async for update in agent.run(new_input, thread=thread, options={"store": True}, stream=True):
if update.user_input_requests:
for user_input_needed in update.user_input_requests:
print(
diff --git a/python/samples/getting_started/agents/copilotstudio/copilotstudio_basic.py b/python/samples/getting_started/agents/copilotstudio/copilotstudio_basic.py
index e3b571a664..760ed4d127 100644
--- a/python/samples/getting_started/agents/copilotstudio/copilotstudio_basic.py
+++ b/python/samples/getting_started/agents/copilotstudio/copilotstudio_basic.py
@@ -39,7 +39,7 @@ async def streaming_example() -> None:
query = "What is the capital of Spain?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/custom/README.md b/python/samples/getting_started/agents/custom/README.md
index 62e426b7af..eba87c4350 100644
--- a/python/samples/getting_started/agents/custom/README.md
+++ b/python/samples/getting_started/agents/custom/README.md
@@ -7,20 +7,63 @@ This folder contains examples demonstrating how to implement custom agents and c
| File | Description |
|------|-------------|
| [`custom_agent.py`](custom_agent.py) | Shows how to create custom agents by extending the `BaseAgent` class. Demonstrates the `EchoAgent` implementation with both streaming and non-streaming responses, proper thread management, and message history handling. |
-| [`custom_chat_client.py`](custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows the `EchoingChatClient` implementation and how to integrate it with `ChatAgent` using the `create_agent()` method. |
+| [`custom_chat_client.py`](../../chat_client/custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows an `EchoingChatClient` implementation and how to integrate it with `ChatAgent` using the `as_agent()` method. |
## Key Takeaways
### Custom Agents
- Custom agents give you complete control over the agent's behavior
-- You must implement both `run()` (for complete responses) and `run_stream()` (for streaming responses)
+- You must implement `run()` to handle both the `stream=True` and `stream=False` cases
- Use `self._normalize_messages()` to handle different input message formats
- Use `self._notify_thread_of_new_messages()` to properly manage conversation history
### Custom Chat Clients
- Custom chat clients allow you to integrate any backend service or create new LLM providers
-- You must implement both `_inner_get_response()` and `_inner_get_streaming_response()`
+- You must implement `_inner_get_response()` with a stream parameter to handle both streaming and non-streaming responses
- Custom chat clients can be used with `ChatAgent` to leverage all agent framework features
-- Use the `create_agent()` method to easily create agents from your custom chat clients
+- Use the `as_agent()` method to easily create agents from your custom chat clients
-Both approaches allow you to extend the framework for your specific use cases while maintaining compatibility with the broader Agent Framework ecosystem.
\ No newline at end of file
+Both approaches allow you to extend the framework for your specific use cases while maintaining compatibility with the broader Agent Framework ecosystem.
+
+## Understanding Raw Client Classes
+
+The framework provides `Raw...Client` classes (e.g., `RawOpenAIChatClient`, `RawOpenAIResponsesClient`, `RawAzureAIClient`) that are intermediate implementations without middleware, telemetry, or function invocation support.
+
+### Warning: Raw Clients Should Not Normally Be Used Directly
+
+**The `Raw...Client` classes should not normally be used directly.** They do not include the middleware, telemetry, or function invocation support that you most likely need. If you do use them, you should carefully consider which additional layers to apply.
+
+### Layer Ordering
+
+There is a defined ordering for applying layers that you should follow:
+
+1. **ChatMiddlewareLayer** - Should be applied **first** because it also prepares function middleware
+2. **FunctionInvocationLayer** - Handles tool/function calling loop
+3. **ChatTelemetryLayer** - Must be **inside** the function calling loop for correct per-call telemetry
+4. **Raw...Client** - The base implementation (e.g., `RawOpenAIChatClient`)
+
+Example of correct layer composition:
+
+```python
+class MyCustomClient(
+    ChatMiddlewareLayer[TOptions],
+    FunctionInvocationLayer[TOptions],
+    ChatTelemetryLayer[TOptions],
+    RawOpenAIChatClient[TOptions],  # or BaseChatClient for custom implementations
+    Generic[TOptions],
+):
+    """Custom client with all layers correctly applied."""
+    pass
+```
+
+### Use Fully-Featured Clients Instead
+
+For most use cases, use the fully-featured public client classes which already have all layers correctly composed:
+
+- `OpenAIChatClient` - OpenAI Chat completions with all layers
+- `OpenAIResponsesClient` - OpenAI Responses API with all layers
+- `AzureOpenAIChatClient` - Azure OpenAI Chat with all layers
+- `AzureOpenAIResponsesClient` - Azure OpenAI Responses with all layers
+- `AzureAIClient` - Azure AI Project with all layers
+
+These clients handle the layer composition correctly and provide the full feature set out of the box.
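+
+As a quick illustration (the `instructions` argument follows the other samples and may vary), a fully-featured client can be used directly or turned into an agent:
+
+```python
+import asyncio
+
+from agent_framework.openai import OpenAIChatClient
+
+
+async def main() -> None:
+    client = OpenAIChatClient()
+
+    # Direct chat call; middleware, telemetry, and tool invocation are already layered
+    response = await client.get_response("Hello!")
+    print(response.text)
+
+    # Or wrap the client in an agent
+    agent = client.as_agent(instructions="You are a helpful assistant.")
+    result = await agent.run("Hello!")
+    print(result.text)
+
+
+asyncio.run(main())
+```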
diff --git a/python/samples/getting_started/agents/custom/custom_agent.py b/python/samples/getting_started/agents/custom/custom_agent.py
index cc3c376964..c29424dcbf 100644
--- a/python/samples/getting_started/agents/custom/custom_agent.py
+++ b/python/samples/getting_started/agents/custom/custom_agent.py
@@ -11,6 +11,8 @@
BaseAgent,
ChatMessage,
Content,
+ Role,
+ TextContent,
)
"""
@@ -25,7 +27,7 @@ class EchoAgent(BaseAgent):
"""A simple custom agent that echoes user messages with a prefix.
This demonstrates how to create a fully custom agent by extending BaseAgent
- and implementing the required run() and run_stream() methods.
+ and implementing the required run() method with stream support.
"""
echo_prefix: str = "Echo: "
@@ -53,30 +55,45 @@ def __init__(
**kwargs,
)
- async def run(
+ def run(
self,
messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
*,
+ stream: bool = False,
thread: AgentThread | None = None,
**kwargs: Any,
- ) -> AgentResponse:
- """Execute the agent and return a complete response.
+ ) -> "AsyncIterable[AgentResponseUpdate] | asyncio.Future[AgentResponse]":
+ """Execute the agent and return a response.
Args:
messages: The message(s) to process.
+ stream: If True, return an async iterable of updates. If False, return an awaitable response.
thread: The conversation thread (optional).
**kwargs: Additional keyword arguments.
Returns:
- An AgentResponse containing the agent's reply.
+ When stream=False: An awaitable AgentResponse containing the agent's reply.
+ When stream=True: An async iterable of AgentResponseUpdate objects.
"""
+ if stream:
+ return self._run_stream(messages=messages, thread=thread, **kwargs)
+ return self._run(messages=messages, thread=thread, **kwargs)
+
+ async def _run(
+ self,
+ messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
+ *,
+ thread: AgentThread | None = None,
+ **kwargs: Any,
+ ) -> AgentResponse:
+ """Non-streaming implementation."""
# Normalize input messages to a list
normalized_messages = self._normalize_messages(messages)
if not normalized_messages:
response_message = ChatMessage(
- "assistant",
- [Content.from_text(text="Hello! I'm a custom echo agent. Send me a message and I'll echo it back.")],
+ role=Role.ASSISTANT,
+ contents=[Content.from_text(text="Hello! I'm a custom echo agent. Send me a message and I'll echo it back.")],
)
else:
# For simplicity, echo the last user message
@@ -86,7 +103,7 @@ async def run(
else:
echo_text = f"{self.echo_prefix}[Non-text message received]"
- response_message = ChatMessage("assistant", [Content.from_text(text=echo_text)])
+ response_message = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=echo_text)])
# Notify the thread of new messages if provided
if thread is not None:
@@ -94,23 +111,14 @@ async def run(
return AgentResponse(messages=[response_message])
- async def run_stream(
+ async def _run_stream(
self,
messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
**kwargs: Any,
) -> AsyncIterable[AgentResponseUpdate]:
- """Execute the agent and yield streaming response updates.
-
- Args:
- messages: The message(s) to process.
- thread: The conversation thread (optional).
- **kwargs: Additional keyword arguments.
-
- Yields:
- AgentResponseUpdate objects containing chunks of the response.
- """
+ """Streaming implementation."""
# Normalize input messages to a list
normalized_messages = self._normalize_messages(messages)
@@ -132,7 +140,7 @@ async def run_stream(
yield AgentResponseUpdate(
contents=[Content.from_text(text=chunk_text)],
- role="assistant",
+ role=Role.ASSISTANT,
)
# Small delay to simulate streaming
@@ -140,7 +148,7 @@ async def run_stream(
# Notify the thread of the complete response if provided
if thread is not None:
- complete_response = ChatMessage("assistant", [Content.from_text(text=response_text)])
+ complete_response = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=response_text)])
await self._notify_thread_of_new_messages(thread, normalized_messages, complete_response)
@@ -167,7 +175,7 @@ async def main() -> None:
query2 = "This is a streaming test"
print(f"\nUser: {query2}")
print("Agent: ", end="", flush=True)
- async for chunk in echo_agent.run_stream(query2):
+ async for chunk in echo_agent.run(query2, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print()
diff --git a/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py b/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py
index d23591eb02..0e2fa722b6 100644
--- a/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py
+++ b/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py
@@ -61,7 +61,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Tokyo?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/ollama/ollama_agent_basic.py b/python/samples/getting_started/agents/ollama/ollama_agent_basic.py
index 80b17e3b39..6477e620f0 100644
--- a/python/samples/getting_started/agents/ollama/ollama_agent_basic.py
+++ b/python/samples/getting_started/agents/ollama/ollama_agent_basic.py
@@ -54,7 +54,7 @@ async def streaming_example() -> None:
query = "What time is it in San Francisco? Use a tool call"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py b/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py
index 3250926030..ee22f5775b 100644
--- a/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py
+++ b/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py
@@ -2,7 +2,6 @@
import asyncio
-from agent_framework import TextReasoningContent
from agent_framework.ollama import OllamaChatClient
"""
@@ -18,7 +17,7 @@
"""
-async def reasoning_example() -> None:
+async def main() -> None:
print("=== Response Reasoning Example ===")
agent = OllamaChatClient().as_agent(
@@ -30,16 +29,10 @@ async def reasoning_example() -> None:
print(f"User: {query}")
# Enable Reasoning on per request level
result = await agent.run(query)
- reasoning = "".join((c.text or "") for c in result.messages[-1].contents if isinstance(c, TextReasoningContent))
+ reasoning = "".join((c.text or "") for c in result.messages[-1].contents if c.type == "text_reasoning")
print(f"Reasoning: {reasoning}")
print(f"Answer: {result}\n")
-async def main() -> None:
- print("=== Basic Ollama Chat Client Agent Reasoning ===")
-
- await reasoning_example()
-
-
if __name__ == "__main__":
asyncio.run(main())
diff --git a/python/samples/getting_started/agents/ollama/ollama_chat_client.py b/python/samples/getting_started/agents/ollama/ollama_chat_client.py
index 67c71ff249..07dd5cc368 100644
--- a/python/samples/getting_started/agents/ollama/ollama_chat_client.py
+++ b/python/samples/getting_started/agents/ollama/ollama_chat_client.py
@@ -33,7 +33,7 @@ async def main() -> None:
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_time):
+ async for chunk in client.get_response(message, tools=get_time, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py b/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py
index b555b7789f..da2468cb22 100644
--- a/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py
+++ b/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py
@@ -68,7 +68,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/openai/openai_assistants_basic.py b/python/samples/getting_started/agents/openai/openai_assistants_basic.py
index eb267b4a88..2fa4f79094 100644
--- a/python/samples/getting_started/agents/openai/openai_assistants_basic.py
+++ b/python/samples/getting_started/agents/openai/openai_assistants_basic.py
@@ -72,7 +72,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/openai/openai_assistants_with_code_interpreter.py b/python/samples/getting_started/agents/openai/openai_assistants_with_code_interpreter.py
index b4a25b8465..0599e796ea 100644
--- a/python/samples/getting_started/agents/openai/openai_assistants_with_code_interpreter.py
+++ b/python/samples/getting_started/agents/openai/openai_assistants_with_code_interpreter.py
@@ -60,7 +60,7 @@ async def main() -> None:
print(f"User: {query}")
print("Agent: ", end="", flush=True)
generated_code = ""
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
code_interpreter_chunk = get_code_interpreter_chunk(chunk)
diff --git a/python/samples/getting_started/agents/openai/openai_assistants_with_file_search.py b/python/samples/getting_started/agents/openai/openai_assistants_with_file_search.py
index 035b6e88f2..0046be1206 100644
--- a/python/samples/getting_started/agents/openai/openai_assistants_with_file_search.py
+++ b/python/samples/getting_started/agents/openai/openai_assistants_with_file_search.py
@@ -3,7 +3,7 @@
import asyncio
import os
-from agent_framework import HostedFileSearchTool, HostedVectorStoreContent
+from agent_framework import Content, HostedFileSearchTool
from agent_framework.openai import OpenAIAssistantProvider
from openai import AsyncOpenAI
@@ -15,7 +15,7 @@
"""
-async def create_vector_store(client: AsyncOpenAI) -> tuple[str, HostedVectorStoreContent]:
+async def create_vector_store(client: AsyncOpenAI) -> tuple[str, Content]:
"""Create a vector store with sample documents."""
file = await client.files.create(
file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="user_data"
@@ -28,7 +28,7 @@ async def create_vector_store(client: AsyncOpenAI) -> tuple[str, HostedVectorSto
if result.last_error is not None:
raise Exception(f"Vector store file processing failed with status: {result.last_error.message}")
- return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id)
+ return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id)
async def delete_vector_store(client: AsyncOpenAI, file_id: str, vector_store_id: str) -> None:
@@ -56,8 +56,10 @@ async def main() -> None:
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(
- query, tool_resources={"file_search": {"vector_store_ids": [vector_store.vector_store_id]}}
+ async for chunk in agent.run(
+ query,
+ stream=True,
+ options={"tool_resources": {"file_search": {"vector_store_ids": [vector_store.vector_store_id]}}},
):
if chunk.text:
print(chunk.text, end="", flush=True)
diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_basic.py b/python/samples/getting_started/agents/openai/openai_chat_client_basic.py
index 49cfb29447..b7137b2d43 100644
--- a/python/samples/getting_started/agents/openai/openai_chat_client_basic.py
+++ b/python/samples/getting_started/agents/openai/openai_chat_client_basic.py
@@ -54,7 +54,7 @@ async def streaming_example() -> None:
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py
index 945b2deff8..f1f39db38a 100644
--- a/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py
+++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py
@@ -74,8 +74,9 @@ async def streaming_example() -> None:
print(f"User: {query}")
chunks: list[str] = []
- async for chunk in agent.run_stream(
+ async for chunk in agent.run(
query,
+ stream=True,
options={
"response_format": {
"type": "json_schema",
diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py
index c317e163ad..eb1072f945 100644
--- a/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py
+++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py
@@ -34,7 +34,7 @@ async def main() -> None:
if stream:
print("Assistant: ", end="")
- async for chunk in agent.run_stream(message):
+ async for chunk in agent.run(message, stream=True):
if chunk.text:
print(chunk.text, end="")
print("")
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py
index 4e7fcbf07d..06ecb55473 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py
@@ -1,10 +1,11 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
+from collections.abc import Awaitable, Callable
from random import randint
from typing import Annotated
-from agent_framework import ChatAgent, tool
+from agent_framework import ChatAgent, ChatContext, ChatMessage, ChatResponse, Role, chat_middleware, tool
from agent_framework.openai import OpenAIResponsesClient
from pydantic import Field
@@ -16,6 +17,47 @@
"""
+@chat_middleware
+async def security_and_override_middleware(
+ context: ChatContext,
+ next: Callable[[ChatContext], Awaitable[None]],
+) -> None:
+ """Function-based middleware that implements security filtering and response override."""
+ print("[SecurityMiddleware] Processing input...")
+
+ # Security check - block sensitive information
+ blocked_terms = ["password", "secret", "api_key", "token"]
+
+ for message in context.messages:
+ if message.text:
+ message_lower = message.text.lower()
+ for term in blocked_terms:
+ if term in message_lower:
+ print(f"[SecurityMiddleware] BLOCKED: Found '{term}' in message")
+
+ # Override the response instead of calling AI
+ context.result = ChatResponse(
+ messages=[
+ ChatMessage(
+ role=Role.ASSISTANT,
+ text="I cannot process requests containing sensitive information. "
+ "Please rephrase your question without including passwords, secrets, or other "
+ "sensitive data.",
+ )
+ ]
+ )
+
+ # Set terminate flag to stop execution
+ context.terminate = True
+ return
+
+ # Continue to next middleware or AI execution
+ await next(context)
+
+ print("[SecurityMiddleware] Response generated.")
+ print(type(context.result))
+
+
# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py.
@tool(approval_mode="never_require")
def get_weather(
@@ -47,25 +89,29 @@ async def streaming_example() -> None:
print("=== Streaming Response Example ===")
agent = ChatAgent(
- chat_client=OpenAIResponsesClient(),
+ chat_client=OpenAIResponsesClient(
+ middleware=[security_and_override_middleware],
+ ),
instructions="You are a helpful weather agent.",
- tools=get_weather,
+ # tools=get_weather,
)
query = "What's the weather like in Portland?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
+ response = agent.run(query, stream=True)
+ async for chunk in response:
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
+ print(f"Final Result: {await response.get_final_response()}")
async def main() -> None:
print("=== Basic OpenAI Responses Client Agent Example ===")
- await non_streaming_example()
await streaming_example()
+ await non_streaming_example()
if __name__ == "__main__":
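Two things are worth calling out in this rewrite. First, the chat middleware sets context.result and context.terminate to short-circuit the model call. Second, agent.run(query, stream=True) is called without await: it returns a stream object that is iterated and can afterwards produce the assembled response. A condensed sketch of that consumption pattern:

async def stream_then_finalize(agent, query: str) -> None:
    response = agent.run(query, stream=True)  # no await: returns the stream object
    async for chunk in response:
        if chunk.text:
            print(chunk.text, end="", flush=True)
    print()
    # The same stream hands back the combined response once consumed.
    print(f"Final Result: {await response.get_final_response()}")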
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_image_generation.py b/python/samples/getting_started/agents/openai/openai_responses_client_image_generation.py
index 9d9fcbf546..635b99e85f 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_image_generation.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_image_generation.py
@@ -3,7 +3,7 @@
import asyncio
import base64
-from agent_framework import Content, HostedImageGenerationTool, ImageGenerationToolResultContent
+from agent_framework import HostedImageGenerationTool
from agent_framework.openai import OpenAIResponsesClient
"""
@@ -70,7 +70,7 @@ async def main() -> None:
# Show information about the generated image
for message in result.messages:
for content in message.contents:
- if isinstance(content, ImageGenerationToolResultContent) and content.outputs:
+ if content.type == "image_generation" and content.outputs:
for output in content.outputs:
if output.type in ("data", "uri") and output.uri:
show_image_info(output.uri)
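This hunk shows the broader content-model change: dedicated classes such as ImageGenerationToolResultContent give way to a single Content type discriminated by its type string, so isinstance checks become string comparisons. A sketch of the filtering idiom, using the field names from this hunk:

def collect_image_uris(result) -> list[str]:
    """Gather generated-image URIs via type-string discrimination."""
    uris: list[str] = []
    for message in result.messages:
        for content in message.contents:
            if content.type == "image_generation" and content.outputs:
                for output in content.outputs:
                    if output.type in ("data", "uri") and output.uri:
                        uris.append(output.uri)
    return uris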
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py b/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py
index 06080db943..d920ba32c6 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py
@@ -55,7 +55,7 @@ async def streaming_reasoning_example() -> None:
print(f"User: {query}")
print(f"{agent.name}: ", end="", flush=True)
usage = None
- async for chunk in agent.run_stream(query):
+ async for chunk in agent.run(query, stream=True):
if chunk.contents:
for content in chunk.contents:
if content.type == "text_reasoning":
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py b/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py
index c5373b69f7..52e1e42eda 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py
@@ -67,7 +67,7 @@ async def main():
await output_dir.mkdir(exist_ok=True)
print(" Streaming response:")
- async for update in agent.run_stream(query):
+ async for update in agent.run(query, stream=True):
for content in update.contents:
# Handle partial images
# The final partial image IS the complete, full-quality image. Each partial
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_agent_as_tool.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_agent_as_tool.py
index 13b472e2a3..d90202a9af 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_agent_as_tool.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_agent_as_tool.py
@@ -21,7 +21,7 @@ async def logging_middleware(
context: FunctionInvocationContext,
next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
- """Middleware that logs tool invocations to show the delegation flow."""
+ """MiddlewareTypes that logs tool invocations to show the delegation flow."""
print(f"[Calling tool: {context.function.name}]")
print(f"[Request: {context.arguments}]")
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py
index 5a73752bd9..29f8fa358a 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py
@@ -4,9 +4,6 @@
from agent_framework import (
ChatAgent,
- CodeInterpreterToolCallContent,
- CodeInterpreterToolResultContent,
- Content,
HostedCodeInterpreterTool,
)
from agent_framework.openai import OpenAIResponsesClient
@@ -35,8 +32,8 @@ async def main() -> None:
print(f"Result: {result}\n")
for message in result.messages:
- code_blocks = [c for c in message.contents if isinstance(c, CodeInterpreterToolCallContent)]
- outputs = [c for c in message.contents if isinstance(c, CodeInterpreterToolResultContent)]
+ code_blocks = [c for c in message.contents if c.type == "code_interpreter_tool_input"]
+ outputs = [c for c in message.contents if c.type == "code_interpreter_tool_result"]
if code_blocks:
code_inputs = code_blocks[0].inputs or []
for content in code_inputs:
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py
index 3bac4d2cab..3784c5a715 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py
@@ -2,7 +2,7 @@
import asyncio
-from agent_framework import ChatAgent, HostedFileSearchTool, HostedVectorStoreContent
+from agent_framework import ChatAgent, Content, HostedFileSearchTool
from agent_framework.openai import OpenAIResponsesClient
"""
@@ -15,7 +15,7 @@
# Helper functions
-async def create_vector_store(client: OpenAIResponsesClient) -> tuple[str, HostedVectorStoreContent]:
+async def create_vector_store(client: OpenAIResponsesClient) -> tuple[str, Content]:
"""Create a vector store with sample documents."""
file = await client.client.files.create(
file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="user_data"
@@ -28,7 +28,7 @@ async def create_vector_store(client: OpenAIResponsesClient) -> tuple[str, Hoste
if result.last_error is not None:
raise Exception(f"Vector store file processing failed with status: {result.last_error.message}")
- return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id)
+ return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id)
async def delete_vector_store(client: OpenAIResponsesClient, file_id: str, vector_store_id: str) -> None:
@@ -55,7 +55,7 @@ async def main() -> None:
if stream:
print("Assistant: ", end="")
- async for chunk in agent.run_stream(message):
+ async for chunk in agent.run(message, stream=True):
if chunk.text:
print(chunk.text, end="")
print("")
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py
index 264971d8e7..30a8e55881 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py
@@ -29,10 +29,10 @@ async def handle_approvals_without_thread(query: str, agent: "AgentProtocol"):
f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}"
f" with arguments: {user_input_needed.function_call.arguments}"
)
- new_inputs.append(ChatMessage("assistant", [user_input_needed]))
+ new_inputs.append(ChatMessage(role="assistant", contents=[user_input_needed]))
user_approval = input("Approve function call? (y/n): ")
new_inputs.append(
- ChatMessage("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")])
+ ChatMessage(role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")])
)
result = await agent.run(new_inputs)
@@ -70,8 +70,8 @@ async def handle_approvals_with_thread_streaming(query: str, agent: "AgentProtoc
new_input_added = True
while new_input_added:
new_input_added = False
- new_input.append(ChatMessage("user", [query]))
- async for update in agent.run_stream(new_input, thread=thread, store=True):
+ new_input.append(ChatMessage(role="user", text=query))
+ async for update in agent.run(new_input, thread=thread, stream=True, options={"store": True}):
if update.user_input_requests:
for user_input_needed in update.user_input_requests:
print(
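ChatMessage likewise drops its positional form in these hunks: role and payload are keyword arguments, with contents=[...] for structured content and text=... for plain strings. A sketch of the approval round-trip built with the new constructor, following the hunk above:

from agent_framework import ChatMessage


def build_approval_exchange(user_input_needed, approved: bool) -> list[ChatMessage]:
    """Pair the assistant's approval request with the user's verdict."""
    return [
        ChatMessage(role="assistant", contents=[user_input_needed]),
        ChatMessage(
            role="user",
            contents=[user_input_needed.to_function_approval_response(approved)],
        ),
    ]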
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py
index e2709d2159..50ebcf9ad7 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py
@@ -35,7 +35,7 @@ async def streaming_with_mcp(show_raw_stream: bool = False) -> None:
query1 = "How to create an Azure storage account using az cli?"
print(f"User: {query1}")
print(f"{agent.name}: ", end="")
- async for chunk in agent.run_stream(query1):
+ async for chunk in agent.run(query1, stream=True):
if show_raw_stream:
print("Streamed event: ", chunk.raw_representation.raw_representation) # type:ignore
elif chunk.text:
@@ -46,7 +46,7 @@ async def streaming_with_mcp(show_raw_stream: bool = False) -> None:
query2 = "What is Microsoft Agent Framework?"
print(f"User: {query2}")
print(f"{agent.name}: ", end="")
- async for chunk in agent.run_stream(query2):
+ async for chunk in agent.run(query2, stream=True):
if show_raw_stream:
print("Streamed event: ", chunk.raw_representation.raw_representation) # type:ignore
elif chunk.text:
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py
index 9ed6afd11a..106a721e0f 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py
@@ -74,8 +74,9 @@ async def streaming_example() -> None:
print(f"User: {query}")
chunks: list[str] = []
- async for chunk in agent.run_stream(
+ async for chunk in agent.run(
query,
+ stream=True,
options={
"response_format": {
"type": "json_schema",
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_structured_output.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_structured_output.py
index c893f271b1..04277640cf 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_structured_output.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_structured_output.py
@@ -62,7 +62,7 @@ async def streaming_example() -> None:
# Get structured response from streaming agent using AgentResponse.from_agent_response_generator
# This method collects all streaming updates and combines them into a single AgentResponse
result = await AgentResponse.from_agent_response_generator(
- agent.run_stream(query, options={"response_format": OutputStruct}),
+ agent.run(query, stream=True, options={"response_format": OutputStruct}),
output_format_type=OutputStruct,
)
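AgentResponse.from_agent_response_generator consumes the stream returned by run(..., stream=True) and folds it into one response. A sketch with an illustrative pydantic model; extraction of the parsed value is elided here since that part of the sample is unchanged:

from agent_framework import AgentResponse
from pydantic import BaseModel


class OutputStruct(BaseModel):  # illustrative schema
    city: str
    forecast: str


async def structured_streaming(agent, query: str) -> AgentResponse:
    return await AgentResponse.from_agent_response_generator(
        agent.run(query, stream=True, options={"response_format": OutputStruct}),
        output_format_type=OutputStruct,
    )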
diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py
index 03ee48015f..24e0368512 100644
--- a/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py
@@ -34,7 +34,7 @@ async def main() -> None:
if stream:
print("Assistant: ", end="")
- async for chunk in agent.run_stream(message):
+ async for chunk in agent.run(message, stream=True):
if chunk.text:
print(chunk.text, end="")
print("")
diff --git a/python/samples/getting_started/chat_client/README.md b/python/samples/getting_started/chat_client/README.md
index 4b36865769..20060f691d 100644
--- a/python/samples/getting_started/chat_client/README.md
+++ b/python/samples/getting_started/chat_client/README.md
@@ -14,6 +14,7 @@ This folder contains simple examples demonstrating direct usage of various chat
| [`openai_assistants_client.py`](openai_assistants_client.py) | Direct usage of OpenAI Assistants Client for basic chat interactions with OpenAI assistants. |
| [`openai_chat_client.py`](openai_chat_client.py) | Direct usage of OpenAI Chat Client for chat interactions with OpenAI models. |
| [`openai_responses_client.py`](openai_responses_client.py) | Direct usage of OpenAI Responses Client for structured response generation with OpenAI models. |
+| [`custom_chat_client.py`](custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows an `EchoingChatClient` implementation and how to integrate it with `ChatAgent` using the `as_agent()` method. |
## Environment Variables
@@ -37,4 +38,4 @@ Depending on which client you're using, set the appropriate environment variable
- `OLLAMA_HOST`: Your Ollama server URL (defaults to `http://localhost:11434` if not set)
- `OLLAMA_MODEL_ID`: The Ollama model to use for chat (e.g., `llama3.2`, `llama2`, `codellama`)
-> **Note**: For Ollama, ensure you have Ollama installed and running locally with at least one model downloaded. Visit [https://ollama.com/](https://ollama.com/) for installation instructions.
\ No newline at end of file
+> **Note**: For Ollama, ensure you have Ollama installed and running locally with at least one model downloaded. Visit [https://ollama.com/](https://ollama.com/) for installation instructions.
diff --git a/python/samples/getting_started/chat_client/azure_ai_chat_client.py b/python/samples/getting_started/chat_client/azure_ai_chat_client.py
index 97aa015f13..b699add89e 100644
--- a/python/samples/getting_started/chat_client/azure_ai_chat_client.py
+++ b/python/samples/getting_started/chat_client/azure_ai_chat_client.py
@@ -36,7 +36,7 @@ async def main() -> None:
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/chat_client/azure_assistants_client.py b/python/samples/getting_started/chat_client/azure_assistants_client.py
index 99f4de5b9c..599593f54c 100644
--- a/python/samples/getting_started/chat_client/azure_assistants_client.py
+++ b/python/samples/getting_started/chat_client/azure_assistants_client.py
@@ -36,7 +36,7 @@ async def main() -> None:
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/chat_client/azure_chat_client.py b/python/samples/getting_started/chat_client/azure_chat_client.py
index 77b3358a39..13a299ca30 100644
--- a/python/samples/getting_started/chat_client/azure_chat_client.py
+++ b/python/samples/getting_started/chat_client/azure_chat_client.py
@@ -36,7 +36,7 @@ async def main() -> None:
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/chat_client/azure_responses_client.py b/python/samples/getting_started/chat_client/azure_responses_client.py
index 17a1ab335a..a0c3fa69df 100644
--- a/python/samples/getting_started/chat_client/azure_responses_client.py
+++ b/python/samples/getting_started/chat_client/azure_responses_client.py
@@ -42,21 +42,19 @@ async def main() -> None:
stream = True
print(f"User: {message}")
if stream:
- response = await ChatResponse.from_update_generator(
- client.get_streaming_response(message, tools=get_weather, options={"response_format": OutputStruct}),
+ response = await ChatResponse.from_chat_response_generator(
+ client.get_response(message, tools=get_weather, options={"response_format": OutputStruct}, stream=True),
output_format_type=OutputStruct,
)
- try:
- result = response.value
+ if result := response.try_parse_value(OutputStruct):
print(f"Assistant: {result}")
- except Exception:
+ else:
print(f"Assistant: {response.text}")
else:
response = await client.get_response(message, tools=get_weather, options={"response_format": OutputStruct})
- try:
- result = response.value
+ if result := response.try_parse_value(OutputStruct):
print(f"Assistant: {result}")
- except Exception:
+ else:
print(f"Assistant: {response.text}")
diff --git a/python/samples/getting_started/agents/custom/custom_chat_client.py b/python/samples/getting_started/chat_client/custom_chat_client.py
similarity index 65%
rename from python/samples/getting_started/agents/custom/custom_chat_client.py
rename to python/samples/getting_started/chat_client/custom_chat_client.py
index a6c38fcbca..b55b7a38d6 100644
--- a/python/samples/getting_started/agents/custom/custom_chat_client.py
+++ b/python/samples/getting_started/chat_client/custom_chat_client.py
@@ -3,40 +3,54 @@
import asyncio
import random
import sys
-from collections.abc import AsyncIterable, MutableSequence
-from typing import Any, ClassVar, Generic
+from collections.abc import AsyncIterable, Awaitable, Mapping, Sequence
+from typing import Any, ClassVar, Generic, TypedDict
from agent_framework import (
BaseChatClient,
ChatMessage,
+ ChatMiddlewareLayer,
+ ChatOptions,
ChatResponse,
ChatResponseUpdate,
Content,
- use_chat_middleware,
- use_function_invocation,
+ FunctionInvocationLayer,
+ ResponseStream,
+ Role,
)
from agent_framework._clients import TOptions_co
+from agent_framework.observability import ChatTelemetryLayer
+if sys.version_info >= (3, 13):
+ from typing import TypeVar
+else:
+ from typing_extensions import TypeVar
if sys.version_info >= (3, 12):
from typing import override # type: ignore # pragma: no cover
else:
from typing_extensions import override # type: ignore[import] # pragma: no cover
+
"""
Custom Chat Client Implementation Example
-This sample demonstrates implementing a custom chat client by extending BaseChatClient class,
-showing integration with ChatAgent and both streaming and non-streaming responses.
+This sample demonstrates implementing a custom chat client and optionally composing
+middleware, telemetry, and function invocation layers explicitly.
"""
+TOptions_co = TypeVar(
+ "TOptions_co",
+ bound=TypedDict, # type: ignore[valid-type]
+ default="ChatOptions",
+ covariant=True,
+)
+
-@use_function_invocation
-@use_chat_middleware
class EchoingChatClient(BaseChatClient[TOptions_co], Generic[TOptions_co]):
"""A custom chat client that echoes messages back with modifications.
This demonstrates how to implement a custom chat client by extending BaseChatClient
- and implementing the required _inner_get_response() and _inner_get_streaming_response() methods.
+ and implementing the required _inner_get_response() method.
"""
OTEL_PROVIDER_NAME: ClassVar[str] = "EchoingChatClient"
@@ -52,13 +66,14 @@ def __init__(self, *, prefix: str = "Echo:", **kwargs: Any) -> None:
self.prefix = prefix
@override
- async def _inner_get_response(
+ def _inner_get_response(
self,
*,
- messages: MutableSequence[ChatMessage],
- options: dict[str, Any],
+ messages: Sequence[ChatMessage],
+ stream: bool = False,
+ options: Mapping[str, Any],
**kwargs: Any,
- ) -> ChatResponse:
+ ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]:
"""Echo back the user's message with a prefix."""
if not messages:
response_text = "No messages to echo!"
@@ -66,7 +81,7 @@ async def _inner_get_response(
# Echo the last user message
last_user_message = None
for message in reversed(messages):
- if message.role == "user":
+ if message.role == Role.USER:
last_user_message = message
break
@@ -75,39 +90,46 @@ async def _inner_get_response(
else:
response_text = f"{self.prefix} [No text message found]"
- response_message = ChatMessage("assistant", [Content.from_text(text=response_text)])
+ response_message = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(response_text)])
- return ChatResponse(
+ response = ChatResponse(
messages=[response_message],
model_id="echo-model-v1",
response_id=f"echo-resp-{random.randint(1000, 9999)}",
)
- @override
- async def _inner_get_streaming_response(
- self,
- *,
- messages: MutableSequence[ChatMessage],
- options: dict[str, Any],
- **kwargs: Any,
- ) -> AsyncIterable[ChatResponseUpdate]:
- """Stream back the echoed message character by character."""
- # Get the complete response first
- response = await self._inner_get_response(messages=messages, options=options, **kwargs)
+ if not stream:
+
+ async def _get_response() -> ChatResponse:
+ return response
- if response.messages:
- response_text = response.messages[0].text or ""
+ return _get_response()
- # Stream character by character
- for char in response_text:
+ async def _stream() -> AsyncIterable[ChatResponseUpdate]:
+ response_text_local = response_message.text or ""
+ for char in response_text_local:
yield ChatResponseUpdate(
- contents=[Content.from_text(text=char)],
- role="assistant",
+ contents=[Content.from_text(char)],
+ role=Role.ASSISTANT,
response_id=f"echo-stream-resp-{random.randint(1000, 9999)}",
model_id="echo-model-v1",
)
await asyncio.sleep(0.05)
+ return ResponseStream(_stream(), finalizer=lambda updates: response)
+
+
+class EchoingChatClientWithLayers( # type: ignore[misc,type-var]
+ ChatMiddlewareLayer[TOptions_co],
+ ChatTelemetryLayer[TOptions_co],
+ FunctionInvocationLayer[TOptions_co],
+ EchoingChatClient[TOptions_co],
+ Generic[TOptions_co],
+):
+ """Echoing chat client that explicitly composes middleware, telemetry, and function layers."""
+
+ OTEL_PROVIDER_NAME: ClassVar[str] = "EchoingChatClientWithLayers"
+
async def main() -> None:
"""Demonstrates how to implement and use a custom chat client with ChatAgent."""
@@ -116,7 +138,7 @@ async def main() -> None:
# Create the custom chat client
print("--- EchoingChatClient Example ---")
- echo_client = EchoingChatClient(prefix="🔊 Echo:")
+ echo_client = EchoingChatClientWithLayers(prefix="🔊 Echo:")
# Use the chat client directly
print("Using chat client directly:")
@@ -141,7 +163,7 @@ async def main() -> None:
query2 = "Stream this message back to me"
print(f"\nUser: {query2}")
print("Agent: ", end="", flush=True)
- async for chunk in echo_agent.run_stream(query2):
+ async for chunk in echo_agent.run(query2, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
print()
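The heart of the custom-client rewrite is the ResponseStream contract: _inner_get_response returns either an awaitable ChatResponse or a ResponseStream whose finalizer maps the yielded updates to a final response. A self-contained sketch of constructing such a stream, assuming the constructor signature shown above:

from collections.abc import AsyncIterable

from agent_framework import (
    ChatMessage,
    ChatResponse,
    ChatResponseUpdate,
    Content,
    ResponseStream,
    Role,
)


def echo_stream(text: str) -> ResponseStream:
    """Wrap a string as a character-by-character ResponseStream."""

    async def _updates() -> AsyncIterable[ChatResponseUpdate]:
        for char in text:
            yield ChatResponseUpdate(contents=[Content.from_text(char)], role=Role.ASSISTANT)

    # The finalizer receives the collected updates and returns the final response.
    return ResponseStream(
        _updates(),
        finalizer=lambda updates: ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text=text)]),
    )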
diff --git a/python/samples/getting_started/chat_client/openai_assistants_client.py b/python/samples/getting_started/chat_client/openai_assistants_client.py
index 88aec44ed2..9ff13f39ab 100644
--- a/python/samples/getting_started/chat_client/openai_assistants_client.py
+++ b/python/samples/getting_started/chat_client/openai_assistants_client.py
@@ -34,7 +34,7 @@ async def main() -> None:
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/chat_client/openai_chat_client.py b/python/samples/getting_started/chat_client/openai_chat_client.py
index da50ae59bf..279d3eb186 100644
--- a/python/samples/getting_started/chat_client/openai_chat_client.py
+++ b/python/samples/getting_started/chat_client/openai_chat_client.py
@@ -34,7 +34,7 @@ async def main() -> None:
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if chunk.text:
print(chunk.text, end="")
print("")
diff --git a/python/samples/getting_started/chat_client/openai_responses_client.py b/python/samples/getting_started/chat_client/openai_responses_client.py
index c9d476faa3..a84066ea87 100644
--- a/python/samples/getting_started/chat_client/openai_responses_client.py
+++ b/python/samples/getting_started/chat_client/openai_responses_client.py
@@ -30,14 +30,14 @@ def get_weather(
async def main() -> None:
client = OpenAIResponsesClient()
message = "What's the weather in Amsterdam and in Paris?"
- stream = False
+ stream = True
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
- if chunk.text:
- print(chunk.text, end="")
- print("")
+ response = client.get_response(message, stream=True, tools=get_weather)
+ # TODO: review these method names; they could be confused with HTTP client terminology.
+ response.with_update_hook(lambda chunk: print(chunk.text, end=""))
+ await response.get_final_response()
else:
response = await client.get_response(message, tools=get_weather)
print(f"Assistant: {response}")
diff --git a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py
index a1c389fb2a..6e3e40a216 100644
--- a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py
+++ b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py
@@ -130,7 +130,7 @@ async def main() -> None:
print("Agent: ", end="", flush=True)
# Stream response
- async for chunk in agent.run_stream(user_input):
+ async for chunk in agent.run(user_input, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
diff --git a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py
index a504de7447..4fce526a1f 100644
--- a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py
+++ b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py
@@ -86,7 +86,7 @@ async def main() -> None:
print("Agent: ", end="", flush=True)
# Stream response
- async for chunk in agent.run_stream(user_input):
+ async for chunk in agent.run(user_input, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
diff --git a/python/samples/getting_started/devui/weather_agent_azure/agent.py b/python/samples/getting_started/devui/weather_agent_azure/agent.py
index 71525c24a1..b4dd667bed 100644
--- a/python/samples/getting_started/devui/weather_agent_azure/agent.py
+++ b/python/samples/getting_started/devui/weather_agent_azure/agent.py
@@ -14,6 +14,8 @@
ChatResponseUpdate,
Content,
FunctionInvocationContext,
+ Role,
+ TextContent,
chat_middleware,
function_middleware,
tool,
@@ -42,7 +44,7 @@ async def security_filter_middleware(
# Check only the last message (most recent user input)
last_message = context.messages[-1] if context.messages else None
- if last_message and last_message.role == "user" and last_message.text:
+ if last_message and last_message.role == Role.USER and last_message.text:
message_lower = last_message.text.lower()
for term in blocked_terms:
if term in message_lower:
@@ -52,12 +54,12 @@ async def security_filter_middleware(
"or other sensitive data."
)
- if context.is_streaming:
+ if context.stream:
# Streaming mode: return async generator
async def blocked_stream() -> AsyncIterable[ChatResponseUpdate]:
yield ChatResponseUpdate(
contents=[Content.from_text(text=error_message)],
- role="assistant",
+ role=Role.ASSISTANT,
)
context.result = blocked_stream()
@@ -66,7 +68,7 @@ async def blocked_stream() -> AsyncIterable[ChatResponseUpdate]:
context.result = ChatResponse(
messages=[
ChatMessage(
- role="assistant",
+ role=Role.ASSISTANT,
text=error_message,
)
]
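Aside from the Role enum, the rename to watch here is context.is_streaming to context.stream. Middleware that overrides a result must still branch on it, because a streaming result is an async iterable of updates while a non-streaming one is a ChatResponse. A condensed sketch of the branch used above:

from collections.abc import AsyncIterable

from agent_framework import ChatMessage, ChatResponse, ChatResponseUpdate, Content, Role


def override_with_error(context, error_message: str) -> None:
    """Replace the pending result, honoring streaming vs non-streaming mode."""
    if context.stream:

        async def blocked_stream() -> AsyncIterable[ChatResponseUpdate]:
            yield ChatResponseUpdate(contents=[Content.from_text(text=error_message)], role=Role.ASSISTANT)

        context.result = blocked_stream()
    else:
        context.result = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text=error_message)])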
diff --git a/python/samples/getting_started/middleware/agent_and_run_level_middleware.py b/python/samples/getting_started/middleware/agent_and_run_level_middleware.py
index ff4735c01c..32fd7a2e52 100644
--- a/python/samples/getting_started/middleware/agent_and_run_level_middleware.py
+++ b/python/samples/getting_started/middleware/agent_and_run_level_middleware.py
@@ -18,7 +18,7 @@
from pydantic import Field
"""
Agent-Level and Run-Level Middleware Example
This sample demonstrates the difference between agent-level and run-level middleware:
@@ -107,7 +107,7 @@ async def debugging_middleware(
"""Run-level debugging middleware for troubleshooting specific runs."""
print("[Debug] Debug mode enabled for this run")
print(f"[Debug] Messages count: {len(context.messages)}")
- print(f"[Debug] Is streaming: {context.is_streaming}")
+ print(f"[Debug] Is streaming: {context.stream}")
# Log existing metadata from agent middleware
if context.metadata:
@@ -163,7 +163,7 @@ async def function_logging_middleware(
async def main() -> None:
"""Example demonstrating agent-level and run-level middleware."""
- print("=== Agent-Level and Run-Level Middleware Example ===\n")
+ print("=== Agent-Level and Run-Level MiddlewareTypes Example ===\n")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
diff --git a/python/samples/getting_started/middleware/chat_middleware.py b/python/samples/getting_started/middleware/chat_middleware.py
index 548b1186fa..e7e807f27e 100644
--- a/python/samples/getting_started/middleware/chat_middleware.py
+++ b/python/samples/getting_started/middleware/chat_middleware.py
@@ -18,7 +18,7 @@
from pydantic import Field
"""
Chat Middleware Example
This sample demonstrates how to use chat middleware to observe and override
inputs sent to AI models. Chat middleware intercepts chat requests before they reach
@@ -31,8 +31,8 @@
The example covers:
- Class-based chat middleware inheriting from ChatMiddleware
- Function-based chat middleware with @chat_middleware decorator
- Middleware registration at agent level (applies to all runs)
- Middleware registration at run level (applies to specific run only)
"""
@@ -137,7 +137,7 @@ async def security_and_override_middleware(
async def class_based_chat_middleware() -> None:
"""Demonstrate class-based middleware at agent level."""
print("\n" + "=" * 60)
- print("Class-based Chat Middleware (Agent Level)")
+ print("Class-based Chat MiddlewareTypes (Agent Level)")
print("=" * 60)
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
@@ -161,7 +161,7 @@ async def class_based_chat_middleware() -> None:
async def function_based_chat_middleware() -> None:
"""Demonstrate function-based middleware at agent level."""
print("\n" + "=" * 60)
- print("Function-based Chat Middleware (Agent Level)")
+ print("Function-based Chat MiddlewareTypes (Agent Level)")
print("=" * 60)
async with (
@@ -191,7 +191,7 @@ async def function_based_chat_middleware() -> None:
async def run_level_middleware() -> None:
"""Demonstrate middleware registration at run level."""
print("\n" + "=" * 60)
- print("Run-level Chat Middleware")
+ print("Run-level Chat MiddlewareTypes")
print("=" * 60)
async with (
@@ -204,14 +204,14 @@ async def run_level_middleware() -> None:
) as agent,
):
# Scenario 1: Run without any middleware
- print("\n--- Scenario 1: No Middleware ---")
+ print("\n--- Scenario 1: No MiddlewareTypes ---")
query = "What's the weather in Tokyo?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Response: {result.text if result.text else 'No response'}")
# Scenario 2: Run with specific middleware for this call only (both enhancement and security)
- print("\n--- Scenario 2: With Run-level Middleware ---")
+ print("\n--- Scenario 2: With Run-level MiddlewareTypes ---")
print(f"User: {query}")
result = await agent.run(
query,
@@ -223,7 +223,7 @@ async def run_level_middleware() -> None:
print(f"Response: {result.text if result.text else 'No response'}")
# Scenario 3: Security test with run-level middleware
- print("\n--- Scenario 3: Security Test with Run-level Middleware ---")
+ print("\n--- Scenario 3: Security Test with Run-level MiddlewareTypes ---")
query = "Can you help me with my secret API key?"
print(f"User: {query}")
result = await agent.run(
@@ -235,7 +235,7 @@ async def run_level_middleware() -> None:
async def main() -> None:
"""Run all chat middleware examples."""
- print("Chat Middleware Examples")
+ print("Chat MiddlewareTypes Examples")
print("========================")
await class_based_chat_middleware()
diff --git a/python/samples/getting_started/middleware/class_based_middleware.py b/python/samples/getting_started/middleware/class_based_middleware.py
index 63ccfc998b..65fa279f19 100644
--- a/python/samples/getting_started/middleware/class_based_middleware.py
+++ b/python/samples/getting_started/middleware/class_based_middleware.py
@@ -20,7 +20,7 @@
from pydantic import Field
"""
Class-based Middleware Example
This sample demonstrates how to implement middleware using class-based approach by inheriting
from AgentMiddleware and FunctionMiddleware base classes. The example includes:
@@ -95,7 +95,7 @@ async def process(
async def main() -> None:
"""Example demonstrating class-based middleware."""
- print("=== Class-based Middleware Example ===")
+ print("=== Class-based MiddlewareTypes Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
diff --git a/python/samples/getting_started/middleware/decorator_middleware.py b/python/samples/getting_started/middleware/decorator_middleware.py
index 0ac600fd19..f16407918c 100644
--- a/python/samples/getting_started/middleware/decorator_middleware.py
+++ b/python/samples/getting_started/middleware/decorator_middleware.py
@@ -12,7 +12,7 @@
from azure.identity.aio import AzureCliCredential
"""
Decorator Middleware Example
This sample demonstrates how to use @agent_middleware and @function_middleware decorators
to explicitly mark middleware functions without requiring type annotations.
@@ -52,22 +52,22 @@ def get_current_time() -> str:
@agent_middleware # Decorator marks this as agent middleware - no type annotations needed
async def simple_agent_middleware(context, next): # type: ignore - parameters intentionally untyped to demonstrate decorator functionality
"""Agent middleware that runs before and after agent execution."""
- print("[Agent Middleware] Before agent execution")
+ print("[Agent MiddlewareTypes] Before agent execution")
await next(context)
- print("[Agent Middleware] After agent execution")
+ print("[Agent MiddlewareTypes] After agent execution")
@function_middleware # Decorator marks this as function middleware - no type annotations needed
async def simple_function_middleware(context, next): # type: ignore - parameters intentionally untyped to demonstrate decorator functionality
"""Function middleware that runs before and after function calls."""
- print(f"[Function Middleware] Before calling: {context.function.name}") # type: ignore
+ print(f"[Function MiddlewareTypes] Before calling: {context.function.name}") # type: ignore
await next(context)
- print(f"[Function Middleware] After calling: {context.function.name}") # type: ignore
+ print(f"[Function MiddlewareTypes] After calling: {context.function.name}") # type: ignore
async def main() -> None:
"""Example demonstrating decorator-based middleware."""
- print("=== Decorator Middleware Example ===")
+ print("=== Decorator MiddlewareTypes Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
diff --git a/python/samples/getting_started/middleware/exception_handling_with_middleware.py b/python/samples/getting_started/middleware/exception_handling_with_middleware.py
index 5efe9fe662..bc752e3615 100644
--- a/python/samples/getting_started/middleware/exception_handling_with_middleware.py
+++ b/python/samples/getting_started/middleware/exception_handling_with_middleware.py
@@ -10,7 +10,7 @@
from pydantic import Field
"""
Exception Handling with Middleware
This sample demonstrates how to use middleware for centralized exception handling in function calls.
The example shows:
@@ -54,7 +54,7 @@ async def exception_handling_middleware(
async def main() -> None:
"""Example demonstrating exception handling with middleware."""
- print("=== Exception Handling Middleware Example ===")
+ print("=== Exception Handling MiddlewareTypes Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
diff --git a/python/samples/getting_started/middleware/function_based_middleware.py b/python/samples/getting_started/middleware/function_based_middleware.py
index d58ac46c87..21defef491 100644
--- a/python/samples/getting_started/middleware/function_based_middleware.py
+++ b/python/samples/getting_started/middleware/function_based_middleware.py
@@ -16,7 +16,7 @@
from pydantic import Field
"""
Function-based Middleware Example
This sample demonstrates how to implement middleware using simple async functions instead of classes.
The example includes:
@@ -80,7 +80,7 @@ async def logging_function_middleware(
async def main() -> None:
"""Example demonstrating function-based middleware."""
- print("=== Function-based Middleware Example ===")
+ print("=== Function-based MiddlewareTypes Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
diff --git a/python/samples/getting_started/middleware/middleware_termination.py b/python/samples/getting_started/middleware/middleware_termination.py
index cbd82897b4..ea32bc606b 100644
--- a/python/samples/getting_started/middleware/middleware_termination.py
+++ b/python/samples/getting_started/middleware/middleware_termination.py
@@ -17,7 +17,7 @@
from pydantic import Field
"""
Middleware Termination Example
This sample demonstrates how middleware can terminate execution using the `context.terminate` flag.
The example includes:
@@ -40,7 +40,7 @@ def get_weather(
class PreTerminationMiddleware(AgentMiddleware):
- """Middleware that terminates execution before calling the agent."""
+ """MiddlewareTypes that terminates execution before calling the agent."""
def __init__(self, blocked_words: list[str]):
self.blocked_words = [word.lower() for word in blocked_words]
@@ -79,7 +79,7 @@ async def process(
class PostTerminationMiddleware(AgentMiddleware):
- """Middleware that allows processing but terminates after reaching max responses across multiple runs."""
+ """MiddlewareTypes that allows processing but terminates after reaching max responses across multiple runs."""
def __init__(self, max_responses: int = 1):
self.max_responses = max_responses
@@ -109,7 +109,7 @@ async def process(
async def pre_termination_middleware() -> None:
"""Demonstrate pre-termination middleware that blocks requests with certain words."""
- print("\n--- Example 1: Pre-termination Middleware ---")
+ print("\n--- Example 1: Pre-termination MiddlewareTypes ---")
async with (
AzureCliCredential() as credential,
AzureAIAgentClient(credential=credential).as_agent(
@@ -136,7 +136,7 @@ async def pre_termination_middleware() -> None:
async def post_termination_middleware() -> None:
"""Demonstrate post-termination middleware that limits responses across multiple runs."""
- print("\n--- Example 2: Post-termination Middleware ---")
+ print("\n--- Example 2: Post-termination MiddlewareTypes ---")
async with (
AzureCliCredential() as credential,
AzureAIAgentClient(credential=credential).as_agent(
@@ -170,7 +170,7 @@ async def post_termination_middleware() -> None:
async def main() -> None:
"""Example demonstrating middleware termination functionality."""
- print("=== Middleware Termination Example ===")
+ print("=== MiddlewareTypes Termination Example ===")
await pre_termination_middleware()
await post_termination_middleware()
diff --git a/python/samples/getting_started/middleware/override_result_with_middleware.py b/python/samples/getting_started/middleware/override_result_with_middleware.py
index fe55f993ed..06351d1803 100644
--- a/python/samples/getting_started/middleware/override_result_with_middleware.py
+++ b/python/samples/getting_started/middleware/override_result_with_middleware.py
@@ -1,7 +1,8 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
-from collections.abc import AsyncIterable, Awaitable, Callable
+import re
+from collections.abc import Awaitable, Callable
from random import randint
from typing import Annotated
@@ -9,16 +10,19 @@
AgentResponse,
AgentResponseUpdate,
AgentRunContext,
+ ChatContext,
ChatMessage,
- Content,
+ ChatResponse,
+ ChatResponseUpdate,
+ ResponseStream,
+ Role,
tool,
)
-from agent_framework.azure import AzureAIAgentClient
-from azure.identity.aio import AzureCliCredential
+from agent_framework.openai import OpenAIResponsesClient
from pydantic import Field
"""
Result Override with Middleware (Regular and Streaming)
This sample demonstrates how to use middleware to intercept and modify function results
after execution, supporting both regular and streaming agent responses. The example shows:
@@ -26,7 +30,7 @@
- How to execute the original function first and then modify its result
- Replacing function outputs with custom messages or transformed data
- Using middleware for result filtering, formatting, or enhancement
-- Detecting streaming vs non-streaming execution using context.is_streaming
+- Detecting streaming vs non-streaming execution using context.stream
- Overriding streaming results with custom async generators
The weather override middleware lets the original weather function execute normally,
@@ -45,10 +49,8 @@ def get_weather(
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
-async def weather_override_middleware(
- context: AgentRunContext, next: Callable[[AgentRunContext], Awaitable[None]]
-) -> None:
- """Middleware that overrides weather results for both streaming and non-streaming cases."""
+async def weather_override_middleware(context: ChatContext, next: Callable[[ChatContext], Awaitable[None]]) -> None:
+ """Chat middleware that overrides weather results for both streaming and non-streaming cases."""
# Let the original agent execution complete first
await next(context)
@@ -57,56 +59,159 @@ async def weather_override_middleware(
if context.result is not None:
# Create custom weather message
chunks = [
- "Weather Advisory - ",
"due to special atmospheric conditions, ",
"all locations are experiencing perfect weather today! ",
"Temperature is a comfortable 22°C with gentle breezes. ",
"Perfect day for outdoor activities!",
]
- if context.is_streaming:
- # For streaming: create an async generator that yields chunks
- async def override_stream() -> AsyncIterable[AgentResponseUpdate]:
- for chunk in chunks:
- yield AgentResponseUpdate(contents=[Content.from_text(text=chunk)])
+ if context.stream and isinstance(context.result, ResponseStream):
+ index = {"value": 0}
+
+ def _update_hook(update: ChatResponseUpdate) -> ChatResponseUpdate:
+ for content in update.contents or []:
+ if not content.text:
+ continue
+ content.text = f"Weather Advisory: [{index['value']}] {content.text}"
+ index["value"] += 1
+ return update
- context.result = override_stream()
+ context.result.with_update_hook(_update_hook)
else:
- # For non-streaming: just replace with the string message
- custom_message = "".join(chunks)
- context.result = AgentResponse(messages=[ChatMessage("assistant", [custom_message])])
+ # For non-streaming: just replace with a new message
+ current_text = context.result.text or ""
+ custom_message = f"Weather Advisory: [0] {''.join(chunks)} Original message was: {current_text}"
+ context.result = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text=custom_message)])
+
+
+async def validate_weather_middleware(context: ChatContext, next: Callable[[ChatContext], Awaitable[None]]) -> None:
+ """Chat middleware that simulates result validation for both streaming and non-streaming cases."""
+ await next(context)
+
+ validation_note = "Validation: weather data verified."
+
+ if context.result is None:
+ return
+
+ if context.stream and isinstance(context.result, ResponseStream):
+
+ def _append_validation_note(response: ChatResponse) -> ChatResponse:
+ response.messages.append(ChatMessage(role=Role.ASSISTANT, text=validation_note))
+ return response
+
+ context.result.with_finalizer(_append_validation_note)
+ elif isinstance(context.result, ChatResponse):
+ context.result.messages.append(ChatMessage(role=Role.ASSISTANT, text=validation_note))
+
+
+async def agent_cleanup_middleware(
+ context: AgentRunContext, next: Callable[[AgentRunContext], Awaitable[None]]
+) -> None:
+ """Agent middleware that validates chat middleware effects and cleans the result."""
+ await next(context)
+
+ if context.result is None:
+ return
+
+ validation_note = "Validation: weather data verified."
+
+ state = {"found_prefix": False}
+
+ def _sanitize(response: AgentResponse) -> AgentResponse:
+ found_prefix = state["found_prefix"]
+ found_validation = False
+ cleaned_messages: list[ChatMessage] = []
+
+ for message in response.messages:
+ text = message.text
+ if text is None:
+ cleaned_messages.append(message)
+ continue
+
+ if validation_note in text:
+ found_validation = True
+ text = text.replace(validation_note, "").strip()
+ if not text:
+ continue
+
+ if "Weather Advisory:" in text:
+ found_prefix = True
+ text = text.replace("Weather Advisory:", "")
+
+ text = re.sub(r"\[\d+\]\s*", "", text)
+
+ cleaned_messages.append(
+ ChatMessage(
+ role=message.role,
+ text=text.strip(),
+ author_name=message.author_name,
+ message_id=message.message_id,
+ additional_properties=message.additional_properties,
+ raw_representation=message.raw_representation,
+ )
+ )
+
+ if not found_prefix:
+ raise RuntimeError("Expected chat middleware prefix not found in agent response.")
+ if not found_validation:
+ raise RuntimeError("Expected validation note not found in agent response.")
+
+ cleaned_messages.append(ChatMessage(role=Role.ASSISTANT, text=" Agent: OK"))
+ response.messages = cleaned_messages
+ return response
+
+ if context.stream and isinstance(context.result, ResponseStream):
+
+ def _clean_update(update: AgentResponseUpdate) -> AgentResponseUpdate:
+ for content in update.contents or []:
+ if not content.text:
+ continue
+ text = content.text
+ if "Weather Advisory:" in text:
+ state["found_prefix"] = True
+ text = text.replace("Weather Advisory:", "")
+ text = re.sub(r"\[\d+\]\s*", "", text)
+ content.text = text
+ return update
+
+ context.result.with_update_hook(_clean_update)
+ context.result.with_finalizer(_sanitize)
+ elif isinstance(context.result, AgentResponse):
+ context.result = _sanitize(context.result)
async def main() -> None:
"""Example demonstrating result override with middleware for both streaming and non-streaming."""
- print("=== Result Override Middleware Example ===")
+ print("=== Result Override MiddlewareTypes Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
- async with (
- AzureCliCredential() as credential,
- AzureAIAgentClient(credential=credential).as_agent(
- name="WeatherAgent",
- instructions="You are a helpful weather assistant. Use the weather tool to get current conditions.",
- tools=get_weather,
- middleware=[weather_override_middleware],
- ) as agent,
- ):
- # Non-streaming example
- print("\n--- Non-streaming Example ---")
- query = "What's the weather like in Seattle?"
- print(f"User: {query}")
- result = await agent.run(query)
- print(f"Agent: {result}")
-
- # Streaming example
- print("\n--- Streaming Example ---")
- query = "What's the weather like in Portland?"
- print(f"User: {query}")
- print("Agent: ", end="", flush=True)
- async for chunk in agent.run_stream(query):
- if chunk.text:
- print(chunk.text, end="", flush=True)
+ agent = OpenAIResponsesClient(
+ middleware=[validate_weather_middleware, weather_override_middleware],
+ ).as_agent(
+ name="WeatherAgent",
+ instructions="You are a helpful weather assistant. Use the weather tool to get current conditions.",
+ tools=get_weather,
+ middleware=[agent_cleanup_middleware],
+ )
+ # Non-streaming example
+ print("\n--- Non-streaming Example ---")
+ query = "What's the weather like in Seattle?"
+ print(f"User: {query}")
+ result = await agent.run(query)
+ print(f"Agent: {result}")
+
+ # Streaming example
+ print("\n--- Streaming Example ---")
+ query = "What's the weather like in Portland?"
+ print(f"User: {query}")
+ print("Agent: ", end="", flush=True)
+ response = agent.run(query, stream=True)
+ async for chunk in response:
+ if chunk.text:
+ print(chunk.text, end="", flush=True)
+ print("\n")
+ print(f"Final Result: {(await response.get_final_response()).text}")
if __name__ == "__main__":
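The takeaway from this rewrite: when chat middleware runs after await next(context) in streaming mode, context.result is a live ResponseStream, so instead of replacing it the code attaches behavior, with with_update_hook rewriting chunks in flight and with_finalizer post-processing the assembled response. A condensed sketch of the finalizer pattern (the note text is illustrative):

from collections.abc import Awaitable, Callable

from agent_framework import ChatContext, ChatMessage, ChatResponse, ResponseStream, Role


async def stamping_middleware(context: ChatContext, next: Callable[[ChatContext], Awaitable[None]]) -> None:
    """Append an audit note to the result in both streaming and non-streaming mode."""
    await next(context)
    if context.result is None:
        return
    note = "Audit: response reviewed."  # illustrative
    if context.stream and isinstance(context.result, ResponseStream):

        def _append(response: ChatResponse) -> ChatResponse:
            response.messages.append(ChatMessage(role=Role.ASSISTANT, text=note))
            return response

        context.result.with_finalizer(_append)
    elif isinstance(context.result, ChatResponse):
        context.result.messages.append(ChatMessage(role=Role.ASSISTANT, text=note))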
diff --git a/python/samples/getting_started/middleware/runtime_context_delegation.py b/python/samples/getting_started/middleware/runtime_context_delegation.py
index 44ee2a7893..d4669239a6 100644
--- a/python/samples/getting_started/middleware/runtime_context_delegation.py
+++ b/python/samples/getting_started/middleware/runtime_context_delegation.py
@@ -16,9 +16,9 @@
Patterns Demonstrated:
1. **Pattern 1: Single Agent with Middleware & Closure** (Lines 130-180)
- Best for: Single agent with multiple tools
- How: Middleware stores kwargs in container, tools access via closure
- Pros: Simple, explicit state management
- Cons: Requires container instance per agent
@@ -28,7 +28,7 @@
- Pros: Automatic, works with nested delegation, clean separation
- Cons: None - this is the recommended pattern for hierarchical agents
3. **Pattern 3: Mixed - Hierarchical with Middleware** (Lines 250-300)
- Best for: Complex scenarios needing both delegation and state management
- How: Combines automatic kwargs propagation with middleware processing
- Pros: Maximum flexibility, can transform/validate context at each level
@@ -36,7 +36,7 @@
Key Concepts:
- Runtime Context: Session-specific data like API tokens, user IDs, tenant info
- Middleware: Intercepts function calls to access/modify kwargs
- Closure: Functions capturing variables from outer scope
- kwargs Propagation: Automatic forwarding of runtime context through delegation chains
"""
@@ -56,7 +56,7 @@ async def inject_context_middleware(
context: FunctionInvocationContext,
next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
- """Middleware that extracts runtime context from kwargs and stores in container.
+ """MiddlewareTypes that extracts runtime context from kwargs and stores in container.
This middleware runs before tool execution and makes runtime context
available to tools via the container instance.
@@ -68,7 +68,7 @@ async def inject_context_middleware(
# Log what we captured (for demonstration)
if self.api_token or self.user_id:
- print("[Middleware] Captured runtime context:")
+ print("[MiddlewareTypes] Captured runtime context:")
print(f" - API Token: {'[PRESENT]' if self.api_token else '[NOT PROVIDED]'}")
print(f" - User ID: {'[PRESENT]' if self.user_id else '[NOT PROVIDED]'}")
print(f" - Session Metadata Keys: {list(self.session_metadata.keys())}")
@@ -140,7 +140,7 @@ async def send_notification(
async def pattern_1_single_agent_with_closure() -> None:
"""Pattern 1: Single agent with middleware and closure for runtime context."""
print("\n" + "=" * 70)
- print("PATTERN 1: Single Agent with Middleware & Closure")
+ print("PATTERN 1: Single Agent with MiddlewareTypes & Closure")
print("=" * 70)
print("Use case: Single agent with multiple tools sharing runtime context")
print()
@@ -234,7 +234,7 @@ async def pattern_1_single_agent_with_closure() -> None:
print(f"\nAgent: {result4.text}")
- print("\n✓ Pattern 1 complete - Middleware & closure pattern works for single agents")
+ print("\n✓ Pattern 1 complete - MiddlewareTypes & closure pattern works for single agents")
# Pattern 2: Hierarchical agents with automatic kwargs propagation
@@ -353,7 +353,7 @@ async def sms_kwargs_tracker(
class AuthContextMiddleware:
- """Middleware that validates and transforms runtime context."""
+ """MiddlewareTypes that validates and transforms runtime context."""
def __init__(self) -> None:
self.validated_tokens: list[str] = []
@@ -387,7 +387,7 @@ async def protected_operation(operation: Annotated[str, Field(description="Opera
async def pattern_3_hierarchical_with_middleware() -> None:
"""Pattern 3: Hierarchical agents with middleware processing at each level."""
print("\n" + "=" * 70)
- print("PATTERN 3: Hierarchical with Middleware Processing")
+ print("PATTERN 3: Hierarchical with MiddlewareTypes Processing")
print("=" * 70)
print("Use case: Multi-level validation/transformation of runtime context")
print()
@@ -433,7 +433,7 @@ async def pattern_3_hierarchical_with_middleware() -> None:
)
print(f"\n[Validation Summary] Validated tokens: {len(auth_middleware.validated_tokens)}")
- print("✓ Pattern 3 complete - Middleware can validate/transform context at each level")
+ print("✓ Pattern 3 complete - MiddlewareTypes can validate/transform context at each level")
async def main() -> None:
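Since the docstring above is the main prose description of Pattern 1 (middleware plus closure), a compact sketch may help readers who don't open the sample. The container class, the `kwargs` attribute access, and the tool are illustrative; only the middleware signature is taken from the hunk at `@@ -56,7` above:

```python
from collections.abc import Awaitable, Callable

from agent_framework import FunctionInvocationContext


class ContextContainer:
    """Illustrative per-agent holder for runtime context."""

    def __init__(self) -> None:
        self.api_token: str | None = None

    async def inject_context_middleware(
        self,
        context: FunctionInvocationContext,
        next: Callable[[FunctionInvocationContext], Awaitable[None]],
    ) -> None:
        # Attribute name is an assumption: the sample reads runtime kwargs
        # off the invocation context and stashes them in the container.
        self.api_token = getattr(context, "kwargs", {}).get("api_token")
        await next(context)


container = ContextContainer()


async def call_protected_api() -> str:
    # The tool reaches the container through a closure, not a parameter.
    return "authorized" if container.api_token else "no token available"
```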
diff --git a/python/samples/getting_started/middleware/shared_state_middleware.py b/python/samples/getting_started/middleware/shared_state_middleware.py
index f2a5232262..f48ec3807d 100644
--- a/python/samples/getting_started/middleware/shared_state_middleware.py
+++ b/python/samples/getting_started/middleware/shared_state_middleware.py
@@ -14,7 +14,7 @@
from pydantic import Field
"""
-Shared State Function-based Middleware Example
+Shared State Function-based MiddlewareTypes Example
This sample demonstrates how to implement function-based middleware within a class to share state.
The example includes:
@@ -88,7 +88,7 @@ async def result_enhancer_middleware(
async def main() -> None:
"""Example demonstrating shared state function-based middleware."""
- print("=== Shared State Function-based Middleware Example ===")
+ print("=== Shared State Function-based MiddlewareTypes Example ===")
# Create middleware container with shared state
middleware_container = MiddlewareContainer()
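Only `MiddlewareContainer` and `result_enhancer_middleware` are visible in the hunks above; the method bodies below are a sketch of the shared-state idea the docstring describes, with an assumed second middleware for contrast:

```python
from collections.abc import Awaitable, Callable

from agent_framework import FunctionInvocationContext


class MiddlewareContainer:
    """Holds state shared by its function-based middleware methods."""

    def __init__(self) -> None:
        self.function_calls = 0  # shared between the two methods below

    async def call_counter_middleware(
        self,
        context: FunctionInvocationContext,
        next: Callable[[FunctionInvocationContext], Awaitable[None]],
    ) -> None:
        self.function_calls += 1
        await next(context)

    async def result_enhancer_middleware(
        self,
        context: FunctionInvocationContext,
        next: Callable[[FunctionInvocationContext], Awaitable[None]],
    ) -> None:
        await next(context)
        # Post-processing after execution can consult the shared counter.
        print(f"[enhancer] seen {self.function_calls} call(s) so far")
```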
diff --git a/python/samples/getting_started/middleware/thread_behavior_middleware.py b/python/samples/getting_started/middleware/thread_behavior_middleware.py
index 5cca8cb635..93f72d567a 100644
--- a/python/samples/getting_started/middleware/thread_behavior_middleware.py
+++ b/python/samples/getting_started/middleware/thread_behavior_middleware.py
@@ -14,7 +14,7 @@
from pydantic import Field
"""
-Thread Behavior Middleware Example
+Thread Behavior MiddlewareTypes Example
This sample demonstrates how middleware can access and track thread state across multiple agent runs.
The example shows:
@@ -48,13 +48,13 @@ async def thread_tracking_middleware(
context: AgentRunContext,
next: Callable[[AgentRunContext], Awaitable[None]],
) -> None:
- """Middleware that tracks and logs thread behavior across runs."""
+ """MiddlewareTypes that tracks and logs thread behavior across runs."""
thread_messages = []
if context.thread and context.thread.message_store:
thread_messages = await context.thread.message_store.list_messages()
- print(f"[Middleware pre-execution] Current input messages: {len(context.messages)}")
- print(f"[Middleware pre-execution] Thread history messages: {len(thread_messages)}")
+ print(f"[MiddlewareTypes pre-execution] Current input messages: {len(context.messages)}")
+ print(f"[MiddlewareTypes pre-execution] Thread history messages: {len(thread_messages)}")
# Call next to execute the agent
await next(context)
@@ -64,12 +64,12 @@ async def thread_tracking_middleware(
if context.thread and context.thread.message_store:
updated_thread_messages = await context.thread.message_store.list_messages()
- print(f"[Middleware post-execution] Updated thread messages: {len(updated_thread_messages)}")
+ print(f"[MiddlewareTypes post-execution] Updated thread messages: {len(updated_thread_messages)}")
async def main() -> None:
"""Example demonstrating thread behavior in middleware across multiple runs."""
- print("=== Thread Behavior Middleware Example ===")
+ print("=== Thread Behavior MiddlewareTypes Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
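Condensed from the fragments in the hunks above, the whole middleware reads roughly as follows (import paths assumed to match the sample's):

```python
from collections.abc import Awaitable, Callable

from agent_framework import AgentRunContext


async def thread_tracking_middleware(
    context: AgentRunContext,
    next: Callable[[AgentRunContext], Awaitable[None]],
) -> None:
    # Before the run: compare the current input with the thread history.
    if context.thread and context.thread.message_store:
        history = await context.thread.message_store.list_messages()
        print(f"pre-run: {len(context.messages)} input, {len(history)} in thread")
    await next(context)  # execute the agent
    # After the run: the thread now also contains this run's messages.
    if context.thread and context.thread.message_store:
        history = await context.thread.message_store.list_messages()
        print(f"post-run: {len(history)} in thread")
```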
diff --git a/python/samples/getting_started/observability/advanced_manual_setup_console_output.py b/python/samples/getting_started/observability/advanced_manual_setup_console_output.py
index 1ac8fae8da..0b6a908b0d 100644
--- a/python/samples/getting_started/observability/advanced_manual_setup_console_output.py
+++ b/python/samples/getting_started/observability/advanced_manual_setup_console_output.py
@@ -107,7 +107,7 @@ async def run_chat_client() -> None:
message = "What's the weather in Amsterdam and in Paris?"
print(f"User: {message}")
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/observability/advanced_zero_code.py b/python/samples/getting_started/observability/advanced_zero_code.py
index 5f60af0327..5ac0c70c22 100644
--- a/python/samples/getting_started/observability/advanced_zero_code.py
+++ b/python/samples/getting_started/observability/advanced_zero_code.py
@@ -81,7 +81,7 @@ async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) ->
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/observability/agent_observability.py b/python/samples/getting_started/observability/agent_observability.py
index 1c5828d56e..278b508de6 100644
--- a/python/samples/getting_started/observability/agent_observability.py
+++ b/python/samples/getting_started/observability/agent_observability.py
@@ -50,9 +50,10 @@ async def main():
for question in questions:
print(f"\nUser: {question}")
print(f"{agent.name}: ", end="")
- async for update in agent.run_stream(
+ async for update in agent.run(
question,
thread=thread,
+ stream=True,
):
if update.text:
print(update.text, end="")
diff --git a/python/samples/getting_started/observability/agent_with_foundry_tracing.py b/python/samples/getting_started/observability/agent_with_foundry_tracing.py
index 72fd74facf..0e84a171fa 100644
--- a/python/samples/getting_started/observability/agent_with_foundry_tracing.py
+++ b/python/samples/getting_started/observability/agent_with_foundry_tracing.py
@@ -87,10 +87,7 @@ async def main():
for question in questions:
print(f"\nUser: {question}")
print(f"{agent.name}: ", end="")
- async for update in agent.run_stream(
- question,
- thread=thread,
- ):
+ async for update in agent.run(question, thread=thread, stream=True):
if update.text:
print(update.text, end="")
diff --git a/python/samples/getting_started/observability/azure_ai_agent_observability.py b/python/samples/getting_started/observability/azure_ai_agent_observability.py
index 56aa228386..08ac327913 100644
--- a/python/samples/getting_started/observability/azure_ai_agent_observability.py
+++ b/python/samples/getting_started/observability/azure_ai_agent_observability.py
@@ -67,10 +67,7 @@ async def main():
for question in questions:
print(f"\nUser: {question}")
print(f"{agent.name}: ", end="")
- async for update in agent.run_stream(
- question,
- thread=thread,
- ):
+ async for update in agent.run(question, thread=thread, stream=True):
if update.text:
print(update.text, end="")
diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py
index f900b8cf6e..014f387033 100644
--- a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py
+++ b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py
@@ -71,7 +71,7 @@ async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) ->
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, tools=get_weather, stream=True):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py
index 0929114a60..a5b0b3d7a8 100644
--- a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py
+++ b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py
@@ -71,7 +71,7 @@ async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) ->
print(f"User: {message}")
if stream:
print("Assistant: ", end="")
- async for chunk in client.get_streaming_response(message, tools=get_weather):
+ async for chunk in client.get_response(message, stream=True, tools=get_weather):
if str(chunk):
print(str(chunk), end="")
print("")
diff --git a/python/samples/getting_started/observability/workflow_observability.py b/python/samples/getting_started/observability/workflow_observability.py
index 7cd5174025..96a3565476 100644
--- a/python/samples/getting_started/observability/workflow_observability.py
+++ b/python/samples/getting_started/observability/workflow_observability.py
@@ -92,7 +92,7 @@ async def run_sequential_workflow() -> None:
print(f"Starting workflow with input: '{input_text}'")
output_event = None
- async for event in workflow.run_stream("Hello world"):
+ async for event in workflow.run("Hello world", stream=True):
if isinstance(event, WorkflowOutputEvent):
# The WorkflowOutputEvent contains the final result.
output_event = event
diff --git a/python/samples/getting_started/purview_agent/sample_purview_agent.py b/python/samples/getting_started/purview_agent/sample_purview_agent.py
index cb79042979..b5231c2a5f 100644
--- a/python/samples/getting_started/purview_agent/sample_purview_agent.py
+++ b/python/samples/getting_started/purview_agent/sample_purview_agent.py
@@ -157,7 +157,7 @@ async def run_with_agent_middleware() -> None:
middleware=[purview_agent_middleware],
)
- print("-- Agent Middleware Path --")
+ print("-- Agent MiddlewareTypes Path --")
first: AgentResponse = await agent.run(
ChatMessage("user", ["Tell me a joke about a pirate."], additional_properties={"user_id": user_id})
)
@@ -200,7 +200,7 @@ async def run_with_chat_middleware() -> None:
name=JOKER_NAME,
)
- print("-- Chat Middleware Path --")
+ print("-- Chat MiddlewareTypes Path --")
first: AgentResponse = await agent.run(
ChatMessage(
role="user",
@@ -305,7 +305,7 @@ async def run_with_custom_cache_provider() -> None:
async def main() -> None:
- print("== Purview Agent Sample (Middleware with Automatic Caching) ==")
+ print("== Purview Agent Sample (MiddlewareTypes with Automatic Caching) ==")
try:
await run_with_agent_middleware()
diff --git a/python/samples/getting_started/tools/function_tool_with_approval.py b/python/samples/getting_started/tools/function_tool_with_approval.py
index 188697a8ce..d740f8bad0 100644
--- a/python/samples/getting_started/tools/function_tool_with_approval.py
+++ b/python/samples/getting_started/tools/function_tool_with_approval.py
@@ -88,7 +88,7 @@ async def handle_approvals_streaming(query: str, agent: "AgentProtocol") -> None
user_input_requests: list[Any] = []
# Stream the response
- async for chunk in agent.run_stream(current_input):
+ async for chunk in agent.run(current_input, stream=True):
if chunk.text:
print(chunk.text, end="", flush=True)
@@ -123,9 +123,9 @@ async def handle_approvals_streaming(query: str, agent: "AgentProtocol") -> None
current_input = new_inputs
-async def run_weather_agent_with_approval(is_streaming: bool) -> None:
+async def run_weather_agent_with_approval(stream: bool) -> None:
"""Example showing AI function with approval requirement."""
- print(f"\n=== Weather Agent with Approval Required ({'Streaming' if is_streaming else 'Non-Streaming'}) ===\n")
+ print(f"\n=== Weather Agent with Approval Required ({'Streaming' if stream else 'Non-Streaming'}) ===\n")
async with ChatAgent(
chat_client=OpenAIResponsesClient(),
@@ -136,7 +136,7 @@ async def run_weather_agent_with_approval(is_streaming: bool) -> None:
query = "Can you give me an update of the weather in LA and Portland and detailed weather for Seattle?"
print(f"User: {query}")
- if is_streaming:
+ if stream:
print(f"\n{agent.name}: ", end="", flush=True)
await handle_approvals_streaming(query, agent)
print()
@@ -148,8 +148,8 @@ async def run_weather_agent_with_approval(is_streaming: bool) -> None:
async def main() -> None:
print("=== Demonstration of a tool with approvals ===\n")
- await run_weather_agent_with_approval(is_streaming=False)
- await run_weather_agent_with_approval(is_streaming=True)
+ await run_weather_agent_with_approval(stream=False)
+ await run_weather_agent_with_approval(stream=True)
if __name__ == "__main__":
diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py
index f44ececc63..f0cd23e134 100644
--- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py
+++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py
@@ -24,7 +24,7 @@
A Writer agent generates content,
then passes the conversation to a Reviewer agent that finalizes the result.
-The workflow is invoked with run_stream so you can observe events as they occur.
+The workflow is invoked with run(..., stream=True) so you can observe events as they occur.
Purpose:
Show how to wrap chat agents created by AzureOpenAIChatClient inside workflow executors, wire them with WorkflowBuilder,
@@ -121,8 +121,9 @@ async def main():
# Run the workflow with the user's initial message and stream events as they occur.
# This surfaces executor events, workflow outputs, run-state changes, and errors.
- async for event in workflow.run_stream(
- ChatMessage("user", ["Create a slogan for a new electric SUV that is affordable and fun to drive."])
+ async for event in workflow.run(
+ ChatMessage(role="user", text="Create a slogan for a new electric SUV that is affordable and fun to drive."),
+ stream=True,
):
if isinstance(event, WorkflowStatusEvent):
prefix = f"State ({event.origin.value}): "
diff --git a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py
index a7b9918991..fde402b338 100644
--- a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py
+++ b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py
@@ -84,7 +84,7 @@ async def main():
)
output: AgentResponse | None = None
- async for event in workflow.run_stream("hello world"):
+ async for event in workflow.run("hello world", stream=True):
if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponse):
output = event.data
diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py
index 42f7dc3d23..2d33c9d0e2 100644
--- a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py
+++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py
@@ -16,8 +16,8 @@
Show how to wire chat agents into a WorkflowBuilder pipeline by adding agents directly as edges.
Demonstrate:
-- Automatic streaming of agent deltas via AgentRunUpdateEvent when using run_stream().
-- Agents adapt to workflow mode: run_stream() emits incremental updates, run() emits complete responses.
+- Automatic streaming of agent deltas via AgentRunUpdateEvent when using run(..., stream=True).
+- Agents adapt to workflow mode: run(..., stream=True) emits incremental updates, run() emits complete responses.
Prerequisites:
- Azure AI Agent Service configured, along with the required environment variables.
@@ -49,7 +49,7 @@ def create_reviewer_agent(client: AzureAIAgentClient) -> ChatAgent:
async def main() -> None:
async with AzureCliCredential() as cred, AzureAIAgentClient(async_credential=cred) as client:
# Build the workflow by adding agents directly as edges.
- # Agents adapt to workflow mode: run_stream() for incremental updates, run() for complete responses.
+ # Agents adapt to workflow mode: run(..., stream=True) for incremental updates, run() for complete responses.
workflow = (
WorkflowBuilder()
.register_agent(lambda: create_writer_agent(client), name="writer")
@@ -61,7 +61,9 @@ async def main() -> None:
last_executor_id: str | None = None
- events = workflow.run_stream("Create a slogan for a new electric SUV that is affordable and fun to drive.")
+ events = workflow.run(
+ "Create a slogan for a new electric SUV that is affordable and fun to drive.", stream=True
+ )
async for event in events:
if isinstance(event, AgentRunUpdateEvent):
eid = event.executor_id
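For readers skimming the hunks, the delta-aggregation idiom these streaming samples share can be summarized in one function (a sketch; printing `event.data` directly assumes the update stringifies to its text delta):

```python
from agent_framework import AgentRunUpdateEvent, WorkflowOutputEvent


async def stream_deltas(workflow, task: str) -> None:
    last_executor_id: str | None = None
    async for event in workflow.run(task, stream=True):
        if isinstance(event, AgentRunUpdateEvent):
            # Start a fresh labelled line whenever the emitting executor changes.
            if event.executor_id != last_executor_id:
                print(f"\n{event.executor_id}: ", end="", flush=True)
                last_executor_id = event.executor_id
            print(event.data, end="", flush=True)  # incremental text delta
        elif isinstance(event, WorkflowOutputEvent):
            print(f"\nFinal output: {event.data}")
```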
diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py
index 64fb3f3e9a..e147282f6e 100644
--- a/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py
+++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py
@@ -117,8 +117,8 @@ async def main() -> None:
.build()
)
- events = workflow.run_stream(
- "Create quick workspace wellness tips for a remote analyst working across two monitors."
+ events = workflow.run(
+ "Create quick workspace wellness tips for a remote analyst working across two monitors.", stream=True
)
last_executor: str | None = None
diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py
index d8a8021a75..fcef2227dc 100644
--- a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py
+++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py
@@ -16,8 +16,8 @@
Show how to wire chat agents into a WorkflowBuilder pipeline by adding agents directly as edges.
Demonstrate:
-- Automatic streaming of agent deltas via AgentRunUpdateEvent when using run_stream().
-- Agents adapt to workflow mode: run_stream() emits incremental updates, run() emits complete responses.
+- Automatic streaming of agent deltas via AgentRunUpdateEvent when using run(..., stream=True).
+- Agents adapt to workflow mode: run(..., stream=True) emits incremental updates, run() emits complete responses.
Prerequisites:
- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables.
@@ -50,7 +50,7 @@ async def main():
"""Build and run a simple two node agent workflow: Writer then Reviewer."""
# Build the workflow using the fluent builder.
# Set the start node and connect an edge from writer to reviewer.
- # Agents adapt to workflow mode: run_stream() for incremental updates, run() for complete responses.
+ # Agents adapt to workflow mode: run(..., stream=True) for incremental updates, run() for complete responses.
workflow = (
WorkflowBuilder()
.register_agent(create_writer_agent, name="writer")
@@ -63,7 +63,7 @@ async def main():
# Stream events from the workflow. We aggregate partial token updates per executor for readable output.
last_executor_id: str | None = None
- events = workflow.run_stream("Create a slogan for a new electric SUV that is affordable and fun to drive.")
+ events = workflow.run("Create a slogan for a new electric SUV that is affordable and fun to drive.", stream=True)
async for event in events:
if isinstance(event, AgentRunUpdateEvent):
# AgentRunUpdateEvent contains incremental text deltas from the underlying agent.
diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py
index 73e08bd0c0..4b7eabf9ba 100644
--- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py
+++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py
@@ -277,8 +277,9 @@ async def main() -> None:
while not completed:
last_executor: str | None = None
if initial_run:
- stream = workflow.run_stream(
- "Create a short launch blurb for the LumenX desk lamp. Emphasize adjustability and warm lighting."
+ stream = workflow.run(
+ "Create a short launch blurb for the LumenX desk lamp. Emphasize adjustability and warm lighting.",
+ stream=True,
)
initial_run = False
elif pending_responses is not None:
diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py
index 3badeae78a..91681cb9be 100644
--- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py
+++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py
@@ -80,7 +80,7 @@ async def main() -> None:
# Wrap the workflow as an agent for composition scenarios
print("\nWrapping workflow as an agent and running...")
workflow_agent = workflow.as_agent(name="MagenticWorkflowAgent")
- async for response in workflow_agent.run_stream(task):
+ async for response in workflow_agent.run(task, stream=True):
# Fallback for any other events with text
print(response.text, end="", flush=True)
diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py
index 56b8c6de77..4b405720b9 100644
--- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py
+++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py
@@ -17,7 +17,7 @@
Key Concepts:
- Build a workflow using SequentialBuilder (or any builder pattern)
- Expose the workflow as a reusable agent via workflow.as_agent()
-- Pass custom context as kwargs when invoking workflow_agent.run() or run_stream()
+- Pass custom context as kwargs when invoking workflow_agent.run()
- kwargs are stored in SharedState and propagated to all agent invocations
- @tool functions receive kwargs via **kwargs parameter
@@ -121,10 +121,11 @@ async def main() -> None:
print("-" * 70)
# Run workflow agent with kwargs - these will flow through to tools
- # Note: kwargs are passed to workflow_agent.run_stream() just like workflow.run_stream()
+ # Note: kwargs are passed to workflow_agent.run() just like workflow.run()
print("\n===== Streaming Response =====")
- async for update in workflow_agent.run_stream(
+ async for update in workflow_agent.run(
"Please get my user data and then call the users API endpoint.",
+ stream=True,
custom_data=custom_data,
user_token=user_token,
):
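On the receiving end of those kwargs, the docstring says `@tool` functions pick them up through a `**kwargs` parameter. A sketch of such a tool (the decorator's import path and the `user_token` key are assumptions based on the prose above):

```python
from typing import Any

from agent_framework import tool  # import path assumed


@tool
async def call_users_api(endpoint: str, **kwargs: Any) -> str:
    # kwargs passed to workflow_agent.run(...) arrive here via SharedState.
    token = kwargs.get("user_token")
    return f"GET {endpoint} -> auth {'present' if token else 'missing'}"
```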
diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py
index 577a892066..273b4fb441 100644
--- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py
+++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py
@@ -217,8 +217,9 @@ async def main() -> None:
print("-" * 50)
# Run agent in streaming mode to observe incremental updates.
- async for event in agent.run_stream(
- "Write code for parallel reading 1 million files on disk and write to a sorted output file."
+ async for event in agent.run(
+ "Write code for parallel reading 1 million files on disk and write to a sorted output file.",
+ stream=True,
):
print(f"Agent Response: {event}")
diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py
index 71cfff1cc9..8bf09ac9c1 100644
--- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py
+++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py
@@ -251,10 +251,10 @@ async def run_interactive_session(
else:
if initial_message:
print(f"\nStarting workflow with brief: {initial_message}\n")
- event_stream = workflow.run_stream(message=initial_message)
+ event_stream = workflow.run(message=initial_message, stream=True)
elif checkpoint_id:
print("\nStarting workflow from checkpoint...\n")
- event_stream = workflow.run_stream(checkpoint_id=checkpoint_id)
+ event_stream = workflow.run(checkpoint_id=checkpoint_id, stream=True)
else:
raise ValueError("Either initial_message or checkpoint_id must be provided")
diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py
index a6f0a2431b..b82eaf80e9 100644
--- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py
+++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py
@@ -119,9 +119,9 @@ async def main():
# Start from checkpoint or fresh execution
print(f"\n** Workflow {workflow.id} started **")
event_stream = (
- workflow.run_stream(message=10)
+ workflow.run(message=10, stream=True)
if latest_checkpoint is None
- else workflow.run_stream(checkpoint_id=latest_checkpoint.checkpoint_id)
+ else workflow.run(checkpoint_id=latest_checkpoint.checkpoint_id, stream=True)
)
output: str | None = None
diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py
index e35894b8db..7537b1491e 100644
--- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py
+++ b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py
@@ -38,7 +38,7 @@
6. Workflow continues from the saved state.
Pattern:
-- Step 1: workflow.run_stream(checkpoint_id=...) to restore checkpoint and pending requests.
+- Step 1: workflow.run(checkpoint_id=..., stream=True) to restore checkpoint and pending requests.
- Step 2: workflow.send_responses_streaming(responses) to supply human replies and approvals.
- Two-step approach is required because send_responses_streaming does not accept checkpoint_id.
@@ -188,10 +188,10 @@ async def run_until_user_input_needed(
if initial_message:
print(f"\nStarting workflow with: {initial_message}\n")
- event_stream = workflow.run_stream(message=initial_message) # type: ignore[attr-defined]
+ event_stream = workflow.run(message=initial_message, stream=True) # type: ignore[attr-defined]
elif checkpoint_id:
print(f"\nResuming workflow from checkpoint: {checkpoint_id}\n")
- event_stream = workflow.run_stream(checkpoint_id=checkpoint_id) # type: ignore[attr-defined]
+ event_stream = workflow.run(checkpoint_id=checkpoint_id, stream=True) # type: ignore[attr-defined]
else:
raise ValueError("Must provide either initial_message or checkpoint_id")
@@ -255,7 +255,7 @@ async def resume_with_responses(
# Step 1: Restore the checkpoint to load pending requests into memory
# The checkpoint restoration re-emits pending RequestInfoEvents
restored_requests: list[RequestInfoEvent] = []
- async for event in workflow.run_stream(checkpoint_id=latest_checkpoint.checkpoint_id): # type: ignore[attr-defined]
+ async for event in workflow.run(checkpoint_id=latest_checkpoint.checkpoint_id, stream=True): # type: ignore[attr-defined]
if isinstance(event, RequestInfoEvent):
restored_requests.append(event)
if isinstance(event.data, HandoffAgentUserRequest):
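The two-step pattern this file's docstring describes, assembled in one place from the hunks above (sketch; `answers` maps request ids to replies):

```python
from agent_framework import RequestInfoEvent


async def resume_with_responses(workflow, checkpoint_id: str, answers: dict) -> None:
    # Step 1: restoring the checkpoint re-emits the pending RequestInfoEvents.
    restored: list[RequestInfoEvent] = []
    async for event in workflow.run(checkpoint_id=checkpoint_id, stream=True):
        if isinstance(event, RequestInfoEvent):
            restored.append(event)
    # Step 2: answer them. send_responses_streaming takes no checkpoint_id,
    # which is why the restore has to happen first.
    async for event in workflow.send_responses_streaming(answers):
        print(event)
```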
diff --git a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py
index 24dec9fb3e..6f8567d02c 100644
--- a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py
+++ b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py
@@ -334,7 +334,7 @@ async def main() -> None:
print("\n=== Stage 1: run until sub-workflow requests human review ===")
request_id: str | None = None
- async for event in workflow.run_stream("Contoso Gadget Launch"):
+ async for event in workflow.run("Contoso Gadget Launch", stream=True):
if isinstance(event, RequestInfoEvent) and request_id is None:
request_id = event.request_id
print(f"Captured review request id: {request_id}")
@@ -365,7 +365,7 @@ async def main() -> None:
workflow2 = build_parent_workflow(storage)
request_info_event: RequestInfoEvent | None = None
- async for event in workflow2.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id):
+ async for event in workflow2.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True):
if isinstance(event, RequestInfoEvent):
request_info_event = event
diff --git a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py
index c05ab2111e..d947330a19 100644
--- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py
+++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py
@@ -5,11 +5,11 @@
Purpose:
This sample demonstrates how to use checkpointing with a workflow wrapped as an agent.
-It shows how to enable checkpoint storage when calling agent.run() or agent.run_stream(),
+It shows how to enable checkpoint storage when calling agent.run(),
allowing workflow execution state to be persisted and potentially resumed.
What you learn:
-- How to pass checkpoint_storage to WorkflowAgent.run() and run_stream()
+- How to pass checkpoint_storage to WorkflowAgent.run()
- How checkpoints are created during workflow-as-agent execution
- How to combine thread conversation history with workflow checkpointing
- How to resume a workflow-as-agent from a checkpoint
@@ -147,7 +147,7 @@ def create_assistant() -> ChatAgent:
print("[assistant]: ", end="", flush=True)
# Stream with checkpointing
- async for update in agent.run_stream(query, checkpoint_storage=checkpoint_storage):
+ async for update in agent.run(query, checkpoint_storage=checkpoint_storage, stream=True):
if update.text:
print(update.text, end="", flush=True)
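The "What you learn" list above mentions combining thread history with workflow checkpointing; the hunk only shows `checkpoint_storage`. Combining both in one call is inferred from that list, not shown verbatim, so treat this as a sketch:

```python
async def ask(agent, thread, checkpoint_storage, query: str) -> None:
    # thread carries conversation history; checkpoint_storage persists
    # the wrapped workflow's execution state between runs.
    async for update in agent.run(
        query, thread=thread, checkpoint_storage=checkpoint_storage, stream=True
    ):
        if update.text:
            print(update.text, end="", flush=True)
```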
diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py
index 07e0f67d9d..bf95a980fd 100644
--- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py
+++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py
@@ -18,10 +18,10 @@
This sample demonstrates how custom context (kwargs) flows from a parent workflow
through to agents in sub-workflows. When you pass kwargs to the parent workflow's
-run_stream() or run(), they automatically propagate to nested sub-workflows.
+run(), they automatically propagate to nested sub-workflows.
Key Concepts:
-- kwargs passed to parent workflow.run_stream() propagate to sub-workflows
+- kwargs passed to parent workflow.run() propagate to sub-workflows
- Sub-workflow agents receive the same kwargs as the parent workflow
- Works with nested WorkflowExecutor compositions at any depth
- Useful for passing authentication tokens, configuration, or request context
@@ -123,8 +123,9 @@ async def main() -> None:
# Run the OUTER workflow with kwargs
# These kwargs will automatically propagate to the inner sub-workflow
- async for event in outer_workflow.run_stream(
+ async for event in outer_workflow.run(
"Please fetch my profile data and then call the users service.",
+ stream=True,
user_token=user_token,
service_config=service_config,
):
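Put together, the propagation described above needs nothing beyond extra keyword arguments on the parent run; the values here are illustrative:

```python
async def run_outer(outer_workflow) -> None:
    # kwargs on the parent run(...) reach agents in nested sub-workflows,
    # however deep the WorkflowExecutor composition goes.
    async for event in outer_workflow.run(
        "Please fetch my profile data.",
        stream=True,
        user_token="token-123",           # illustrative values
        service_config={"region": "eu"},
    ):
        print(event)
```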
diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py
index 167ae2e950..b06a2ce82a 100644
--- a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py
+++ b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py
@@ -302,7 +302,7 @@ async def main() -> None:
# Execute the workflow
for email in test_emails:
print(f"\n🚀 Processing email to '{email.recipient}'")
- async for event in workflow.run_stream(email):
+ async for event in workflow.run(email, stream=True):
if isinstance(event, WorkflowOutputEvent):
print(f"🎉 Final result for '{email.recipient}': {'Delivered' if event.data else 'Blocked'}")
diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py
index 65f6c9c77f..04d121c0ec 100644
--- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py
+++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py
@@ -276,7 +276,7 @@ def select_targets(analysis: AnalysisResult, target_ids: list[str]) -> list[str]
email = "Hello team, here are the updates for this week..."
# Print outputs and database events from streaming
- async for event in workflow.run_stream(email):
+ async for event in workflow.run(email, stream=True):
if isinstance(event, DatabaseEvent):
print(f"{event}")
elif isinstance(event, WorkflowOutputEvent):
diff --git a/python/samples/getting_started/workflows/control-flow/sequential_executors.py b/python/samples/getting_started/workflows/control-flow/sequential_executors.py
index e422009766..41bba945f3 100644
--- a/python/samples/getting_started/workflows/control-flow/sequential_executors.py
+++ b/python/samples/getting_started/workflows/control-flow/sequential_executors.py
@@ -16,7 +16,7 @@
Sample: Sequential workflow with streaming.
Two custom executors run in sequence. The first converts text to uppercase,
-the second reverses the text and completes the workflow. The run_stream loop prints events as they occur.
+the second reverses the text and completes the workflow. The streaming run loop prints events as they occur.
Purpose:
Show how to define explicit Executor classes with @handler methods, wire them in order with
@@ -75,7 +75,7 @@ async def main() -> None:
# Step 2: Stream events for a single input.
# The stream will include executor invoke and completion events, plus workflow outputs.
outputs: list[str] = []
- async for event in workflow.run_stream("hello world"):
+ async for event in workflow.run("hello world", stream=True):
print(f"Event: {event}")
if isinstance(event, WorkflowOutputEvent):
outputs.append(cast(str, event.data))
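Sketched from that docstring, the two executors might look as follows. `@handler`, `WorkflowContext`, and `send_message` appear in other hunks of this patch; the `id` constructor keyword and the `yield_output` call are assumptions based on the description of the terminal step:

```python
from agent_framework import Executor, WorkflowContext, handler


class UpperCaseExecutor(Executor):
    def __init__(self) -> None:
        super().__init__(id="upper_case")  # id keyword assumed

    @handler
    async def to_upper(self, text: str, ctx: WorkflowContext[str]) -> None:
        # Forward the transformed text to the next executor in the graph.
        await ctx.send_message(text.upper())


class ReverseExecutor(Executor):
    def __init__(self) -> None:
        super().__init__(id="reverse")

    @handler
    async def reverse(self, text: str, ctx: WorkflowContext) -> None:
        # The terminal step yields the workflow output (call name assumed).
        await ctx.yield_output(text[::-1])
```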
diff --git a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py
index ce7bc92758..1e31bcafc8 100644
--- a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py
+++ b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py
@@ -9,7 +9,7 @@
Sample: Foundational sequential workflow with streaming using function-style executors.
Two lightweight steps run in order. The first converts text to uppercase.
-The second reverses the text and yields the workflow output. Events are printed as they arrive from run_stream.
+The second reverses the text and yields the workflow output. Events are printed as they arrive from a streaming run.
Purpose:
Show how to declare executors with the @executor decorator, connect them with WorkflowBuilder,
@@ -64,7 +64,7 @@ async def main():
)
# Step 2: Run the workflow and stream events in real time.
- async for event in workflow.run_stream("hello world"):
+ async for event in workflow.run("hello world", stream=True):
# You will see executor invoke and completion events as the workflow progresses.
print(f"Event: {event}")
if isinstance(event, WorkflowOutputEvent):
diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py
index 348a014f9f..36a09241ed 100644
--- a/python/samples/getting_started/workflows/control-flow/simple_loop.py
+++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py
@@ -142,7 +142,7 @@ async def main():
# Step 2: Run the workflow and print the events.
iterations = 0
- async for event in workflow.run_stream(NumberSignal.INIT):
+ async for event in workflow.run(NumberSignal.INIT, stream=True):
if isinstance(event, ExecutorCompletedEvent) and event.executor_id == "guess_number":
iterations += 1
print(f"Event: {event}")
diff --git a/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py b/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py
index 2ebd5bd128..e921fbe9cf 100644
--- a/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py
+++ b/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py
@@ -13,7 +13,7 @@
Purpose:
Show how to cancel a running workflow by wrapping it in an asyncio.Task. This pattern
-works with both workflow.run() and workflow.run_stream(). Useful for implementing
+works with workflow.run() under both stream=True and stream=False. Useful for implementing
timeouts, graceful shutdown, or A2A executors that need cancellation support.
Prerequisites:
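Since this file's change is purely to the docstring, a minimal timeout wrapper in plain asyncio shows the pattern it describes; nothing here is framework-specific beyond the run call:

```python
import asyncio


async def run_with_timeout(workflow, message: str, seconds: float) -> None:
    async def consume() -> None:
        async for event in workflow.run(message, stream=True):
            print(event)

    task = asyncio.create_task(consume())
    try:
        await asyncio.wait_for(task, timeout=seconds)
    except asyncio.TimeoutError:
        # wait_for cancels the wrapped task on timeout; the workflow stops
        # at its next await point.
        print("workflow cancelled after timeout")
```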
diff --git a/python/samples/getting_started/workflows/declarative/customer_support/main.py b/python/samples/getting_started/workflows/declarative/customer_support/main.py
index 84e36b771d..685ff905d5 100644
--- a/python/samples/getting_started/workflows/declarative/customer_support/main.py
+++ b/python/samples/getting_started/workflows/declarative/customer_support/main.py
@@ -256,7 +256,7 @@ async def main() -> None:
pending_request_id = None
else:
# Start workflow
- stream = workflow.run_stream(user_input)
+ stream = workflow.run(user_input, stream=True)
async for event in stream:
if isinstance(event, WorkflowOutputEvent):
diff --git a/python/samples/getting_started/workflows/declarative/deep_research/main.py b/python/samples/getting_started/workflows/declarative/deep_research/main.py
index b5efef8101..947c5d288c 100644
--- a/python/samples/getting_started/workflows/declarative/deep_research/main.py
+++ b/python/samples/getting_started/workflows/declarative/deep_research/main.py
@@ -192,7 +192,7 @@ async def main() -> None:
# Example input
task = "What is the weather like in Seattle and how does it compare to the average for this time of year?"
- async for event in workflow.run_stream(task):
+ async for event in workflow.run(task, stream=True):
if isinstance(event, WorkflowOutputEvent):
print(f"{event.data}", end="", flush=True)
diff --git a/python/samples/getting_started/workflows/declarative/function_tools/README.md b/python/samples/getting_started/workflows/declarative/function_tools/README.md
index c1dd8d64a5..42f3dc6497 100644
--- a/python/samples/getting_started/workflows/declarative/function_tools/README.md
+++ b/python/samples/getting_started/workflows/declarative/function_tools/README.md
@@ -68,7 +68,7 @@ Session Complete
1. Create an Azure OpenAI chat client
2. Create an agent with instructions and function tools
3. Register the agent with the workflow factory
-4. Load the workflow YAML and run it with `run_stream()`
+4. Load the workflow YAML and run it with `run(..., stream=True)`
```python
# Create the agent with tools
@@ -85,6 +85,6 @@ factory.register_agent("MenuAgent", menu_agent)
# Load and run the workflow
workflow = factory.create_workflow_from_yaml_path(workflow_path)
-async for event in workflow.run_stream(inputs={"userInput": "What is the soup of the day?"}):
+async for event in workflow.run(inputs={"userInput": "What is the soup of the day?"}, stream=True):
...
```
diff --git a/python/samples/getting_started/workflows/declarative/function_tools/main.py b/python/samples/getting_started/workflows/declarative/function_tools/main.py
index 180175063e..0fd8dce643 100644
--- a/python/samples/getting_started/workflows/declarative/function_tools/main.py
+++ b/python/samples/getting_started/workflows/declarative/function_tools/main.py
@@ -92,7 +92,7 @@ async def main():
response = ExternalInputResponse(user_input=user_input)
stream = workflow.send_responses_streaming({pending_request_id: response})
else:
- stream = workflow.run_stream({"userInput": user_input})
+ stream = workflow.run({"userInput": user_input}, stream=True)
pending_request_id = None
first_response = True
diff --git a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py
index e9c0f90f83..aaf2faf613 100644
--- a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py
+++ b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py
@@ -21,11 +21,11 @@
async def run_with_streaming(workflow: Workflow) -> None:
- """Demonstrate streaming workflow execution with run_stream()."""
- print("\n=== Streaming Execution (run_stream) ===")
+ """Demonstrate streaming workflow execution."""
+ print("\n=== Streaming Execution ===")
print("-" * 40)
- async for event in workflow.run_stream({}):
+ async for event in workflow.run({}, stream=True):
# WorkflowOutputEvent wraps the actual output data
if isinstance(event, WorkflowOutputEvent):
data = event.data
diff --git a/python/samples/getting_started/workflows/declarative/marketing/main.py b/python/samples/getting_started/workflows/declarative/marketing/main.py
index e48d262076..639fbdddc3 100644
--- a/python/samples/getting_started/workflows/declarative/marketing/main.py
+++ b/python/samples/getting_started/workflows/declarative/marketing/main.py
@@ -84,7 +84,7 @@ async def main() -> None:
# Pass a simple string input - like .NET
product = "An eco-friendly stainless steel water bottle that keeps drinks cold for 24 hours."
- async for event in workflow.run_stream(product):
+ async for event in workflow.run(product, stream=True):
if isinstance(event, WorkflowOutputEvent):
print(f"{event.data}", end="", flush=True)
diff --git a/python/samples/getting_started/workflows/declarative/student_teacher/main.py b/python/samples/getting_started/workflows/declarative/student_teacher/main.py
index 746acaf009..dc252255a7 100644
--- a/python/samples/getting_started/workflows/declarative/student_teacher/main.py
+++ b/python/samples/getting_started/workflows/declarative/student_teacher/main.py
@@ -43,7 +43,7 @@
2. Gently point out errors without giving away the answer
3. Ask guiding questions to help them discover mistakes
4. Provide hints that lead toward understanding
-5. When the student demonstrates clear understanding, respond with "CONGRATULATIONS"
+5. When the student demonstrates clear understanding, respond with "CONGRATULATIONS"
followed by a summary of what they learned
Focus on building understanding, not just getting the right answer."""
@@ -81,7 +81,7 @@ async def main() -> None:
print("Student-Teacher Math Coaching Session")
print("=" * 50)
- async for event in workflow.run_stream("How would you compute the value of PI?"):
+ async for event in workflow.run("How would you compute the value of PI?", stream=True):
if isinstance(event, WorkflowOutputEvent):
print(f"{event.data}", flush=True, end="")
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py
index 752956d0f2..077e1e3021 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py
@@ -147,7 +147,7 @@ async def main() -> None:
stream = (
workflow.send_responses_streaming(pending_responses)
if pending_responses
- else workflow.run_stream("Analyze the impact of large language models on software development.")
+ else workflow.run("Analyze the impact of large language models on software development.", stream=True)
)
pending_responses = None
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py
index 5d36fbd13a..7e6e85da58 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py
@@ -109,9 +109,10 @@ async def main() -> None:
stream = (
workflow.send_responses_streaming(pending_responses)
if pending_responses
- else workflow.run_stream(
+ else workflow.run(
"Discuss how our team should approach adopting AI tools for productivity. "
- "Consider benefits, risks, and implementation strategies."
+ "Consider benefits, risks, and implementation strategies.",
+ stream=True,
)
)
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py
index dba7f56b66..6ab71512a5 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py
@@ -10,6 +10,7 @@
ChatMessage, # Chat message structure
Executor, # Base class for workflow executors
RequestInfoEvent, # Event emitted when human input is requested
+ Role, # Enum of chat roles (user, assistant, system)
WorkflowBuilder, # Fluent builder for assembling the graph
WorkflowContext, # Per run context and event bus
WorkflowOutputEvent, # Event emitted when workflow yields output
@@ -17,7 +18,7 @@
WorkflowStatusEvent, # Event emitted on run state changes
handler,
response_handler, # Decorator to expose an Executor method as a step
- )
+)
from agent_framework.azure import AzureOpenAIChatClient
from azure.identity import AzureCliCredential
from pydantic import BaseModel
@@ -36,7 +37,7 @@
Demonstrate:
- Alternating turns between an AgentExecutor and a human, driven by events.
- Using Pydantic response_format to enforce structured JSON output from the agent instead of regex parsing.
-- Driving the loop in application code with run_stream and responses parameter.
+- Driving the loop in application code with the responses parameter.
Prerequisites:
- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables.
@@ -86,7 +87,7 @@ async def start(self, _: str, ctx: WorkflowContext[AgentExecutorRequest]) -> Non
- Input is a simple starter token (ignored here).
- Output is an AgentExecutorRequest that triggers the agent to produce a guess.
"""
- user = ChatMessage("user", text="Start by making your first guess.")
+ user = ChatMessage(Role.USER, text="Start by making your first guess.")
await ctx.send_message(AgentExecutorRequest(messages=[user], should_respond=True))
@handler
@@ -136,7 +137,7 @@ async def on_human_feedback(
# Provide feedback to the agent to try again.
# We keep the agent's output strictly JSON to ensure stable parsing on the next turn.
user_msg = ChatMessage(
- "user",
+ Role.USER,
text=(f'Feedback: {reply}. Return ONLY a JSON object matching the schema {{"guess": }}.'),
)
await ctx.send_message(AgentExecutorRequest(messages=[user_msg], should_respond=True))
@@ -184,10 +185,12 @@ async def main() -> None:
# )
while workflow_output is None:
- # First iteration uses run_stream("start").
+ # First iteration uses run("start", stream=True).
# Subsequent iterations use send_responses_streaming with pending_responses from the console.
stream = (
- workflow.send_responses_streaming(pending_responses) if pending_responses else workflow.run_stream("start")
+ workflow.send_responses_streaming(pending_responses)
+ if pending_responses
+ else workflow.run("start", stream=True)
)
# Collect events for this turn. Among these you may see WorkflowStatusEvent
# with state IDLE_WITH_PENDING_REQUESTS when the workflow pauses for
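The drive loop the hunks above build up, in one condensed sketch (the reply payload type depends on the request; a plain string is assumed here for illustration):

```python
from agent_framework import RequestInfoEvent, WorkflowOutputEvent


async def drive(workflow) -> None:
    pending_responses: dict | None = None
    output = None
    while output is None:
        # First turn starts the run; later turns feed human replies back in.
        stream = (
            workflow.send_responses_streaming(pending_responses)
            if pending_responses
            else workflow.run("start", stream=True)
        )
        pending_responses = None
        async for event in stream:
            if isinstance(event, RequestInfoEvent):
                pending_responses = {event.request_id: input("> ")}
            elif isinstance(event, WorkflowOutputEvent):
                output = event.data
    print(f"Result: {output}")
```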
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py
index afb19753e5..aff4d5ba9e 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py
@@ -83,7 +83,7 @@ async def main() -> None:
stream = (
workflow.send_responses_streaming(pending_responses)
if pending_responses
- else workflow.run_stream("Write a brief introduction to artificial intelligence.")
+ else workflow.run("Write a brief introduction to artificial intelligence.", stream=True)
)
pending_responses = None
diff --git a/python/samples/getting_started/workflows/observability/executor_io_observation.py b/python/samples/getting_started/workflows/observability/executor_io_observation.py
index 0237f294f2..a8f7576fcb 100644
--- a/python/samples/getting_started/workflows/observability/executor_io_observation.py
+++ b/python/samples/getting_started/workflows/observability/executor_io_observation.py
@@ -91,7 +91,7 @@ async def main() -> None:
print("Running workflow with executor I/O observation...\n")
- async for event in workflow.run_stream("hello world"):
+ async for event in workflow.run("hello world", stream=True):
if isinstance(event, ExecutorInvokedEvent):
# The input message received by the executor is in event.data
print(f"[INVOKED] {event.executor_id}")
diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py b/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py
index cdc03a5ea5..563ff46be6 100644
--- a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py
+++ b/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py
@@ -84,7 +84,7 @@ async def main() -> None:
# Keep track of the last executor to format output nicely in streaming mode
last_executor_id: str | None = None
output_event: WorkflowOutputEvent | None = None
- async for event in workflow.run_stream(task):
+ async for event in workflow.run(task, stream=True):
if isinstance(event, AgentRunUpdateEvent):
eid = event.executor_id
if eid != last_executor_id:
diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py b/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py
index de613dea2e..be00dd1502 100644
--- a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py
+++ b/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py
@@ -238,7 +238,7 @@ async def main() -> None:
final_conversation: list[ChatMessage] = []
current_speaker: str | None = None
- async for event in workflow.run_stream(f"Please begin the discussion on: {topic}"):
+ async for event in workflow.run(f"Please begin the discussion on: {topic}", stream=True):
if isinstance(event, AgentRunUpdateEvent):
if event.executor_id != current_speaker:
if current_speaker is not None:
diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py
index 1047cd6f22..4394f55667 100644
--- a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py
+++ b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py
@@ -103,7 +103,7 @@ async def main() -> None:
# Keep track of the last executor to format output nicely in streaming mode
last_executor_id: str | None = None
output_event: WorkflowOutputEvent | None = None
- async for event in workflow.run_stream(task):
+ async for event in workflow.run(task, stream=True):
if isinstance(event, AgentRunUpdateEvent):
eid = event.executor_id
if eid != last_executor_id:
diff --git a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py b/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py
index e33b230ce7..e74b2070b4 100644
--- a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py
+++ b/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py
@@ -138,7 +138,7 @@ async def main() -> None:
request = "Perform a comprehensive research on Microsoft Agent Framework."
print("Request:", request)
- async for event in workflow.run_stream(request):
+ async for event in workflow.run(request, stream=True):
_display_event(event)
"""
diff --git a/python/samples/getting_started/workflows/orchestration/handoff_simple.py b/python/samples/getting_started/workflows/orchestration/handoff_simple.py
index 2e7f53a82d..9868fbdd67 100644
--- a/python/samples/getting_started/workflows/orchestration/handoff_simple.py
+++ b/python/samples/getting_started/workflows/orchestration/handoff_simple.py
@@ -235,12 +235,12 @@ async def main() -> None:
]
# Start the workflow with the initial user message
- # run_stream() returns an async iterator of WorkflowEvent
+ # run(..., stream=True) returns an async iterator of WorkflowEvent
print("[Starting workflow with initial user message...]\n")
initial_message = "Hello, I need assistance with my recent purchase."
print(f"- User: {initial_message}")
- workflow_result = await workflow.run(initial_message)
- pending_requests = _handle_events(workflow_result)
+ workflow_result = workflow.run(initial_message, stream=True)
+ pending_requests = _handle_events([event async for event in workflow_result])
# Process the request/response cycle
# The workflow will continue requesting input until:
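The hunk above spells out the batch-to-streaming conversion inline; several samples factor the same idea into a tiny helper, along these lines:

```python
async def drain(stream) -> list:
    # With stream=True, run() returns an async iterator rather than an
    # awaitable result, so batch-style code collects the events first.
    return [event async for event in stream]


# usage: events = await drain(workflow.run(initial_message, stream=True))
```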
diff --git a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py
index 0c0616850b..431d0d4770 100644
--- a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py
+++ b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py
@@ -168,7 +168,7 @@ async def main() -> None:
all_file_ids: list[str] = []
print(f"User: {user_inputs[0]}")
- events = await _drain(workflow.run_stream(user_inputs[0]))
+ events = await _drain(workflow.run(user_inputs[0], stream=True))
requests, file_ids = _handle_events(events)
all_file_ids.extend(file_ids)
input_index += 1
diff --git a/python/samples/getting_started/workflows/orchestration/magentic.py b/python/samples/getting_started/workflows/orchestration/magentic.py
index 60746bc113..41bc17acd1 100644
--- a/python/samples/getting_started/workflows/orchestration/magentic.py
+++ b/python/samples/getting_started/workflows/orchestration/magentic.py
@@ -104,7 +104,7 @@ async def main() -> None:
# Keep track of the last executor to format output nicely in streaming mode
last_message_id: str | None = None
output_event: WorkflowOutputEvent | None = None
- async for event in workflow.run_stream(task):
+ async for event in workflow.run(task, stream=True):
if isinstance(event, AgentRunUpdateEvent):
message_id = event.data.message_id
if message_id != last_message_id:
diff --git a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py
index 2dd6a1a170..2002641199 100644
--- a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py
+++ b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py
@@ -110,7 +110,7 @@ async def main() -> None:
# request_id we must reuse on resume. In a real system this is where the UI would present
# the plan for human review.
plan_review_request: MagenticPlanReviewRequest | None = None
- async for event in workflow.run_stream(TASK):
+ async for event in workflow.run(TASK, stream=True):
if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest:
plan_review_request = event.data
print(f"Captured plan review request: {event.request_id}")
@@ -149,7 +149,7 @@ async def main() -> None:
# Resume execution and capture the re-emitted plan review request.
request_info_event: RequestInfoEvent | None = None
- async for event in resumed_workflow.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id):
+ async for event in resumed_workflow.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True):
if isinstance(event, RequestInfoEvent) and isinstance(event.data, MagenticPlanReviewRequest):
request_info_event = event
@@ -222,7 +222,7 @@ def _pending_message_count(cp: WorkflowCheckpoint) -> int:
final_event_post: WorkflowOutputEvent | None = None
post_emitted_events = False
post_plan_workflow = build_workflow(checkpoint_storage)
- async for event in post_plan_workflow.run_stream(checkpoint_id=post_plan_checkpoint.checkpoint_id):
+ async for event in post_plan_workflow.run(checkpoint_id=post_plan_checkpoint.checkpoint_id, stream=True):
post_emitted_events = True
if isinstance(event, WorkflowOutputEvent):
final_event_post = event
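Resume goes through the same unified entry point: pass `checkpoint_id` instead of an initial message. A condensed sketch of the capture-then-resume flow this file exercises, with the workflow argument standing in for the sample's own `build_workflow(checkpoint_storage)` wiring:

```python
from agent_framework import RequestInfoEvent, WorkflowOutputEvent

async def resume(workflow, checkpoint_id: str) -> None:
    # Resuming re-emits pending RequestInfoEvents before any new output.
    async for event in workflow.run(checkpoint_id=checkpoint_id, stream=True):
        if isinstance(event, RequestInfoEvent):
            print("Pending request:", event.request_id)
        elif isinstance(event, WorkflowOutputEvent):
            print("Final output:", event.data)
```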
diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py
index 1050463d01..aa7b9b5f8c 100644
--- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py
+++ b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py
@@ -87,7 +87,7 @@ async def main() -> None:
if pending_responses is not None:
stream = workflow.send_responses_streaming(pending_responses)
else:
- stream = workflow.run_stream(task)
+ stream = workflow.run(task, stream=True)
last_message_id: str | None = None
async for event in stream:
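Because `send_responses_streaming(...)` and `run(..., stream=True)` now both return the same kind of event stream, the review loop can bind either one to a single variable, as above. A sketch of that dispatch step in isolation:

```python
def next_stream(workflow, task: str, pending_responses):
    # First pass starts the run; subsequent passes feed human responses back.
    if pending_responses is not None:
        return workflow.send_responses_streaming(pending_responses)
    return workflow.run(task, stream=True)
```

A caller then drives both cases identically with `async for event in next_stream(...)`.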
diff --git a/python/samples/getting_started/workflows/orchestration/sequential_agents.py b/python/samples/getting_started/workflows/orchestration/sequential_agents.py
index 59a9cb5bdd..9d25452613 100644
--- a/python/samples/getting_started/workflows/orchestration/sequential_agents.py
+++ b/python/samples/getting_started/workflows/orchestration/sequential_agents.py
@@ -46,7 +46,7 @@ async def main() -> None:
# 3) Run and collect outputs
outputs: list[list[ChatMessage]] = []
- async for event in workflow.run_stream("Write a tagline for a budget-friendly eBike."):
+ async for event in workflow.run("Write a tagline for a budget-friendly eBike.", stream=True):
if isinstance(event, WorkflowOutputEvent):
outputs.append(cast(list[ChatMessage], event.data))
diff --git a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py
index f59b1ea0c8..119055f31e 100644
--- a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py
+++ b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py
@@ -87,7 +87,7 @@ async def main() -> None:
# 2) Run the workflow
output: list[int | float] | None = None
- async for event in workflow.run_stream([random.randint(1, 100) for _ in range(10)]):
+ async for event in workflow.run([random.randint(1, 100) for _ in range(10)], stream=True):
if isinstance(event, WorkflowOutputEvent):
output = event.data
diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py
index f2ed5ad677..4fdc2da4b1 100644
--- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py
+++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py
@@ -11,11 +11,12 @@
Executor, # Base class for custom Python executors
ExecutorCompletedEvent,
ExecutorInvokedEvent,
+ Role, # Enum of chat roles (user, assistant, system)
WorkflowBuilder, # Fluent builder for wiring the workflow graph
WorkflowContext, # Per run context and event bus
WorkflowOutputEvent, # Event emitted when workflow yields output
handler, # Decorator to mark an Executor method as invokable
- )
+)
from agent_framework.azure import AzureOpenAIChatClient
from azure.identity import AzureCliCredential # Uses your az CLI login for credentials
from typing_extensions import Never
@@ -45,7 +46,7 @@ class DispatchToExperts(Executor):
@handler
async def dispatch(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None:
# Wrap the incoming prompt as a user message for each expert and request a response.
- initial_message = ChatMessage("user", text=prompt)
+ initial_message = ChatMessage(Role.USER, text=prompt)
await ctx.send_message(AgentExecutorRequest(messages=[initial_message], should_respond=True))
@@ -140,7 +141,9 @@ async def main() -> None:
)
# 3) Run with a single prompt and print progress plus the final consolidated output
- async for event in workflow.run_stream("We are launching a new budget-friendly electric bike for urban commuters."):
+ async for event in workflow.run(
+ "We are launching a new budget-friendly electric bike for urban commuters.", stream=True
+ ):
if isinstance(event, ExecutorInvokedEvent):
# Show when executors are invoked and completed for lightweight observability.
print(f"{event.executor_id} invoked")
diff --git a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py
index 9b46e74bd2..92380bcd3f 100644
--- a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py
+++ b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py
@@ -14,7 +14,7 @@
WorkflowOutputEvent, # Event emitted when workflow yields output
WorkflowViz, # Utility to visualize a workflow graph
handler, # Decorator to expose an Executor method as a step
- )
+)
from typing_extensions import Never
"""
@@ -329,7 +329,7 @@ async def main():
raw_text = await f.read()
# Step 4: Run the workflow with the raw text as input.
- async for event in workflow.run_stream(raw_text):
+ async for event in workflow.run(raw_text, stream=True):
print(f"Event: {event}")
if isinstance(event, WorkflowOutputEvent):
print(f"Final Output: {event.data}")
diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py
index bf7320f834..349d4ea86c 100644
--- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py
+++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py
@@ -15,7 +15,7 @@
through any workflow pattern to @tool functions using the **kwargs pattern.
Key Concepts:
-- Pass custom context as kwargs when invoking workflow.run_stream() or workflow.run()
+- Pass custom context as kwargs when invoking workflow.run()
- kwargs are stored in SharedState and passed to all agent invocations
- @tool functions receive kwargs via **kwargs parameter
- Works with Sequential, Concurrent, GroupChat, Handoff, and Magentic patterns
@@ -112,8 +112,9 @@ async def main() -> None:
print("-" * 70)
# Run workflow with kwargs - these will flow through to tools
- async for event in workflow.run_stream(
+ async for event in workflow.run(
"Please get my user data and then call the users API endpoint.",
+ stream=True,
custom_data=custom_data,
user_token=user_token,
):
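On the receiving end, the docstring's `**kwargs` pattern means a tool can read the run-scoped values back out by name. A hedged sketch; the `tool` import location is assumed from the `@tool` naming used elsewhere in this patch:

```python
from typing import Any

from agent_framework import tool  # import path assumed

@tool
def call_users_api(endpoint: str, **kwargs: Any) -> str:
    # custom_data and user_token arrive via SharedState from
    # workflow.run(..., custom_data=..., user_token=...).
    token = kwargs.get("user_token")
    return f"GET {endpoint} (token {'present' if token else 'missing'})"
```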
diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py
index 4e202026fb..a8a7886192 100644
--- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py
+++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py
@@ -132,9 +132,10 @@ async def main() -> None:
# Phase 1: Run workflow and collect request info events
request_info_events: list[RequestInfoEvent] = []
- async for event in workflow.run_stream(
+ async for event in workflow.run(
"Manage my portfolio. Use a max of 5000 dollars to adjust my position using "
- "your best judgment based on market sentiment. No need to confirm trades with me."
+ "your best judgment based on market sentiment. No need to confirm trades with me.",
+ stream=True,
):
if isinstance(event, RequestInfoEvent):
request_info_events.append(event)
diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py
index b4bc773eba..422102a4bd 100644
--- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py
+++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py
@@ -139,8 +139,9 @@ async def main() -> None:
request_info_events: list[RequestInfoEvent] = []
# Keep track of the last response to format output nicely in streaming mode
last_response_id: str | None = None
- async for event in workflow.run_stream(
- "We need to deploy version 2.4.0 to production. Please coordinate the deployment."
+ async for event in workflow.run(
+ "We need to deploy version 2.4.0 to production. Please coordinate the deployment.",
+ stream=True,
):
if isinstance(event, RequestInfoEvent):
request_info_events.append(event)
diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py
index 30c6b2358f..60a3766cb8 100644
--- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py
+++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py
@@ -87,8 +87,9 @@ async def main() -> None:
# Phase 1: Run workflow and collect all events (stream ends at IDLE or IDLE_WITH_PENDING_REQUESTS)
request_info_events: list[RequestInfoEvent] = []
- async for event in workflow.run_stream(
- "Check the schema and then update all orders with status 'pending' to 'processing'"
+ async for event in workflow.run(
+ "Check the schema and then update all orders with status 'pending' to 'processing'",
+ stream=True,
):
if isinstance(event, RequestInfoEvent):
request_info_events.append(event)
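All three tool-approval samples share the same phase-1 shape: drain the stream until the workflow goes idle and collect every `RequestInfoEvent` that needs a human decision. Condensed:

```python
from agent_framework import RequestInfoEvent

async def collect_approval_requests(workflow, prompt: str) -> list[RequestInfoEvent]:
    # The stream ends once the workflow reaches IDLE or IDLE_WITH_PENDING_REQUESTS.
    requests: list[RequestInfoEvent] = []
    async for event in workflow.run(prompt, stream=True):
        if isinstance(event, RequestInfoEvent):
            requests.append(event)
    return requests
```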
diff --git a/python/samples/semantic-kernel-migration/README.md b/python/samples/semantic-kernel-migration/README.md
index 64c9d80aa5..c1fa894a4c 100644
--- a/python/samples/semantic-kernel-migration/README.md
+++ b/python/samples/semantic-kernel-migration/README.md
@@ -70,6 +70,6 @@ Swap the script path for any other workflow or process sample. Deactivate the sa
## Tips for Migration
- Keep the original SK sample open while iterating on the AF equivalent; the code is intentionally formatted so you can copy/paste across SDKs.
-- Threads/conversation state are explicit in AF. When porting SK code that relies on implicit thread reuse, call `agent.get_new_thread()` and pass it into each `run`/`run_stream` call.
+- Threads/conversation state are explicit in AF. When porting SK code that relies on implicit thread reuse, call `agent.get_new_thread()` and pass it into each `run` call.
- Tools map cleanly: SK `@kernel_function` plugins translate to AF `@tool` callables. Hosted tools (code interpreter, web search, MCP) are available only in AF—introduce them once parity is achieved.
- For multi-agent orchestration, AF workflows expose checkpoints and resume capabilities that SK Process/Team abstractions do not. Use the workflow samples as a blueprint when modernizing complex agent graphs.
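The explicit-thread tip looks roughly like this in practice (agent construction elided; the chat-completion sample below shows the full version, and the prompts here are illustrative):

```python
async def chat_with_memory(agent) -> None:
    # AF threads are explicit: create one and pass it into every run call.
    thread = agent.get_new_thread()
    first = await agent.run("Name three eBike selling points.", thread=thread)
    print(first.text)
    # The same thread carries conversation state into a streamed follow-up.
    async for chunk in agent.run("Draft a 2 sentence blurb.", thread=thread, stream=True):
        if chunk.text:
            print(chunk.text, end="", flush=True)
    print()
```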
diff --git a/python/samples/semantic-kernel-migration/chat_completion/03_chat_completion_thread_and_stream.py b/python/samples/semantic-kernel-migration/chat_completion/03_chat_completion_thread_and_stream.py
index 933910dd62..5d802867b1 100644
--- a/python/samples/semantic-kernel-migration/chat_completion/03_chat_completion_thread_and_stream.py
+++ b/python/samples/semantic-kernel-migration/chat_completion/03_chat_completion_thread_and_stream.py
@@ -53,9 +53,10 @@ async def run_agent_framework() -> None:
print("[AF]", first.text)
print("[AF][stream]", end=" ")
- async for chunk in chat_agent.run_stream(
+ async for chunk in chat_agent.run(
"Draft a 2 sentence blurb.",
thread=thread,
+ stream=True,
):
if chunk.text:
print(chunk.text, end="", flush=True)
diff --git a/python/samples/semantic-kernel-migration/copilot_studio/02_copilot_studio_streaming.py b/python/samples/semantic-kernel-migration/copilot_studio/02_copilot_studio_streaming.py
index d437ff807e..e0f02f682c 100644
--- a/python/samples/semantic-kernel-migration/copilot_studio/02_copilot_studio_streaming.py
+++ b/python/samples/semantic-kernel-migration/copilot_studio/02_copilot_studio_streaming.py
@@ -28,7 +28,7 @@ async def run_agent_framework() -> None:
)
# AF streaming provides incremental AgentResponseUpdate objects.
print("[AF][stream]", end=" ")
- async for update in agent.run_stream("Plan a day in Copenhagen for foodies."):
+ async for update in agent.run("Plan a day in Copenhagen for foodies.", stream=True):
if update.text:
print(update.text, end="", flush=True)
print()
diff --git a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py
index b07a3393a8..efd3d80e5d 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py
@@ -90,7 +90,7 @@ async def run_agent_framework_example(prompt: str) -> Sequence[list[ChatMessage]
workflow = ConcurrentBuilder().participants([physics, chemistry]).build()
outputs: list[list[ChatMessage]] = []
- async for event in workflow.run_stream(prompt):
+ async for event in workflow.run(prompt, stream=True):
if isinstance(event, WorkflowOutputEvent):
outputs.append(cast(list[ChatMessage], event.data))
diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py
index 4ce31f3a04..76ab8ee692 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py
@@ -239,7 +239,7 @@ async def run_agent_framework_example(task: str) -> str:
)
final_response = ""
- async for event in workflow.run_stream(task):
+ async for event in workflow.run(task, stream=True):
if isinstance(event, WorkflowOutputEvent):
data = event.data
if isinstance(data, list) and len(data) > 0:
diff --git a/python/samples/semantic-kernel-migration/orchestrations/handoff.py b/python/samples/semantic-kernel-migration/orchestrations/handoff.py
index a90c8acf14..f2333c0fb5 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/handoff.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/handoff.py
@@ -244,7 +244,7 @@ async def run_agent_framework_example(initial_task: str, scripted_responses: Seq
.build()
)
- events = await _drain_events(workflow.run_stream(initial_task))
+ events = await _drain_events(workflow.run(initial_task, stream=True))
pending = _collect_handoff_requests(events)
scripted_iter = iter(scripted_responses)
diff --git a/python/samples/semantic-kernel-migration/orchestrations/magentic.py b/python/samples/semantic-kernel-migration/orchestrations/magentic.py
index 3d9aa67ea8..db201da443 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/magentic.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/magentic.py
@@ -147,7 +147,7 @@ async def run_agent_framework_example(prompt: str) -> str | None:
workflow = MagenticBuilder().participants([researcher, coder]).with_manager(agent=manager_agent).build()
final_text: str | None = None
- async for event in workflow.run_stream(prompt):
+ async for event in workflow.run(prompt, stream=True):
if isinstance(event, WorkflowOutputEvent):
final_text = cast(str, event.data)
diff --git a/python/samples/semantic-kernel-migration/orchestrations/sequential.py b/python/samples/semantic-kernel-migration/orchestrations/sequential.py
index 3b66ab2538..e433c8c3d4 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/sequential.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/sequential.py
@@ -76,7 +76,7 @@ async def run_agent_framework_example(prompt: str) -> list[ChatMessage]:
workflow = SequentialBuilder().participants([writer, reviewer]).build()
conversation_outputs: list[list[ChatMessage]] = []
- async for event in workflow.run_stream(prompt):
+ async for event in workflow.run(prompt, stream=True):
if isinstance(event, WorkflowOutputEvent):
conversation_outputs.append(cast(list[ChatMessage], event.data))
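The builder-based orchestrations all funnel their results through `WorkflowOutputEvent` the same way; a condensed version of the sequential case above, taking the builder as a parameter so no import location beyond what the samples show is asserted:

```python
from typing import cast

from agent_framework import ChatMessage, WorkflowOutputEvent

async def run_to_outputs(builder, prompt: str) -> list[list[ChatMessage]]:
    # builder is e.g. SequentialBuilder().participants([writer, reviewer])
    workflow = builder.build()
    outputs: list[list[ChatMessage]] = []
    async for event in workflow.run(prompt, stream=True):
        if isinstance(event, WorkflowOutputEvent):
            outputs.append(cast(list[ChatMessage], event.data))
    return outputs
```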
diff --git a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py
index 626421ddc9..cb27e53cc0 100644
--- a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py
+++ b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py
@@ -231,7 +231,7 @@ async def run_agent_framework_workflow_example() -> str | None:
)
final_text: str | None = None
- async for event in workflow.run_stream(CommonEvents.START_PROCESS):
+ async for event in workflow.run(CommonEvents.START_PROCESS, stream=True):
if isinstance(event, WorkflowOutputEvent):
final_text = cast(str, event.data)
diff --git a/python/samples/semantic-kernel-migration/processes/nested_process.py b/python/samples/semantic-kernel-migration/processes/nested_process.py
index 884ee6f4b0..40c682a805 100644
--- a/python/samples/semantic-kernel-migration/processes/nested_process.py
+++ b/python/samples/semantic-kernel-migration/processes/nested_process.py
@@ -256,7 +256,7 @@ async def run_agent_framework_nested_workflow(initial_message: str) -> Sequence[
)
results: list[str] = []
- async for event in outer_workflow.run_stream(initial_message):
+ async for event in outer_workflow.run(initial_message, stream=True):
if isinstance(event, WorkflowOutputEvent):
results.append(cast(str, event.data))
diff --git a/python/uv.lock b/python/uv.lock
index 63e2cd11b8..d19e64cb58 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -190,7 +190,6 @@ dependencies = [
dev = [
{ name = "httpx", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "pytest", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
- { name = "pytest-asyncio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
[package.metadata]
@@ -200,7 +199,6 @@ requires-dist = [
{ name = "fastapi", specifier = ">=0.115.0" },
{ name = "httpx", marker = "extra == 'dev'", specifier = ">=0.27.0" },
{ name = "pytest", marker = "extra == 'dev'", specifier = ">=8.0.0" },
- { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" },
{ name = "uvicorn", specifier = ">=0.30.0" },
]
provides-extras = ["dev"]
@@ -550,7 +548,7 @@ math = [
tau2 = [
{ name = "loguru", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
@@ -562,12 +560,6 @@ dev = [
{ name = "pre-commit", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "pyright", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "pytest", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
- { name = "pytest-asyncio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
- { name = "pytest-cov", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
- { name = "pytest-env", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
- { name = "pytest-retry", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
- { name = "pytest-timeout", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
- { name = "pytest-xdist", extra = ["psutil"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "rich", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "ruff", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "tau2", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -601,12 +593,6 @@ dev = [
{ name = "pre-commit", specifier = ">=3.7" },
{ name = "pyright", specifier = ">=1.1.402" },
{ name = "pytest", specifier = ">=8.4.1" },
- { name = "pytest-asyncio", specifier = ">=1.0.0" },
- { name = "pytest-cov", specifier = ">=6.2.1" },
- { name = "pytest-env", specifier = ">=1.1.5" },
- { name = "pytest-retry", specifier = ">=1" },
- { name = "pytest-timeout", specifier = ">=2.3.1" },
- { name = "pytest-xdist", extras = ["psutil"], specifier = ">=3.8.0" },
{ name = "rich" },
{ name = "ruff", specifier = ">=0.11.8" },
{ name = "tau2", git = "https://github.com/sierra-research/tau2-bench?rev=5ba9e3e56db57c5e4114bf7f901291f09b2c5619" },
@@ -669,7 +655,7 @@ source = { editable = "packages/redis" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "redis", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "redisvl", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
@@ -1171,11 +1157,11 @@ wheels = [
[[package]]
name = "babel"
-version = "2.17.0"
+version = "2.18.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/7d/b2/51899539b6ceeeb420d40ed3cd4b7a40519404f9baf3d4ac99dc413a834b/babel-2.18.0.tar.gz", hash = "sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d", size = 9959554, upload-time = "2026-02-01T12:30:56.078Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" },
+ { url = "https://files.pythonhosted.org/packages/77/f5/21d2de20e8b8b0408f0681956ca2c69f1320a3848ac50e6e7f39c6159675/babel-2.18.0-py3-none-any.whl", hash = "sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35", size = 10196845, upload-time = "2026-02-01T12:30:53.445Z" },
]
[[package]]
@@ -1424,19 +1410,19 @@ wheels = [
[[package]]
name = "claude-agent-sdk"
-version = "0.1.25"
+version = "0.1.27"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "mcp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/c5/ce/d8dd6eb56e981d1b981bf6766e1849878c54fbd160b6862e7c8e11b282d3/claude_agent_sdk-0.1.25.tar.gz", hash = "sha256:e2284fa2ece778d04b225f0f34118ea2623ae1f9fe315bc3bf921792658b6645", size = 57113, upload-time = "2026-01-29T01:20:17.353Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/ce/ef/0e51909e5a6e39d7c9e4073fdd3e00ff70677f99f8d1b87adef329c34acc/claude_agent_sdk-0.1.27.tar.gz", hash = "sha256:d2f4fc4c5e5c088efbaf66c34efcfd2aa7efafa3fed82f5cb1a95c451df96c38", size = 57216, upload-time = "2026-01-31T23:48:29.494Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/23/09/e25dad92af3305ded5490d4493f782b1cb8c530145a7107bceea26ec811e/claude_agent_sdk-0.1.25-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6adeffacbb75fe5c91529512331587a7af0e5e6dcbce4bd6b3a6ef8a51bdabeb", size = 54672313, upload-time = "2026-01-29T01:20:03.651Z" },
- { url = "https://files.pythonhosted.org/packages/28/0f/7b39ce9dd7d8f995e2c9d2049e1ce79f9010144a6793e8dd6ea9df23f53e/claude_agent_sdk-0.1.25-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:f210a05b2b471568c7f4019875b0ab451c783397f21edc32d7bd9a7144d9aad1", size = 68848229, upload-time = "2026-01-29T01:20:07.311Z" },
- { url = "https://files.pythonhosted.org/packages/40/6f/0b22cd9a68c39c0a8f5bd024072c15ca89bfa2dbfad3a94a35f6a1a90ecd/claude_agent_sdk-0.1.25-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:3399c3c748eb42deac308c6230cb0bb6b975c51b0495b42fe06896fa741d336f", size = 70562885, upload-time = "2026-01-29T01:20:11.033Z" },
- { url = "https://files.pythonhosted.org/packages/5c/b6/2aaf28eeaa994e5491ad9589a9b006d5112b167aab8ced0823a6ffd86e4f/claude_agent_sdk-0.1.25-py3-none-win_amd64.whl", hash = "sha256:c5e8fe666b88049080ae4ac2a02dbd2d5c00ab1c495683d3c2f7dfab8ff1fec9", size = 72746667, upload-time = "2026-01-29T01:20:14.271Z" },
+ { url = "https://files.pythonhosted.org/packages/66/fe/52b1e8394428ddafd952f41799bb4c8b0e60627b808ee2d797644da02624/claude_agent_sdk-0.1.27-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eddfe7fa40fdbd0a49fafd5698791bc911bc1e66e6ace2f77c50d5b64e138e93", size = 53901311, upload-time = "2026-01-31T23:48:16.664Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/eb/69dedbb195b69bd4b2ebf127407778e89c56e547e02bbcb74c130e1584c4/claude_agent_sdk-0.1.27-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:babf796d478a2b7ff75afab61d47bede4afecd6c793b7d540ee3aab42f00d5fb", size = 68107707, upload-time = "2026-01-31T23:48:19.724Z" },
+ { url = "https://files.pythonhosted.org/packages/84/06/886931dcbce8cd586aa38afa3ebdefe7d9eaa4ad389fa795560317c1f891/claude_agent_sdk-0.1.27-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:de0e22f3408ce7bdf909218e28be0e317b8d7d64b855cefc2cb3dd022f5f887b", size = 69810719, upload-time = "2026-01-31T23:48:22.914Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/ea/c987078f5059f05756886609f3196c8aeebe10f4e79c1f82f58b71eaeb9f/claude_agent_sdk-0.1.27-py3-none-win_amd64.whl", hash = "sha256:23fbb90727cd4dc776ad894a1b2dc040fb9fc2f0277a32b94336665e7c950692", size = 71994821, upload-time = "2026-01-31T23:48:26.333Z" },
]
[[package]]
@@ -1563,7 +1549,7 @@ resolution-markers = [
"python_full_version == '3.11.*' and sys_platform == 'win32'",
]
dependencies = [
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" }
wheels = [
@@ -2732,7 +2718,7 @@ wheels = [
[[package]]
name = "huggingface-hub"
-version = "1.3.5"
+version = "1.3.7"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -2746,9 +2732,9 @@ dependencies = [
{ name = "typer-slim", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/67/e9/2658cb9bc4c72a67b7f87650e827266139befaf499095883d30dabc4d49f/huggingface_hub-1.3.5.tar.gz", hash = "sha256:8045aca8ddab35d937138f3c386c6d43a275f53437c5c64cdc9aa8408653b4ed", size = 627456, upload-time = "2026-01-29T10:34:19.687Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/6d/3f/352efd52136bfd8aa9280c6d4a445869226ae2ccd49ddad4f62e90cfd168/huggingface_hub-1.3.7.tar.gz", hash = "sha256:5f86cd48f27131cdbf2882699cbdf7a67dd4cbe89a81edfdc31211f42e4a5fd1", size = 627537, upload-time = "2026-02-02T10:40:10.61Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/f9/84/a579b95c46fe8e319f89dc700c087596f665141575f4dcf136aaa97d856f/huggingface_hub-1.3.5-py3-none-any.whl", hash = "sha256:fe332d7f86a8af874768452295c22cd3f37730fb2463cf6cc3295e26036f8ef9", size = 536675, upload-time = "2026-01-29T10:34:17.713Z" },
+ { url = "https://files.pythonhosted.org/packages/54/89/bfbfde252d649fae8d5f09b14a2870e5672ed160c1a6629301b3e5302621/huggingface_hub-1.3.7-py3-none-any.whl", hash = "sha256:8155ce937038fa3d0cb4347d752708079bc85e6d9eb441afb44c84bcf48620d2", size = 536728, upload-time = "2026-02-02T10:40:08.274Z" },
]
[[package]]
@@ -2840,99 +2826,99 @@ wheels = [
[[package]]
name = "jiter"
-version = "0.12.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/45/9d/e0660989c1370e25848bb4c52d061c71837239738ad937e83edca174c273/jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b", size = 168294, upload-time = "2025-11-09T20:49:23.302Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3b/91/13cb9505f7be74a933f37da3af22e029f6ba64f5669416cb8b2774bc9682/jiter-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e7acbaba9703d5de82a2c98ae6a0f59ab9770ab5af5fa35e43a303aee962cf65", size = 316652, upload-time = "2025-11-09T20:46:41.021Z" },
- { url = "https://files.pythonhosted.org/packages/4e/76/4e9185e5d9bb4e482cf6dec6410d5f78dfeb374cfcecbbe9888d07c52daa/jiter-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:364f1a7294c91281260364222f535bc427f56d4de1d8ffd718162d21fbbd602e", size = 319829, upload-time = "2025-11-09T20:46:43.281Z" },
- { url = "https://files.pythonhosted.org/packages/86/af/727de50995d3a153138139f259baae2379d8cb0522c0c00419957bc478a6/jiter-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ee4d25805d4fb23f0a5167a962ef8e002dbfb29c0989378488e32cf2744b62", size = 350568, upload-time = "2025-11-09T20:46:45.075Z" },
- { url = "https://files.pythonhosted.org/packages/6a/c1/d6e9f4b7a3d5ac63bcbdfddeb50b2dcfbdc512c86cffc008584fdc350233/jiter-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:796f466b7942107eb889c08433b6e31b9a7ed31daceaecf8af1be26fb26c0ca8", size = 369052, upload-time = "2025-11-09T20:46:46.818Z" },
- { url = "https://files.pythonhosted.org/packages/eb/be/00824cd530f30ed73fa8a4f9f3890a705519e31ccb9e929f1e22062e7c76/jiter-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35506cb71f47dba416694e67af996bbdefb8e3608f1f78799c2e1f9058b01ceb", size = 481585, upload-time = "2025-11-09T20:46:48.319Z" },
- { url = "https://files.pythonhosted.org/packages/74/b6/2ad7990dff9504d4b5052eef64aa9574bd03d722dc7edced97aad0d47be7/jiter-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:726c764a90c9218ec9e4f99a33d6bf5ec169163f2ca0fc21b654e88c2abc0abc", size = 380541, upload-time = "2025-11-09T20:46:49.643Z" },
- { url = "https://files.pythonhosted.org/packages/b5/c7/f3c26ecbc1adbf1db0d6bba99192143d8fe8504729d9594542ecc4445784/jiter-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa47810c5565274810b726b0dc86d18dce5fd17b190ebdc3890851d7b2a0e74", size = 364423, upload-time = "2025-11-09T20:46:51.731Z" },
- { url = "https://files.pythonhosted.org/packages/18/51/eac547bf3a2d7f7e556927278e14c56a0604b8cddae75815d5739f65f81d/jiter-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ec0259d3f26c62aed4d73b198c53e316ae11f0f69c8fbe6682c6dcfa0fcce2", size = 389958, upload-time = "2025-11-09T20:46:53.432Z" },
- { url = "https://files.pythonhosted.org/packages/2c/1f/9ca592e67175f2db156cff035e0d817d6004e293ee0c1d73692d38fcb596/jiter-0.12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:79307d74ea83465b0152fa23e5e297149506435535282f979f18b9033c0bb025", size = 522084, upload-time = "2025-11-09T20:46:54.848Z" },
- { url = "https://files.pythonhosted.org/packages/83/ff/597d9cdc3028f28224f53e1a9d063628e28b7a5601433e3196edda578cdd/jiter-0.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cf6e6dd18927121fec86739f1a8906944703941d000f0639f3eb6281cc601dca", size = 513054, upload-time = "2025-11-09T20:46:56.487Z" },
- { url = "https://files.pythonhosted.org/packages/24/6d/1970bce1351bd02e3afcc5f49e4f7ef3dabd7fb688f42be7e8091a5b809a/jiter-0.12.0-cp310-cp310-win32.whl", hash = "sha256:b6ae2aec8217327d872cbfb2c1694489057b9433afce447955763e6ab015b4c4", size = 206368, upload-time = "2025-11-09T20:46:58.638Z" },
- { url = "https://files.pythonhosted.org/packages/e3/6b/eb1eb505b2d86709b59ec06681a2b14a94d0941db091f044b9f0e16badc0/jiter-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7f49ce90a71e44f7e1aa9e7ec415b9686bbc6a5961e57eab511015e6759bc11", size = 204847, upload-time = "2025-11-09T20:47:00.295Z" },
- { url = "https://files.pythonhosted.org/packages/32/f9/eaca4633486b527ebe7e681c431f529b63fe2709e7c5242fc0f43f77ce63/jiter-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8f8a7e317190b2c2d60eb2e8aa835270b008139562d70fe732e1c0020ec53c9", size = 316435, upload-time = "2025-11-09T20:47:02.087Z" },
- { url = "https://files.pythonhosted.org/packages/10/c1/40c9f7c22f5e6ff715f28113ebaba27ab85f9af2660ad6e1dd6425d14c19/jiter-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2218228a077e784c6c8f1a8e5d6b8cb1dea62ce25811c356364848554b2056cd", size = 320548, upload-time = "2025-11-09T20:47:03.409Z" },
- { url = "https://files.pythonhosted.org/packages/6b/1b/efbb68fe87e7711b00d2cfd1f26bb4bfc25a10539aefeaa7727329ffb9cb/jiter-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9354ccaa2982bf2188fd5f57f79f800ef622ec67beb8329903abf6b10da7d423", size = 351915, upload-time = "2025-11-09T20:47:05.171Z" },
- { url = "https://files.pythonhosted.org/packages/15/2d/c06e659888c128ad1e838123d0638f0efad90cc30860cb5f74dd3f2fc0b3/jiter-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f2607185ea89b4af9a604d4c7ec40e45d3ad03ee66998b031134bc510232bb7", size = 368966, upload-time = "2025-11-09T20:47:06.508Z" },
- { url = "https://files.pythonhosted.org/packages/6b/20/058db4ae5fb07cf6a4ab2e9b9294416f606d8e467fb74c2184b2a1eeacba/jiter-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a585a5e42d25f2e71db5f10b171f5e5ea641d3aa44f7df745aa965606111cc2", size = 482047, upload-time = "2025-11-09T20:47:08.382Z" },
- { url = "https://files.pythonhosted.org/packages/49/bb/dc2b1c122275e1de2eb12905015d61e8316b2f888bdaac34221c301495d6/jiter-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd9e21d34edff5a663c631f850edcb786719c960ce887a5661e9c828a53a95d9", size = 380835, upload-time = "2025-11-09T20:47:09.81Z" },
- { url = "https://files.pythonhosted.org/packages/23/7d/38f9cd337575349de16da575ee57ddb2d5a64d425c9367f5ef9e4612e32e/jiter-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a612534770470686cd5431478dc5a1b660eceb410abade6b1b74e320ca98de6", size = 364587, upload-time = "2025-11-09T20:47:11.529Z" },
- { url = "https://files.pythonhosted.org/packages/f0/a3/b13e8e61e70f0bb06085099c4e2462647f53cc2ca97614f7fedcaa2bb9f3/jiter-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3985aea37d40a908f887b34d05111e0aae822943796ebf8338877fee2ab67725", size = 390492, upload-time = "2025-11-09T20:47:12.993Z" },
- { url = "https://files.pythonhosted.org/packages/07/71/e0d11422ed027e21422f7bc1883c61deba2d9752b720538430c1deadfbca/jiter-0.12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b1207af186495f48f72529f8d86671903c8c10127cac6381b11dddc4aaa52df6", size = 522046, upload-time = "2025-11-09T20:47:14.6Z" },
- { url = "https://files.pythonhosted.org/packages/9f/59/b968a9aa7102a8375dbbdfbd2aeebe563c7e5dddf0f47c9ef1588a97e224/jiter-0.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef2fb241de583934c9915a33120ecc06d94aa3381a134570f59eed784e87001e", size = 513392, upload-time = "2025-11-09T20:47:16.011Z" },
- { url = "https://files.pythonhosted.org/packages/ca/e4/7df62002499080dbd61b505c5cb351aa09e9959d176cac2aa8da6f93b13b/jiter-0.12.0-cp311-cp311-win32.whl", hash = "sha256:453b6035672fecce8007465896a25b28a6b59cfe8fbc974b2563a92f5a92a67c", size = 206096, upload-time = "2025-11-09T20:47:17.344Z" },
- { url = "https://files.pythonhosted.org/packages/bb/60/1032b30ae0572196b0de0e87dce3b6c26a1eff71aad5fe43dee3082d32e0/jiter-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:ca264b9603973c2ad9435c71a8ec8b49f8f715ab5ba421c85a51cde9887e421f", size = 204899, upload-time = "2025-11-09T20:47:19.365Z" },
- { url = "https://files.pythonhosted.org/packages/49/d5/c145e526fccdb834063fb45c071df78b0cc426bbaf6de38b0781f45d956f/jiter-0.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:cb00ef392e7d684f2754598c02c409f376ddcef857aae796d559e6cacc2d78a5", size = 188070, upload-time = "2025-11-09T20:47:20.75Z" },
- { url = "https://files.pythonhosted.org/packages/92/c9/5b9f7b4983f1b542c64e84165075335e8a236fa9e2ea03a0c79780062be8/jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37", size = 314449, upload-time = "2025-11-09T20:47:22.999Z" },
- { url = "https://files.pythonhosted.org/packages/98/6e/e8efa0e78de00db0aee82c0cf9e8b3f2027efd7f8a71f859d8f4be8e98ef/jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274", size = 319855, upload-time = "2025-11-09T20:47:24.779Z" },
- { url = "https://files.pythonhosted.org/packages/20/26/894cd88e60b5d58af53bec5c6759d1292bd0b37a8b5f60f07abf7a63ae5f/jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3", size = 350171, upload-time = "2025-11-09T20:47:26.469Z" },
- { url = "https://files.pythonhosted.org/packages/f5/27/a7b818b9979ac31b3763d25f3653ec3a954044d5e9f5d87f2f247d679fd1/jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf", size = 365590, upload-time = "2025-11-09T20:47:27.918Z" },
- { url = "https://files.pythonhosted.org/packages/ba/7e/e46195801a97673a83746170b17984aa8ac4a455746354516d02ca5541b4/jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1", size = 479462, upload-time = "2025-11-09T20:47:29.654Z" },
- { url = "https://files.pythonhosted.org/packages/ca/75/f833bfb009ab4bd11b1c9406d333e3b4357709ed0570bb48c7c06d78c7dd/jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df", size = 378983, upload-time = "2025-11-09T20:47:31.026Z" },
- { url = "https://files.pythonhosted.org/packages/71/b3/7a69d77943cc837d30165643db753471aff5df39692d598da880a6e51c24/jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403", size = 361328, upload-time = "2025-11-09T20:47:33.286Z" },
- { url = "https://files.pythonhosted.org/packages/b0/ac/a78f90caf48d65ba70d8c6efc6f23150bc39dc3389d65bbec2a95c7bc628/jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126", size = 386740, upload-time = "2025-11-09T20:47:34.703Z" },
- { url = "https://files.pythonhosted.org/packages/39/b6/5d31c2cc8e1b6a6bcf3c5721e4ca0a3633d1ab4754b09bc7084f6c4f5327/jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9", size = 520875, upload-time = "2025-11-09T20:47:36.058Z" },
- { url = "https://files.pythonhosted.org/packages/30/b5/4df540fae4e9f68c54b8dab004bd8c943a752f0b00efd6e7d64aa3850339/jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86", size = 511457, upload-time = "2025-11-09T20:47:37.932Z" },
- { url = "https://files.pythonhosted.org/packages/07/65/86b74010e450a1a77b2c1aabb91d4a91dd3cd5afce99f34d75fd1ac64b19/jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44", size = 204546, upload-time = "2025-11-09T20:47:40.47Z" },
- { url = "https://files.pythonhosted.org/packages/1c/c7/6659f537f9562d963488e3e55573498a442503ced01f7e169e96a6110383/jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb", size = 205196, upload-time = "2025-11-09T20:47:41.794Z" },
- { url = "https://files.pythonhosted.org/packages/21/f4/935304f5169edadfec7f9c01eacbce4c90bb9a82035ac1de1f3bd2d40be6/jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789", size = 186100, upload-time = "2025-11-09T20:47:43.007Z" },
- { url = "https://files.pythonhosted.org/packages/3d/a6/97209693b177716e22576ee1161674d1d58029eb178e01866a0422b69224/jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e", size = 313658, upload-time = "2025-11-09T20:47:44.424Z" },
- { url = "https://files.pythonhosted.org/packages/06/4d/125c5c1537c7d8ee73ad3d530a442d6c619714b95027143f1b61c0b4dfe0/jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1", size = 318605, upload-time = "2025-11-09T20:47:45.973Z" },
- { url = "https://files.pythonhosted.org/packages/99/bf/a840b89847885064c41a5f52de6e312e91fa84a520848ee56c97e4fa0205/jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf", size = 349803, upload-time = "2025-11-09T20:47:47.535Z" },
- { url = "https://files.pythonhosted.org/packages/8a/88/e63441c28e0db50e305ae23e19c1d8fae012d78ed55365da392c1f34b09c/jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44", size = 365120, upload-time = "2025-11-09T20:47:49.284Z" },
- { url = "https://files.pythonhosted.org/packages/0a/7c/49b02714af4343970eb8aca63396bc1c82fa01197dbb1e9b0d274b550d4e/jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45", size = 479918, upload-time = "2025-11-09T20:47:50.807Z" },
- { url = "https://files.pythonhosted.org/packages/69/ba/0a809817fdd5a1db80490b9150645f3aae16afad166960bcd562be194f3b/jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87", size = 379008, upload-time = "2025-11-09T20:47:52.211Z" },
- { url = "https://files.pythonhosted.org/packages/5f/c3/c9fc0232e736c8877d9e6d83d6eeb0ba4e90c6c073835cc2e8f73fdeef51/jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed", size = 361785, upload-time = "2025-11-09T20:47:53.512Z" },
- { url = "https://files.pythonhosted.org/packages/96/61/61f69b7e442e97ca6cd53086ddc1cf59fb830549bc72c0a293713a60c525/jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9", size = 386108, upload-time = "2025-11-09T20:47:54.893Z" },
- { url = "https://files.pythonhosted.org/packages/e9/2e/76bb3332f28550c8f1eba3bf6e5efe211efda0ddbbaf24976bc7078d42a5/jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626", size = 519937, upload-time = "2025-11-09T20:47:56.253Z" },
- { url = "https://files.pythonhosted.org/packages/84/d6/fa96efa87dc8bff2094fb947f51f66368fa56d8d4fc9e77b25d7fbb23375/jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c", size = 510853, upload-time = "2025-11-09T20:47:58.32Z" },
- { url = "https://files.pythonhosted.org/packages/8a/28/93f67fdb4d5904a708119a6ab58a8f1ec226ff10a94a282e0215402a8462/jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de", size = 204699, upload-time = "2025-11-09T20:47:59.686Z" },
- { url = "https://files.pythonhosted.org/packages/c4/1f/30b0eb087045a0abe2a5c9c0c0c8da110875a1d3be83afd4a9a4e548be3c/jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a", size = 204258, upload-time = "2025-11-09T20:48:01.01Z" },
- { url = "https://files.pythonhosted.org/packages/2c/f4/2b4daf99b96bce6fc47971890b14b2a36aef88d7beb9f057fafa032c6141/jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60", size = 185503, upload-time = "2025-11-09T20:48:02.35Z" },
- { url = "https://files.pythonhosted.org/packages/39/ca/67bb15a7061d6fe20b9b2a2fd783e296a1e0f93468252c093481a2f00efa/jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6", size = 317965, upload-time = "2025-11-09T20:48:03.783Z" },
- { url = "https://files.pythonhosted.org/packages/18/af/1788031cd22e29c3b14bc6ca80b16a39a0b10e611367ffd480c06a259831/jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4", size = 345831, upload-time = "2025-11-09T20:48:05.55Z" },
- { url = "https://files.pythonhosted.org/packages/05/17/710bf8472d1dff0d3caf4ced6031060091c1320f84ee7d5dcbed1f352417/jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb", size = 361272, upload-time = "2025-11-09T20:48:06.951Z" },
- { url = "https://files.pythonhosted.org/packages/fb/f1/1dcc4618b59761fef92d10bcbb0b038b5160be653b003651566a185f1a5c/jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7", size = 204604, upload-time = "2025-11-09T20:48:08.328Z" },
- { url = "https://files.pythonhosted.org/packages/d9/32/63cb1d9f1c5c6632a783c0052cde9ef7ba82688f7065e2f0d5f10a7e3edb/jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3", size = 185628, upload-time = "2025-11-09T20:48:09.572Z" },
- { url = "https://files.pythonhosted.org/packages/a8/99/45c9f0dbe4a1416b2b9a8a6d1236459540f43d7fb8883cff769a8db0612d/jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525", size = 312478, upload-time = "2025-11-09T20:48:10.898Z" },
- { url = "https://files.pythonhosted.org/packages/4c/a7/54ae75613ba9e0f55fcb0bc5d1f807823b5167cc944e9333ff322e9f07dd/jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49", size = 318706, upload-time = "2025-11-09T20:48:12.266Z" },
- { url = "https://files.pythonhosted.org/packages/59/31/2aa241ad2c10774baf6c37f8b8e1f39c07db358f1329f4eb40eba179c2a2/jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1", size = 351894, upload-time = "2025-11-09T20:48:13.673Z" },
- { url = "https://files.pythonhosted.org/packages/54/4f/0f2759522719133a9042781b18cc94e335b6d290f5e2d3e6899d6af933e3/jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e", size = 365714, upload-time = "2025-11-09T20:48:15.083Z" },
- { url = "https://files.pythonhosted.org/packages/dc/6f/806b895f476582c62a2f52c453151edd8a0fde5411b0497baaa41018e878/jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e", size = 478989, upload-time = "2025-11-09T20:48:16.706Z" },
- { url = "https://files.pythonhosted.org/packages/86/6c/012d894dc6e1033acd8db2b8346add33e413ec1c7c002598915278a37f79/jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff", size = 378615, upload-time = "2025-11-09T20:48:18.614Z" },
- { url = "https://files.pythonhosted.org/packages/87/30/d718d599f6700163e28e2c71c0bbaf6dace692e7df2592fd793ac9276717/jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a", size = 364745, upload-time = "2025-11-09T20:48:20.117Z" },
- { url = "https://files.pythonhosted.org/packages/8f/85/315b45ce4b6ddc7d7fceca24068543b02bdc8782942f4ee49d652e2cc89f/jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a", size = 386502, upload-time = "2025-11-09T20:48:21.543Z" },
- { url = "https://files.pythonhosted.org/packages/74/0b/ce0434fb40c5b24b368fe81b17074d2840748b4952256bab451b72290a49/jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67", size = 519845, upload-time = "2025-11-09T20:48:22.964Z" },
- { url = "https://files.pythonhosted.org/packages/e8/a3/7a7a4488ba052767846b9c916d208b3ed114e3eb670ee984e4c565b9cf0d/jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b", size = 510701, upload-time = "2025-11-09T20:48:24.483Z" },
- { url = "https://files.pythonhosted.org/packages/c3/16/052ffbf9d0467b70af24e30f91e0579e13ded0c17bb4a8eb2aed3cb60131/jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42", size = 205029, upload-time = "2025-11-09T20:48:25.749Z" },
- { url = "https://files.pythonhosted.org/packages/e4/18/3cf1f3f0ccc789f76b9a754bdb7a6977e5d1d671ee97a9e14f7eb728d80e/jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf", size = 204960, upload-time = "2025-11-09T20:48:27.415Z" },
- { url = "https://files.pythonhosted.org/packages/02/68/736821e52ecfdeeb0f024b8ab01b5a229f6b9293bbdb444c27efade50b0f/jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451", size = 185529, upload-time = "2025-11-09T20:48:29.125Z" },
- { url = "https://files.pythonhosted.org/packages/30/61/12ed8ee7a643cce29ac97c2281f9ce3956eb76b037e88d290f4ed0d41480/jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7", size = 318974, upload-time = "2025-11-09T20:48:30.87Z" },
- { url = "https://files.pythonhosted.org/packages/2d/c6/f3041ede6d0ed5e0e79ff0de4c8f14f401bbf196f2ef3971cdbe5fd08d1d/jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684", size = 345932, upload-time = "2025-11-09T20:48:32.658Z" },
- { url = "https://files.pythonhosted.org/packages/d5/5d/4d94835889edd01ad0e2dbfc05f7bdfaed46292e7b504a6ac7839aa00edb/jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c", size = 367243, upload-time = "2025-11-09T20:48:34.093Z" },
- { url = "https://files.pythonhosted.org/packages/fd/76/0051b0ac2816253a99d27baf3dda198663aff882fa6ea7deeb94046da24e/jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d", size = 479315, upload-time = "2025-11-09T20:48:35.507Z" },
- { url = "https://files.pythonhosted.org/packages/70/ae/83f793acd68e5cb24e483f44f482a1a15601848b9b6f199dacb970098f77/jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993", size = 380714, upload-time = "2025-11-09T20:48:40.014Z" },
- { url = "https://files.pythonhosted.org/packages/b1/5e/4808a88338ad2c228b1126b93fcd8ba145e919e886fe910d578230dabe3b/jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f", size = 365168, upload-time = "2025-11-09T20:48:41.462Z" },
- { url = "https://files.pythonhosted.org/packages/0c/d4/04619a9e8095b42aef436b5aeb4c0282b4ff1b27d1db1508df9f5dc82750/jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783", size = 387893, upload-time = "2025-11-09T20:48:42.921Z" },
- { url = "https://files.pythonhosted.org/packages/17/ea/d3c7e62e4546fdc39197fa4a4315a563a89b95b6d54c0d25373842a59cbe/jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b", size = 520828, upload-time = "2025-11-09T20:48:44.278Z" },
- { url = "https://files.pythonhosted.org/packages/cc/0b/c6d3562a03fd767e31cb119d9041ea7958c3c80cb3d753eafb19b3b18349/jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6", size = 511009, upload-time = "2025-11-09T20:48:45.726Z" },
- { url = "https://files.pythonhosted.org/packages/aa/51/2cb4468b3448a8385ebcd15059d325c9ce67df4e2758d133ab9442b19834/jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183", size = 205110, upload-time = "2025-11-09T20:48:47.033Z" },
- { url = "https://files.pythonhosted.org/packages/b2/c5/ae5ec83dec9c2d1af805fd5fe8f74ebded9c8670c5210ec7820ce0dbeb1e/jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873", size = 205223, upload-time = "2025-11-09T20:48:49.076Z" },
- { url = "https://files.pythonhosted.org/packages/97/9a/3c5391907277f0e55195550cf3fa8e293ae9ee0c00fb402fec1e38c0c82f/jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473", size = 185564, upload-time = "2025-11-09T20:48:50.376Z" },
- { url = "https://files.pythonhosted.org/packages/fe/54/5339ef1ecaa881c6948669956567a64d2670941925f245c434f494ffb0e5/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8", size = 311144, upload-time = "2025-11-09T20:49:10.503Z" },
- { url = "https://files.pythonhosted.org/packages/27/74/3446c652bffbd5e81ab354e388b1b5fc1d20daac34ee0ed11ff096b1b01a/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3", size = 305877, upload-time = "2025-11-09T20:49:12.269Z" },
- { url = "https://files.pythonhosted.org/packages/a1/f4/ed76ef9043450f57aac2d4fbeb27175aa0eb9c38f833be6ef6379b3b9a86/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e", size = 340419, upload-time = "2025-11-09T20:49:13.803Z" },
- { url = "https://files.pythonhosted.org/packages/21/01/857d4608f5edb0664aa791a3d45702e1a5bcfff9934da74035e7b9803846/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d", size = 347212, upload-time = "2025-11-09T20:49:15.643Z" },
- { url = "https://files.pythonhosted.org/packages/cb/f5/12efb8ada5f5c9edc1d4555fe383c1fb2eac05ac5859258a72d61981d999/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb", size = 309974, upload-time = "2025-11-09T20:49:17.187Z" },
- { url = "https://files.pythonhosted.org/packages/85/15/d6eb3b770f6a0d332675141ab3962fd4a7c270ede3515d9f3583e1d28276/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b", size = 304233, upload-time = "2025-11-09T20:49:18.734Z" },
- { url = "https://files.pythonhosted.org/packages/8c/3e/e7e06743294eea2cf02ced6aa0ff2ad237367394e37a0e2b4a1108c67a36/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f", size = 338537, upload-time = "2025-11-09T20:49:20.317Z" },
- { url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110, upload-time = "2025-11-09T20:49:21.817Z" },
+version = "0.13.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d0/5a/41da76c5ea07bec1b0472b6b2fdb1b651074d504b19374d7e130e0cdfb25/jiter-0.13.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2ffc63785fd6c7977defe49b9824ae6ce2b2e2b77ce539bdaf006c26da06342e", size = 311164, upload-time = "2026-02-02T12:35:17.688Z" },
+ { url = "https://files.pythonhosted.org/packages/40/cb/4a1bf994a3e869f0d39d10e11efb471b76d0ad70ecbfb591427a46c880c2/jiter-0.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a638816427006c1e3f0013eb66d391d7a3acda99a7b0cf091eff4497ccea33a", size = 320296, upload-time = "2026-02-02T12:35:19.828Z" },
+ { url = "https://files.pythonhosted.org/packages/09/82/acd71ca9b50ecebadc3979c541cd717cce2fe2bc86236f4fa597565d8f1a/jiter-0.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19928b5d1ce0ff8c1ee1b9bdef3b5bfc19e8304f1b904e436caf30bc15dc6cf5", size = 352742, upload-time = "2026-02-02T12:35:21.258Z" },
+ { url = "https://files.pythonhosted.org/packages/71/03/d1fc996f3aecfd42eb70922edecfb6dd26421c874503e241153ad41df94f/jiter-0.13.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:309549b778b949d731a2f0e1594a3f805716be704a73bf3ad9a807eed5eb5721", size = 363145, upload-time = "2026-02-02T12:35:24.653Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/61/a30492366378cc7a93088858f8991acd7d959759fe6138c12a4644e58e81/jiter-0.13.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcdabaea26cb04e25df3103ce47f97466627999260290349a88c8136ecae0060", size = 487683, upload-time = "2026-02-02T12:35:26.162Z" },
+ { url = "https://files.pythonhosted.org/packages/20/4e/4223cffa9dbbbc96ed821c5aeb6bca510848c72c02086d1ed3f1da3d58a7/jiter-0.13.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3a377af27b236abbf665a69b2bdd680e3b5a0bd2af825cd3b81245279a7606c", size = 373579, upload-time = "2026-02-02T12:35:27.582Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/c9/b0489a01329ab07a83812d9ebcffe7820a38163c6d9e7da644f926ff877c/jiter-0.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe49d3ff6db74321f144dff9addd4a5874d3105ac5ba7c5b77fac099cfae31ae", size = 362904, upload-time = "2026-02-02T12:35:28.925Z" },
+ { url = "https://files.pythonhosted.org/packages/05/af/53e561352a44afcba9a9bc67ee1d320b05a370aed8df54eafe714c4e454d/jiter-0.13.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2113c17c9a67071b0f820733c0893ed1d467b5fcf4414068169e5c2cabddb1e2", size = 392380, upload-time = "2026-02-02T12:35:30.385Z" },
+ { url = "https://files.pythonhosted.org/packages/76/2a/dd805c3afb8ed5b326c5ae49e725d1b1255b9754b1b77dbecdc621b20773/jiter-0.13.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ab1185ca5c8b9491b55ebf6c1e8866b8f68258612899693e24a92c5fdb9455d5", size = 517939, upload-time = "2026-02-02T12:35:31.865Z" },
+ { url = "https://files.pythonhosted.org/packages/20/2a/7b67d76f55b8fe14c937e7640389612f05f9a4145fc28ae128aaa5e62257/jiter-0.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9621ca242547edc16400981ca3231e0c91c0c4c1ab8573a596cd9bb3575d5c2b", size = 551696, upload-time = "2026-02-02T12:35:33.306Z" },
+ { url = "https://files.pythonhosted.org/packages/85/9c/57cdd64dac8f4c6ab8f994fe0eb04dc9fd1db102856a4458fcf8a99dfa62/jiter-0.13.0-cp310-cp310-win32.whl", hash = "sha256:a7637d92b1c9d7a771e8c56f445c7f84396d48f2e756e5978840ecba2fac0894", size = 204592, upload-time = "2026-02-02T12:35:34.58Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/38/f4f3ea5788b8a5bae7510a678cdc747eda0c45ffe534f9878ff37e7cf3b3/jiter-0.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c1b609e5cbd2f52bb74fb721515745b407df26d7b800458bd97cb3b972c29e7d", size = 206016, upload-time = "2026-02-02T12:35:36.435Z" },
+ { url = "https://files.pythonhosted.org/packages/71/29/499f8c9eaa8a16751b1c0e45e6f5f1761d180da873d417996cc7bddc8eef/jiter-0.13.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ea026e70a9a28ebbdddcbcf0f1323128a8db66898a06eaad3a4e62d2f554d096", size = 311157, upload-time = "2026-02-02T12:35:37.758Z" },
+ { url = "https://files.pythonhosted.org/packages/50/f6/566364c777d2ab450b92100bea11333c64c38d32caf8dc378b48e5b20c46/jiter-0.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66aa3e663840152d18cc8ff1e4faad3dd181373491b9cfdc6004b92198d67911", size = 319729, upload-time = "2026-02-02T12:35:39.246Z" },
+ { url = "https://files.pythonhosted.org/packages/73/dd/560f13ec5e4f116d8ad2658781646cca91b617ae3b8758d4a5076b278f70/jiter-0.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3524798e70655ff19aec58c7d05adb1f074fecff62da857ea9be2b908b6d701", size = 354766, upload-time = "2026-02-02T12:35:40.662Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/0d/061faffcfe94608cbc28a0d42a77a74222bdf5055ccdbe5fd2292b94f510/jiter-0.13.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec7e287d7fbd02cb6e22f9a00dd9c9cd504c40a61f2c61e7e1f9690a82726b4c", size = 362587, upload-time = "2026-02-02T12:35:42.025Z" },
+ { url = "https://files.pythonhosted.org/packages/92/c9/c66a7864982fd38a9773ec6e932e0398d1262677b8c60faecd02ffb67bf3/jiter-0.13.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47455245307e4debf2ce6c6e65a717550a0244231240dcf3b8f7d64e4c2f22f4", size = 487537, upload-time = "2026-02-02T12:35:43.459Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/86/84eb4352cd3668f16d1a88929b5888a3fe0418ea8c1dfc2ad4e7bf6e069a/jiter-0.13.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ee9da221dca6e0429c2704c1b3655fe7b025204a71d4d9b73390c759d776d165", size = 373717, upload-time = "2026-02-02T12:35:44.928Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/09/9fe4c159358176f82d4390407a03f506a8659ed13ca3ac93a843402acecf/jiter-0.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24ab43126d5e05f3d53a36a8e11eb2f23304c6c1117844aaaf9a0aa5e40b5018", size = 362683, upload-time = "2026-02-02T12:35:46.636Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/5e/85f3ab9caca0c1d0897937d378b4a515cae9e119730563572361ea0c48ae/jiter-0.13.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9da38b4fedde4fb528c740c2564628fbab737166a0e73d6d46cb4bb5463ff411", size = 392345, upload-time = "2026-02-02T12:35:48.088Z" },
+ { url = "https://files.pythonhosted.org/packages/12/4c/05b8629ad546191939e6f0c2f17e29f542a398f4a52fb987bc70b6d1eb8b/jiter-0.13.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b34c519e17658ed88d5047999a93547f8889f3c1824120c26ad6be5f27b6cf5", size = 517775, upload-time = "2026-02-02T12:35:49.482Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/88/367ea2eb6bc582c7052e4baf5ddf57ebe5ab924a88e0e09830dfb585c02d/jiter-0.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2a6394e6af690d462310a86b53c47ad75ac8c21dc79f120714ea449979cb1d3", size = 551325, upload-time = "2026-02-02T12:35:51.104Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/12/fa377ffb94a2f28c41afaed093e0d70cfe512035d5ecb0cad0ae4792d35e/jiter-0.13.0-cp311-cp311-win32.whl", hash = "sha256:0f0c065695f616a27c920a56ad0d4fc46415ef8b806bf8fc1cacf25002bd24e1", size = 204709, upload-time = "2026-02-02T12:35:52.467Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/16/8e8203ce92f844dfcd3d9d6a5a7322c77077248dbb12da52d23193a839cd/jiter-0.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:0733312953b909688ae3c2d58d043aa040f9f1a6a75693defed7bc2cc4bf2654", size = 204560, upload-time = "2026-02-02T12:35:53.925Z" },
+ { url = "https://files.pythonhosted.org/packages/44/26/97cc40663deb17b9e13c3a5cf29251788c271b18ee4d262c8f94798b8336/jiter-0.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:5d9b34ad56761b3bf0fbe8f7e55468704107608512350962d3317ffd7a4382d5", size = 189608, upload-time = "2026-02-02T12:35:55.304Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/30/7687e4f87086829955013ca12a9233523349767f69653ebc27036313def9/jiter-0.13.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a2bd69fc1d902e89925fc34d1da51b2128019423d7b339a45d9e99c894e0663", size = 307958, upload-time = "2026-02-02T12:35:57.165Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/27/e57f9a783246ed95481e6749cc5002a8a767a73177a83c63ea71f0528b90/jiter-0.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f917a04240ef31898182f76a332f508f2cc4b57d2b4d7ad2dbfebbfe167eb505", size = 318597, upload-time = "2026-02-02T12:35:58.591Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/52/e5719a60ac5d4d7c5995461a94ad5ef962a37c8bf5b088390e6fad59b2ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1e2b199f446d3e82246b4fd9236d7cb502dc2222b18698ba0d986d2fecc6152", size = 348821, upload-time = "2026-02-02T12:36:00.093Z" },
+ { url = "https://files.pythonhosted.org/packages/61/db/c1efc32b8ba4c740ab3fc2d037d8753f67685f475e26b9d6536a4322bcdd/jiter-0.13.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04670992b576fa65bd056dbac0c39fe8bd67681c380cb2b48efa885711d9d726", size = 364163, upload-time = "2026-02-02T12:36:01.937Z" },
+ { url = "https://files.pythonhosted.org/packages/55/8a/fb75556236047c8806995671a18e4a0ad646ed255276f51a20f32dceaeec/jiter-0.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a1aff1fbdb803a376d4d22a8f63f8e7ccbce0b4890c26cc7af9e501ab339ef0", size = 483709, upload-time = "2026-02-02T12:36:03.41Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/16/43512e6ee863875693a8e6f6d532e19d650779d6ba9a81593ae40a9088ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b3fb8c2053acaef8580809ac1d1f7481a0a0bdc012fd7f5d8b18fb696a5a089", size = 370480, upload-time = "2026-02-02T12:36:04.791Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/4c/09b93e30e984a187bc8aaa3510e1ec8dcbdcd71ca05d2f56aac0492453aa/jiter-0.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaba7d87e66f26a2c45d8cbadcbfc4bf7884182317907baf39cfe9775bb4d93", size = 360735, upload-time = "2026-02-02T12:36:06.994Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/1b/46c5e349019874ec5dfa508c14c37e29864ea108d376ae26d90bee238cd7/jiter-0.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b88d649135aca526da172e48083da915ec086b54e8e73a425ba50999468cc08", size = 391814, upload-time = "2026-02-02T12:36:08.368Z" },
+ { url = "https://files.pythonhosted.org/packages/15/9e/26184760e85baee7162ad37b7912797d2077718476bf91517641c92b3639/jiter-0.13.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e404ea551d35438013c64b4f357b0474c7abf9f781c06d44fcaf7a14c69ff9e2", size = 513990, upload-time = "2026-02-02T12:36:09.993Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/34/2c9355247d6debad57a0a15e76ab1566ab799388042743656e566b3b7de1/jiter-0.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f4748aad1b4a93c8bdd70f604d0f748cdc0e8744c5547798acfa52f10e79228", size = 548021, upload-time = "2026-02-02T12:36:11.376Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/4a/9f2c23255d04a834398b9c2e0e665382116911dc4d06b795710503cdad25/jiter-0.13.0-cp312-cp312-win32.whl", hash = "sha256:0bf670e3b1445fc4d31612199f1744f67f889ee1bbae703c4b54dc097e5dd394", size = 203024, upload-time = "2026-02-02T12:36:12.682Z" },
+ { url = "https://files.pythonhosted.org/packages/09/ee/f0ae675a957ae5a8f160be3e87acea6b11dc7b89f6b7ab057e77b2d2b13a/jiter-0.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:15db60e121e11fe186c0b15236bd5d18381b9ddacdcf4e659feb96fc6c969c92", size = 205424, upload-time = "2026-02-02T12:36:13.93Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/02/ae611edf913d3cbf02c97cdb90374af2082c48d7190d74c1111dde08bcdd/jiter-0.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:41f92313d17989102f3cb5dd533a02787cdb99454d494344b0361355da52fcb9", size = 186818, upload-time = "2026-02-02T12:36:15.308Z" },
+ { url = "https://files.pythonhosted.org/packages/91/9c/7ee5a6ff4b9991e1a45263bfc46731634c4a2bde27dfda6c8251df2d958c/jiter-0.13.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1f8a55b848cbabf97d861495cd65f1e5c590246fabca8b48e1747c4dfc8f85bf", size = 306897, upload-time = "2026-02-02T12:36:16.748Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/02/be5b870d1d2be5dd6a91bdfb90f248fbb7dcbd21338f092c6b89817c3dbf/jiter-0.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f556aa591c00f2c45eb1b89f68f52441a016034d18b65da60e2d2875bbbf344a", size = 317507, upload-time = "2026-02-02T12:36:18.351Z" },
+ { url = "https://files.pythonhosted.org/packages/da/92/b25d2ec333615f5f284f3a4024f7ce68cfa0604c322c6808b2344c7f5d2b/jiter-0.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7e1d61da332ec412350463891923f960c3073cf1aae93b538f0bb4c8cd46efb", size = 350560, upload-time = "2026-02-02T12:36:19.746Z" },
+ { url = "https://files.pythonhosted.org/packages/be/ec/74dcb99fef0aca9fbe56b303bf79f6bd839010cb18ad41000bf6cc71eec0/jiter-0.13.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3097d665a27bc96fd9bbf7f86178037db139f319f785e4757ce7ccbf390db6c2", size = 363232, upload-time = "2026-02-02T12:36:21.243Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/37/f17375e0bb2f6a812d4dd92d7616e41917f740f3e71343627da9db2824ce/jiter-0.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d01ecc3a8cbdb6f25a37bd500510550b64ddf9f7d64a107d92f3ccb25035d0f", size = 483727, upload-time = "2026-02-02T12:36:22.688Z" },
+ { url = "https://files.pythonhosted.org/packages/77/d2/a71160a5ae1a1e66c1395b37ef77da67513b0adba73b993a27fbe47eb048/jiter-0.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed9bbc30f5d60a3bdf63ae76beb3f9db280d7f195dfcfa61af792d6ce912d159", size = 370799, upload-time = "2026-02-02T12:36:24.106Z" },
+ { url = "https://files.pythonhosted.org/packages/01/99/ed5e478ff0eb4e8aa5fd998f9d69603c9fd3f32de3bd16c2b1194f68361c/jiter-0.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fbafb6e88256f4454de33c1f40203d09fc33ed19162a68b3b257b29ca7f663", size = 359120, upload-time = "2026-02-02T12:36:25.519Z" },
+ { url = "https://files.pythonhosted.org/packages/16/be/7ffd08203277a813f732ba897352797fa9493faf8dc7995b31f3d9cb9488/jiter-0.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5467696f6b827f1116556cb0db620440380434591e93ecee7fd14d1a491b6daa", size = 390664, upload-time = "2026-02-02T12:36:26.866Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/84/e0787856196d6d346264d6dcccb01f741e5f0bd014c1d9a2ebe149caf4f3/jiter-0.13.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2d08c9475d48b92892583df9da592a0e2ac49bcd41fae1fec4f39ba6cf107820", size = 513543, upload-time = "2026-02-02T12:36:28.217Z" },
+ { url = "https://files.pythonhosted.org/packages/65/50/ecbd258181c4313cf79bca6c88fb63207d04d5bf5e4f65174114d072aa55/jiter-0.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:aed40e099404721d7fcaf5b89bd3b4568a4666358bcac7b6b15c09fb6252ab68", size = 547262, upload-time = "2026-02-02T12:36:29.678Z" },
+ { url = "https://files.pythonhosted.org/packages/27/da/68f38d12e7111d2016cd198161b36e1f042bd115c169255bcb7ec823a3bf/jiter-0.13.0-cp313-cp313-win32.whl", hash = "sha256:36ebfbcffafb146d0e6ffb3e74d51e03d9c35ce7c625c8066cdbfc7b953bdc72", size = 200630, upload-time = "2026-02-02T12:36:31.808Z" },
+ { url = "https://files.pythonhosted.org/packages/25/65/3bd1a972c9a08ecd22eb3b08a95d1941ebe6938aea620c246cf426ae09c2/jiter-0.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:8d76029f077379374cf0dbc78dbe45b38dec4a2eb78b08b5194ce836b2517afc", size = 202602, upload-time = "2026-02-02T12:36:33.679Z" },
+ { url = "https://files.pythonhosted.org/packages/15/fe/13bd3678a311aa67686bb303654792c48206a112068f8b0b21426eb6851e/jiter-0.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:bb7613e1a427cfcb6ea4544f9ac566b93d5bf67e0d48c787eca673ff9c9dff2b", size = 185939, upload-time = "2026-02-02T12:36:35.065Z" },
+ { url = "https://files.pythonhosted.org/packages/49/19/a929ec002ad3228bc97ca01dbb14f7632fffdc84a95ec92ceaf4145688ae/jiter-0.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fa476ab5dd49f3bf3a168e05f89358c75a17608dbabb080ef65f96b27c19ab10", size = 316616, upload-time = "2026-02-02T12:36:36.579Z" },
+ { url = "https://files.pythonhosted.org/packages/52/56/d19a9a194afa37c1728831e5fb81b7722c3de18a3109e8f282bfc23e587a/jiter-0.13.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade8cb6ff5632a62b7dbd4757d8c5573f7a2e9ae285d6b5b841707d8363205ef", size = 346850, upload-time = "2026-02-02T12:36:38.058Z" },
+ { url = "https://files.pythonhosted.org/packages/36/4a/94e831c6bf287754a8a019cb966ed39ff8be6ab78cadecf08df3bb02d505/jiter-0.13.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9950290340acc1adaded363edd94baebcee7dabdfa8bee4790794cd5cfad2af6", size = 358551, upload-time = "2026-02-02T12:36:39.417Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/ec/a4c72c822695fa80e55d2b4142b73f0012035d9fcf90eccc56bc060db37c/jiter-0.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2b4972c6df33731aac0742b64fd0d18e0a69bc7d6e03108ce7d40c85fd9e3e6d", size = 201950, upload-time = "2026-02-02T12:36:40.791Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/00/393553ec27b824fbc29047e9c7cd4a3951d7fbe4a76743f17e44034fa4e4/jiter-0.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:701a1e77d1e593c1b435315ff625fd071f0998c5f02792038a5ca98899261b7d", size = 185852, upload-time = "2026-02-02T12:36:42.077Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/f5/f1997e987211f6f9bd71b8083047b316208b4aca0b529bb5f8c96c89ef3e/jiter-0.13.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:cc5223ab19fe25e2f0bf2643204ad7318896fe3729bf12fde41b77bfc4fafff0", size = 308804, upload-time = "2026-02-02T12:36:43.496Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/8f/5482a7677731fd44881f0204981ce2d7175db271f82cba2085dd2212e095/jiter-0.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9776ebe51713acf438fd9b4405fcd86893ae5d03487546dae7f34993217f8a91", size = 318787, upload-time = "2026-02-02T12:36:45.071Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/b9/7257ac59778f1cd025b26a23c5520a36a424f7f1b068f2442a5b499b7464/jiter-0.13.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879e768938e7b49b5e90b7e3fecc0dbec01b8cb89595861fb39a8967c5220d09", size = 353880, upload-time = "2026-02-02T12:36:47.365Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/87/719eec4a3f0841dad99e3d3604ee4cba36af4419a76f3cb0b8e2e691ad67/jiter-0.13.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:682161a67adea11e3aae9038c06c8b4a9a71023228767477d683f69903ebc607", size = 366702, upload-time = "2026-02-02T12:36:48.871Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/65/415f0a75cf6921e43365a1bc227c565cb949caca8b7532776e430cbaa530/jiter-0.13.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a13b68cd1cd8cc9de8f244ebae18ccb3e4067ad205220ef324c39181e23bbf66", size = 486319, upload-time = "2026-02-02T12:36:53.006Z" },
+ { url = "https://files.pythonhosted.org/packages/54/a2/9e12b48e82c6bbc6081fd81abf915e1443add1b13d8fc586e1d90bb02bb8/jiter-0.13.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87ce0f14c6c08892b610686ae8be350bf368467b6acd5085a5b65441e2bf36d2", size = 372289, upload-time = "2026-02-02T12:36:54.593Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/c1/e4693f107a1789a239c759a432e9afc592366f04e901470c2af89cfd28e1/jiter-0.13.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c365005b05505a90d1c47856420980d0237adf82f70c4aff7aebd3c1cc143ad", size = 360165, upload-time = "2026-02-02T12:36:56.112Z" },
+ { url = "https://files.pythonhosted.org/packages/17/08/91b9ea976c1c758240614bd88442681a87672eebc3d9a6dde476874e706b/jiter-0.13.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1317fdffd16f5873e46ce27d0e0f7f4f90f0cdf1d86bf6abeaea9f63ca2c401d", size = 389634, upload-time = "2026-02-02T12:36:57.495Z" },
+ { url = "https://files.pythonhosted.org/packages/18/23/58325ef99390d6d40427ed6005bf1ad54f2577866594bcf13ce55675f87d/jiter-0.13.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c05b450d37ba0c9e21c77fef1f205f56bcee2330bddca68d344baebfc55ae0df", size = 514933, upload-time = "2026-02-02T12:36:58.909Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/25/69f1120c7c395fd276c3996bb8adefa9c6b84c12bb7111e5c6ccdcd8526d/jiter-0.13.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:775e10de3849d0631a97c603f996f518159272db00fdda0a780f81752255ee9d", size = 548842, upload-time = "2026-02-02T12:37:00.433Z" },
+ { url = "https://files.pythonhosted.org/packages/18/05/981c9669d86850c5fbb0d9e62bba144787f9fba84546ba43d624ee27ef29/jiter-0.13.0-cp314-cp314-win32.whl", hash = "sha256:632bf7c1d28421c00dd8bbb8a3bac5663e1f57d5cd5ed962bce3c73bf62608e6", size = 202108, upload-time = "2026-02-02T12:37:01.718Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/96/cdcf54dd0b0341db7d25413229888a346c7130bd20820530905fdb65727b/jiter-0.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:f22ef501c3f87ede88f23f9b11e608581c14f04db59b6a801f354397ae13739f", size = 204027, upload-time = "2026-02-02T12:37:03.075Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/f9/724bcaaab7a3cd727031fe4f6995cb86c4bd344909177c186699c8dec51a/jiter-0.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:07b75fe09a4ee8e0c606200622e571e44943f47254f95e2436c8bdcaceb36d7d", size = 187199, upload-time = "2026-02-02T12:37:04.414Z" },
+ { url = "https://files.pythonhosted.org/packages/62/92/1661d8b9fd6a3d7a2d89831db26fe3c1509a287d83ad7838831c7b7a5c7e/jiter-0.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:964538479359059a35fb400e769295d4b315ae61e4105396d355a12f7fef09f0", size = 318423, upload-time = "2026-02-02T12:37:05.806Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/3b/f77d342a54d4ebcd128e520fc58ec2f5b30a423b0fd26acdfc0c6fef8e26/jiter-0.13.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e104da1db1c0991b3eaed391ccd650ae8d947eab1480c733e5a3fb28d4313e40", size = 351438, upload-time = "2026-02-02T12:37:07.189Z" },
+ { url = "https://files.pythonhosted.org/packages/76/b3/ba9a69f0e4209bd3331470c723c2f5509e6f0482e416b612431a5061ed71/jiter-0.13.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e3a5f0cde8ff433b8e88e41aa40131455420fb3649a3c7abdda6145f8cb7202", size = 364774, upload-time = "2026-02-02T12:37:08.579Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/16/6cdb31fa342932602458dbb631bfbd47f601e03d2e4950740e0b2100b570/jiter-0.13.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57aab48f40be1db920a582b30b116fe2435d184f77f0e4226f546794cedd9cf0", size = 487238, upload-time = "2026-02-02T12:37:10.066Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/b1/956cc7abaca8d95c13aa8d6c9b3f3797241c246cd6e792934cc4c8b250d2/jiter-0.13.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7772115877c53f62beeb8fd853cab692dbc04374ef623b30f997959a4c0e7e95", size = 372892, upload-time = "2026-02-02T12:37:11.656Z" },
+ { url = "https://files.pythonhosted.org/packages/26/c4/97ecde8b1e74f67b8598c57c6fccf6df86ea7861ed29da84629cdbba76c4/jiter-0.13.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1211427574b17b633cfceba5040de8081e5abf114f7a7602f73d2e16f9fdaa59", size = 360309, upload-time = "2026-02-02T12:37:13.244Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/d7/eabe3cf46715854ccc80be2cd78dd4c36aedeb30751dbf85a1d08c14373c/jiter-0.13.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7beae3a3d3b5212d3a55d2961db3c292e02e302feb43fce6a3f7a31b90ea6dfe", size = 389607, upload-time = "2026-02-02T12:37:14.881Z" },
+ { url = "https://files.pythonhosted.org/packages/df/2d/03963fc0804e6109b82decfb9974eb92df3797fe7222428cae12f8ccaa0c/jiter-0.13.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e5562a0f0e90a6223b704163ea28e831bd3a9faa3512a711f031611e6b06c939", size = 514986, upload-time = "2026-02-02T12:37:16.326Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/6c/8c83b45eb3eb1c1e18d841fe30b4b5bc5619d781267ca9bc03e005d8fd0a/jiter-0.13.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:6c26a424569a59140fb51160a56df13f438a2b0967365e987889186d5fc2f6f9", size = 548756, upload-time = "2026-02-02T12:37:17.736Z" },
+ { url = "https://files.pythonhosted.org/packages/47/66/eea81dfff765ed66c68fd2ed8c96245109e13c896c2a5015c7839c92367e/jiter-0.13.0-cp314-cp314t-win32.whl", hash = "sha256:24dc96eca9f84da4131cdf87a95e6ce36765c3b156fc9ae33280873b1c32d5f6", size = 201196, upload-time = "2026-02-02T12:37:19.101Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/32/4ac9c7a76402f8f00d00842a7f6b83b284d0cf7c1e9d4227bc95aa6d17fa/jiter-0.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0a8d76c7524087272c8ae913f5d9d608bd839154b62c4322ef65723d2e5bb0b8", size = 204215, upload-time = "2026-02-02T12:37:20.495Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/8e/7def204fea9f9be8b3c21a6f2dd6c020cf56c7d5ff753e0e23ed7f9ea57e/jiter-0.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2c26cf47e2cad140fa23b6d58d435a7c0161f5c514284802f25e87fddfe11024", size = 187152, upload-time = "2026-02-02T12:37:22.124Z" },
+ { url = "https://files.pythonhosted.org/packages/79/b3/3c29819a27178d0e461a8571fb63c6ae38be6dc36b78b3ec2876bbd6a910/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b1cbfa133241d0e6bdab48dcdc2604e8ba81512f6bbd68ec3e8e1357dd3c316c", size = 307016, upload-time = "2026-02-02T12:37:42.755Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/ae/60993e4b07b1ac5ebe46da7aa99fdbb802eb986c38d26e3883ac0125c4e0/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:db367d8be9fad6e8ebbac4a7578b7af562e506211036cba2c06c3b998603c3d2", size = 305024, upload-time = "2026-02-02T12:37:44.774Z" },
+ { url = "https://files.pythonhosted.org/packages/77/fa/2227e590e9cf98803db2811f172b2d6460a21539ab73006f251c66f44b14/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45f6f8efb2f3b0603092401dc2df79fa89ccbc027aaba4174d2d4133ed661434", size = 339337, upload-time = "2026-02-02T12:37:46.668Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/92/015173281f7eb96c0ef580c997da8ef50870d4f7f4c9e03c845a1d62ae04/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:597245258e6ad085d064780abfb23a284d418d3e61c57362d9449c6c7317ee2d", size = 346395, upload-time = "2026-02-02T12:37:48.09Z" },
+ { url = "https://files.pythonhosted.org/packages/80/60/e50fa45dd7e2eae049f0ce964663849e897300433921198aef94b6ffa23a/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:3d744a6061afba08dd7ae375dcde870cffb14429b7477e10f67e9e6d68772a0a", size = 305169, upload-time = "2026-02-02T12:37:50.376Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/73/a009f41c5eed71c49bec53036c4b33555afcdee70682a18c6f66e396c039/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:ff732bd0a0e778f43d5009840f20b935e79087b4dc65bd36f1cd0f9b04b8ff7f", size = 303808, upload-time = "2026-02-02T12:37:52.092Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/10/528b439290763bff3d939268085d03382471b442f212dca4ff5f12802d43/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab44b178f7981fcaea7e0a5df20e773c663d06ffda0198f1a524e91b2fde7e59", size = 337384, upload-time = "2026-02-02T12:37:53.582Z" },
+ { url = "https://files.pythonhosted.org/packages/67/8a/a342b2f0251f3dac4ca17618265d93bf244a2a4d089126e81e4c1056ac50/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb00b6d26db67a05fe3e12c76edc75f32077fb51deed13822dc648fa373bc19", size = 343768, upload-time = "2026-02-02T12:37:55.055Z" },
]
[[package]]
@@ -3205,7 +3191,7 @@ wheels = [
[[package]]
name = "litellm"
-version = "1.81.5"
+version = "1.81.6"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "aiohttp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -3221,9 +3207,9 @@ dependencies = [
{ name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "tokenizers", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/38/f4/c109bc5504520baa7b96a910b619d1b1b5af6cb5c28053e53adfed83e3ab/litellm-1.81.5.tar.gz", hash = "sha256:599994651cbb64b8ee7cd3b4979275139afc6e426bdd4aa840a61121bb3b04c9", size = 13615436, upload-time = "2026-01-29T01:37:54.817Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/2e/f3/194a2dca6cb3eddb89f4bc2920cf5e27542256af907c23be13c61fe7e021/litellm-1.81.6.tar.gz", hash = "sha256:f02b503dfb7d66d1c939f82e4db21aeec1d6e2ed1fe3f5cd02aaec3f792bc4ae", size = 13878107, upload-time = "2026-02-01T04:02:27.36Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/74/0f/5312b944208efeec5dcbf8e0ed956f8f7c430b0c6458301d206380c90b56/litellm-1.81.5-py3-none-any.whl", hash = "sha256:206505c5a0c6503e465154b9c979772be3ede3f5bf746d15b37dca5ae54d239f", size = 11950016, upload-time = "2026-01-29T01:37:52.6Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/05/3516cc7386b220d388aa0bd833308c677e94eceb82b2756dd95e06f6a13f/litellm-1.81.6-py3-none-any.whl", hash = "sha256:573206ba194d49a1691370ba33f781671609ac77c35347f8a0411d852cf6341a", size = 12224343, upload-time = "2026-02-01T04:02:23.704Z" },
]
[package.optional-dependencies]
@@ -3265,11 +3251,11 @@ wheels = [
[[package]]
name = "litellm-proxy-extras"
-version = "0.4.27"
+version = "0.4.29"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/01/af/9fdc22e7e3dcaa44c0f206a3f12065286c32d7e453f87e14dac1e69cf49a/litellm_proxy_extras-0.4.27.tar.gz", hash = "sha256:81059120016cfc03c82aa9664424912bdcffad103f66a5f925fef6b26f2cc151", size = 23269, upload-time = "2026-01-24T22:03:26.97Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/42/c5/9c4325452b3b3fc144e942f0f0e6582374d588f3159a0706594e3422943c/litellm_proxy_extras-0.4.29.tar.gz", hash = "sha256:1a8266911e0546f1e17e6714ca20b72e9fef47c1683f9c16399cf2d1786437a0", size = 23561, upload-time = "2026-01-31T23:13:58.707Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/50/c8/508b5a277e5d56e71ef51c5fe8111c7ec045ffd98f126089af803171ccc6/litellm_proxy_extras-0.4.27-py3-none-any.whl", hash = "sha256:752c1faabc86ce3d2b1fa451495d34de82323798e37b9cb5c0fea93deae1c5c8", size = 50073, upload-time = "2026-01-24T22:03:25.757Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/d6/7393367fdf4b65d80ba0c32d517743a7aa8975a36b32cc70a0352b9514aa/litellm_proxy_extras-0.4.29-py3-none-any.whl", hash = "sha256:c36c1b69675c61acccc6b61dd610eb37daeb72c6fd819461cefb5b0cc7e0550f", size = 50734, upload-time = "2026-01-31T23:13:56.986Z" },
]
[[package]]
@@ -3393,7 +3379,7 @@ dependencies = [
{ name = "fonttools", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "kiwisolver", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "packaging", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "pillow", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "pyparsing", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -3498,7 +3484,7 @@ wheels = [
[[package]]
name = "mem0ai"
-version = "1.0.2"
+version = "1.0.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "openai", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -3509,9 +3495,9 @@ dependencies = [
{ name = "qdrant-client", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "sqlalchemy", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/4c/b3/57edb1253e7dc24d41e102722a585d6e08a96c6191a6a04e43112c01dc5d/mem0ai-1.0.2.tar.gz", hash = "sha256:533c370e8a4e817d47a583cb7fa4df55db59de8dd67be39f2b927e2ad19607d1", size = 182395, upload-time = "2026-01-13T07:40:00.666Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/ce/b6/9d3a747a5c1af2b4f73572a3d296bf5e99c99630a3f201b0ddbb14e811e6/mem0ai-1.0.3.tar.gz", hash = "sha256:8f7abe485a61653e3f2d3f8c222f531f8b52660b19d88820c56522103d9f31b5", size = 182698, upload-time = "2026-02-03T05:38:04.608Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/d7/82/59309070bd2d2ddccebd89d8ebb7a2155ce12531f0c36123d0a39eada544/mem0ai-1.0.2-py3-none-any.whl", hash = "sha256:3528523653bc57efa477d55e703dcedf8decc23868d4dbcc6d43a97f2315834a", size = 275428, upload-time = "2026-01-13T07:39:58.339Z" },
+ { url = "https://files.pythonhosted.org/packages/84/3e/b300ab9fa6efd36c78f1402684eab1483f282c4ca6e983920fceb9c0f4fb/mem0ai-1.0.3-py3-none-any.whl", hash = "sha256:f500c3decc12c2663b2ad829ac4edcd0c674f2bd9bf4abf7f5c0522aef3d3cf8", size = 275722, upload-time = "2026-02-03T05:38:03.126Z" },
]
[[package]]
@@ -3560,7 +3546,7 @@ version = "0.5.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0e/4a/c27b42ed9b1c7d13d9ba8b6905dece787d6259152f2309338aed29b2447b/ml_dtypes-0.5.4.tar.gz", hash = "sha256:8ab06a50fb9bf9666dd0fe5dfb4676fa2b0ac0f31ecff72a6c3af8e22c063453", size = 692314, upload-time = "2025-11-17T22:32:31.031Z" }
wheels = [
@@ -3830,11 +3816,11 @@ wheels = [
[[package]]
name = "narwhals"
-version = "2.15.0"
+version = "2.16.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/47/6d/b57c64e5038a8cf071bce391bb11551657a74558877ac961e7fa905ece27/narwhals-2.15.0.tar.gz", hash = "sha256:a9585975b99d95084268445a1fdd881311fa26ef1caa18020d959d5b2ff9a965", size = 603479, upload-time = "2026-01-06T08:10:13.27Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/fc/6f/713be67779028d482c6e0f2dde5bc430021b2578a4808c1c9f6d7ad48257/narwhals-2.16.0.tar.gz", hash = "sha256:155bb45132b370941ba0396d123cf9ed192bf25f39c4cea726f2da422ca4e145", size = 618268, upload-time = "2026-02-02T10:31:00.545Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/3d/2e/cf2ffeb386ac3763526151163ad7da9f1b586aac96d2b4f7de1eaebf0c61/narwhals-2.15.0-py3-none-any.whl", hash = "sha256:cbfe21ca19d260d9fd67f995ec75c44592d1f106933b03ddd375df7ac841f9d6", size = 432856, upload-time = "2026-01-06T08:10:11.511Z" },
+ { url = "https://files.pythonhosted.org/packages/03/cc/7cb74758e6df95e0c4e1253f203b6dd7f348bf2f29cf89e9210a2416d535/narwhals-2.16.0-py3-none-any.whl", hash = "sha256:846f1fd7093ac69d63526e50732033e86c30ea0026a44d9b23991010c7d1485d", size = 443951, upload-time = "2026-02-02T10:30:58.635Z" },
]
[[package]]
@@ -3915,7 +3901,7 @@ wheels = [
[[package]]
name = "numpy"
-version = "2.4.1"
+version = "2.4.2"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version >= '3.14' and sys_platform == 'darwin'",
@@ -3931,79 +3917,79 @@ resolution-markers = [
"python_full_version == '3.12.*' and sys_platform == 'win32'",
"python_full_version == '3.11.*' and sys_platform == 'win32'",
]
-sdist = { url = "https://files.pythonhosted.org/packages/24/62/ae72ff66c0f1fd959925b4c11f8c2dea61f47f6acaea75a08512cdfe3fed/numpy-2.4.1.tar.gz", hash = "sha256:a1ceafc5042451a858231588a104093474c6a5c57dcc724841f5c888d237d690", size = 20721320, upload-time = "2026-01-10T06:44:59.619Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a5/34/2b1bc18424f3ad9af577f6ce23600319968a70575bd7db31ce66731bbef9/numpy-2.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0cce2a669e3c8ba02ee563c7835f92c153cf02edff1ae05e1823f1dde21b16a5", size = 16944563, upload-time = "2026-01-10T06:42:14.615Z" },
- { url = "https://files.pythonhosted.org/packages/2c/57/26e5f97d075aef3794045a6ca9eada6a4ed70eb9a40e7a4a93f9ac80d704/numpy-2.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:899d2c18024984814ac7e83f8f49d8e8180e2fbe1b2e252f2e7f1d06bea92425", size = 12645658, upload-time = "2026-01-10T06:42:17.298Z" },
- { url = "https://files.pythonhosted.org/packages/8e/ba/80fc0b1e3cb2fd5c6143f00f42eb67762aa043eaa05ca924ecc3222a7849/numpy-2.4.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:09aa8a87e45b55a1c2c205d42e2808849ece5c484b2aab11fecabec3841cafba", size = 5474132, upload-time = "2026-01-10T06:42:19.637Z" },
- { url = "https://files.pythonhosted.org/packages/40/ae/0a5b9a397f0e865ec171187c78d9b57e5588afc439a04ba9cab1ebb2c945/numpy-2.4.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:edee228f76ee2dab4579fad6f51f6a305de09d444280109e0f75df247ff21501", size = 6804159, upload-time = "2026-01-10T06:42:21.44Z" },
- { url = "https://files.pythonhosted.org/packages/86/9c/841c15e691c7085caa6fd162f063eff494099c8327aeccd509d1ab1e36ab/numpy-2.4.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a92f227dbcdc9e4c3e193add1a189a9909947d4f8504c576f4a732fd0b54240a", size = 14708058, upload-time = "2026-01-10T06:42:23.546Z" },
- { url = "https://files.pythonhosted.org/packages/5d/9d/7862db06743f489e6a502a3b93136d73aea27d97b2cf91504f70a27501d6/numpy-2.4.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:538bf4ec353709c765ff75ae616c34d3c3dca1a68312727e8f2676ea644f8509", size = 16651501, upload-time = "2026-01-10T06:42:25.909Z" },
- { url = "https://files.pythonhosted.org/packages/a6/9c/6fc34ebcbd4015c6e5f0c0ce38264010ce8a546cb6beacb457b84a75dfc8/numpy-2.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ac08c63cb7779b85e9d5318e6c3518b424bc1f364ac4cb2c6136f12e5ff2dccc", size = 16492627, upload-time = "2026-01-10T06:42:28.938Z" },
- { url = "https://files.pythonhosted.org/packages/aa/63/2494a8597502dacda439f61b3c0db4da59928150e62be0e99395c3ad23c5/numpy-2.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f9c360ecef085e5841c539a9a12b883dff005fbd7ce46722f5e9cef52634d82", size = 18585052, upload-time = "2026-01-10T06:42:31.312Z" },
- { url = "https://files.pythonhosted.org/packages/6a/93/098e1162ae7522fc9b618d6272b77404c4656c72432ecee3abc029aa3de0/numpy-2.4.1-cp311-cp311-win32.whl", hash = "sha256:0f118ce6b972080ba0758c6087c3617b5ba243d806268623dc34216d69099ba0", size = 6236575, upload-time = "2026-01-10T06:42:33.872Z" },
- { url = "https://files.pythonhosted.org/packages/8c/de/f5e79650d23d9e12f38a7bc6b03ea0835b9575494f8ec94c11c6e773b1b1/numpy-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:18e14c4d09d55eef39a6ab5b08406e84bc6869c1e34eef45564804f90b7e0574", size = 12604479, upload-time = "2026-01-10T06:42:35.778Z" },
- { url = "https://files.pythonhosted.org/packages/dd/65/e1097a7047cff12ce3369bd003811516b20ba1078dbdec135e1cd7c16c56/numpy-2.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:6461de5113088b399d655d45c3897fa188766415d0f568f175ab071c8873bd73", size = 10578325, upload-time = "2026-01-10T06:42:38.518Z" },
- { url = "https://files.pythonhosted.org/packages/78/7f/ec53e32bf10c813604edf07a3682616bd931d026fcde7b6d13195dfb684a/numpy-2.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d3703409aac693fa82c0aee023a1ae06a6e9d065dba10f5e8e80f642f1e9d0a2", size = 16656888, upload-time = "2026-01-10T06:42:40.913Z" },
- { url = "https://files.pythonhosted.org/packages/b8/e0/1f9585d7dae8f14864e948fd7fa86c6cb72dee2676ca2748e63b1c5acfe0/numpy-2.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7211b95ca365519d3596a1d8688a95874cc94219d417504d9ecb2df99fa7bfa8", size = 12373956, upload-time = "2026-01-10T06:42:43.091Z" },
- { url = "https://files.pythonhosted.org/packages/8e/43/9762e88909ff2326f5e7536fa8cb3c49fb03a7d92705f23e6e7f553d9cb3/numpy-2.4.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5adf01965456a664fc727ed69cc71848f28d063217c63e1a0e200a118d5eec9a", size = 5202567, upload-time = "2026-01-10T06:42:45.107Z" },
- { url = "https://files.pythonhosted.org/packages/4b/ee/34b7930eb61e79feb4478800a4b95b46566969d837546aa7c034c742ef98/numpy-2.4.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:26f0bcd9c79a00e339565b303badc74d3ea2bd6d52191eeca5f95936cad107d0", size = 6549459, upload-time = "2026-01-10T06:42:48.152Z" },
- { url = "https://files.pythonhosted.org/packages/79/e3/5f115fae982565771be994867c89bcd8d7208dbfe9469185497d70de5ddf/numpy-2.4.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0093e85df2960d7e4049664b26afc58b03236e967fb942354deef3208857a04c", size = 14404859, upload-time = "2026-01-10T06:42:49.947Z" },
- { url = "https://files.pythonhosted.org/packages/d9/7d/9c8a781c88933725445a859cac5d01b5871588a15969ee6aeb618ba99eee/numpy-2.4.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ad270f438cbdd402c364980317fb6b117d9ec5e226fff5b4148dd9aa9fc6e02", size = 16371419, upload-time = "2026-01-10T06:42:52.409Z" },
- { url = "https://files.pythonhosted.org/packages/a6/d2/8aa084818554543f17cf4162c42f162acbd3bb42688aefdba6628a859f77/numpy-2.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:297c72b1b98100c2e8f873d5d35fb551fce7040ade83d67dd51d38c8d42a2162", size = 16182131, upload-time = "2026-01-10T06:42:54.694Z" },
- { url = "https://files.pythonhosted.org/packages/60/db/0425216684297c58a8df35f3284ef56ec4a043e6d283f8a59c53562caf1b/numpy-2.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf6470d91d34bf669f61d515499859fa7a4c2f7c36434afb70e82df7217933f9", size = 18295342, upload-time = "2026-01-10T06:42:56.991Z" },
- { url = "https://files.pythonhosted.org/packages/31/4c/14cb9d86240bd8c386c881bafbe43f001284b7cce3bc01623ac9475da163/numpy-2.4.1-cp312-cp312-win32.whl", hash = "sha256:b6bcf39112e956594b3331316d90c90c90fb961e39696bda97b89462f5f3943f", size = 5959015, upload-time = "2026-01-10T06:42:59.631Z" },
- { url = "https://files.pythonhosted.org/packages/51/cf/52a703dbeb0c65807540d29699fef5fda073434ff61846a564d5c296420f/numpy-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:e1a27bb1b2dee45a2a53f5ca6ff2d1a7f135287883a1689e930d44d1ff296c87", size = 12310730, upload-time = "2026-01-10T06:43:01.627Z" },
- { url = "https://files.pythonhosted.org/packages/69/80/a828b2d0ade5e74a9fe0f4e0a17c30fdc26232ad2bc8c9f8b3197cf7cf18/numpy-2.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:0e6e8f9d9ecf95399982019c01223dc130542960a12edfa8edd1122dfa66a8a8", size = 10312166, upload-time = "2026-01-10T06:43:03.673Z" },
- { url = "https://files.pythonhosted.org/packages/04/68/732d4b7811c00775f3bd522a21e8dd5a23f77eb11acdeb663e4a4ebf0ef4/numpy-2.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d797454e37570cfd61143b73b8debd623c3c0952959adb817dd310a483d58a1b", size = 16652495, upload-time = "2026-01-10T06:43:06.283Z" },
- { url = "https://files.pythonhosted.org/packages/20/ca/857722353421a27f1465652b2c66813eeeccea9d76d5f7b74b99f298e60e/numpy-2.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82c55962006156aeef1629b953fd359064aa47e4d82cfc8e67f0918f7da3344f", size = 12368657, upload-time = "2026-01-10T06:43:09.094Z" },
- { url = "https://files.pythonhosted.org/packages/81/0d/2377c917513449cc6240031a79d30eb9a163d32a91e79e0da47c43f2c0c8/numpy-2.4.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:71abbea030f2cfc3092a0ff9f8c8fdefdc5e0bf7d9d9c99663538bb0ecdac0b9", size = 5197256, upload-time = "2026-01-10T06:43:13.634Z" },
- { url = "https://files.pythonhosted.org/packages/17/39/569452228de3f5de9064ac75137082c6214be1f5c532016549a7923ab4b5/numpy-2.4.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5b55aa56165b17aaf15520beb9cbd33c9039810e0d9643dd4379e44294c7303e", size = 6545212, upload-time = "2026-01-10T06:43:15.661Z" },
- { url = "https://files.pythonhosted.org/packages/8c/a4/77333f4d1e4dac4395385482557aeecf4826e6ff517e32ca48e1dafbe42a/numpy-2.4.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0faba4a331195bfa96f93dd9dfaa10b2c7aa8cda3a02b7fd635e588fe821bf5", size = 14402871, upload-time = "2026-01-10T06:43:17.324Z" },
- { url = "https://files.pythonhosted.org/packages/ba/87/d341e519956273b39d8d47969dd1eaa1af740615394fe67d06f1efa68773/numpy-2.4.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e3087f53e2b4428766b54932644d148613c5a595150533ae7f00dab2f319a8", size = 16359305, upload-time = "2026-01-10T06:43:19.376Z" },
- { url = "https://files.pythonhosted.org/packages/32/91/789132c6666288eaa20ae8066bb99eba1939362e8f1a534949a215246e97/numpy-2.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:49e792ec351315e16da54b543db06ca8a86985ab682602d90c60ef4ff4db2a9c", size = 16181909, upload-time = "2026-01-10T06:43:21.808Z" },
- { url = "https://files.pythonhosted.org/packages/cf/b8/090b8bd27b82a844bb22ff8fdf7935cb1980b48d6e439ae116f53cdc2143/numpy-2.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79e9e06c4c2379db47f3f6fc7a8652e7498251789bf8ff5bd43bf478ef314ca2", size = 18284380, upload-time = "2026-01-10T06:43:23.957Z" },
- { url = "https://files.pythonhosted.org/packages/67/78/722b62bd31842ff029412271556a1a27a98f45359dea78b1548a3a9996aa/numpy-2.4.1-cp313-cp313-win32.whl", hash = "sha256:3d1a100e48cb266090a031397863ff8a30050ceefd798f686ff92c67a486753d", size = 5957089, upload-time = "2026-01-10T06:43:27.535Z" },
- { url = "https://files.pythonhosted.org/packages/da/a6/cf32198b0b6e18d4fbfa9a21a992a7fca535b9bb2b0cdd217d4a3445b5ca/numpy-2.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:92a0e65272fd60bfa0d9278e0484c2f52fe03b97aedc02b357f33fe752c52ffb", size = 12307230, upload-time = "2026-01-10T06:43:29.298Z" },
- { url = "https://files.pythonhosted.org/packages/44/6c/534d692bfb7d0afe30611320c5fb713659dcb5104d7cc182aff2aea092f5/numpy-2.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:20d4649c773f66cc2fc36f663e091f57c3b7655f936a4c681b4250855d1da8f5", size = 10313125, upload-time = "2026-01-10T06:43:31.782Z" },
- { url = "https://files.pythonhosted.org/packages/da/a1/354583ac5c4caa566de6ddfbc42744409b515039e085fab6e0ff942e0df5/numpy-2.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f93bc6892fe7b0663e5ffa83b61aab510aacffd58c16e012bb9352d489d90cb7", size = 12496156, upload-time = "2026-01-10T06:43:34.237Z" },
- { url = "https://files.pythonhosted.org/packages/51/b0/42807c6e8cce58c00127b1dc24d365305189991f2a7917aa694a109c8d7d/numpy-2.4.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:178de8f87948163d98a4c9ab5bee4ce6519ca918926ec8df195af582de28544d", size = 5324663, upload-time = "2026-01-10T06:43:36.211Z" },
- { url = "https://files.pythonhosted.org/packages/fe/55/7a621694010d92375ed82f312b2f28017694ed784775269115323e37f5e2/numpy-2.4.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:98b35775e03ab7f868908b524fc0a84d38932d8daf7b7e1c3c3a1b6c7a2c9f15", size = 6645224, upload-time = "2026-01-10T06:43:37.884Z" },
- { url = "https://files.pythonhosted.org/packages/50/96/9fa8635ed9d7c847d87e30c834f7109fac5e88549d79ef3324ab5c20919f/numpy-2.4.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:941c2a93313d030f219f3a71fd3d91a728b82979a5e8034eb2e60d394a2b83f9", size = 14462352, upload-time = "2026-01-10T06:43:39.479Z" },
- { url = "https://files.pythonhosted.org/packages/03/d1/8cf62d8bb2062da4fb82dd5d49e47c923f9c0738032f054e0a75342faba7/numpy-2.4.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:529050522e983e00a6c1c6b67411083630de8b57f65e853d7b03d9281b8694d2", size = 16407279, upload-time = "2026-01-10T06:43:41.93Z" },
- { url = "https://files.pythonhosted.org/packages/86/1c/95c86e17c6b0b31ce6ef219da00f71113b220bcb14938c8d9a05cee0ff53/numpy-2.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2302dc0224c1cbc49bb94f7064f3f923a971bfae45c33870dcbff63a2a550505", size = 16248316, upload-time = "2026-01-10T06:43:44.121Z" },
- { url = "https://files.pythonhosted.org/packages/30/b4/e7f5ff8697274c9d0fa82398b6a372a27e5cef069b37df6355ccb1f1db1a/numpy-2.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9171a42fcad32dcf3fa86f0a4faa5e9f8facefdb276f54b8b390d90447cff4e2", size = 18329884, upload-time = "2026-01-10T06:43:46.613Z" },
- { url = "https://files.pythonhosted.org/packages/37/a4/b073f3e9d77f9aec8debe8ca7f9f6a09e888ad1ba7488f0c3b36a94c03ac/numpy-2.4.1-cp313-cp313t-win32.whl", hash = "sha256:382ad67d99ef49024f11d1ce5dcb5ad8432446e4246a4b014418ba3a1175a1f4", size = 6081138, upload-time = "2026-01-10T06:43:48.854Z" },
- { url = "https://files.pythonhosted.org/packages/16/16/af42337b53844e67752a092481ab869c0523bc95c4e5c98e4dac4e9581ac/numpy-2.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:62fea415f83ad8fdb6c20840578e5fbaf5ddd65e0ec6c3c47eda0f69da172510", size = 12447478, upload-time = "2026-01-10T06:43:50.476Z" },
- { url = "https://files.pythonhosted.org/packages/6c/f8/fa85b2eac68ec631d0b631abc448552cb17d39afd17ec53dcbcc3537681a/numpy-2.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:a7870e8c5fc11aef57d6fea4b4085e537a3a60ad2cdd14322ed531fdca68d261", size = 10382981, upload-time = "2026-01-10T06:43:52.575Z" },
- { url = "https://files.pythonhosted.org/packages/1b/a7/ef08d25698e0e4b4efbad8d55251d20fe2a15f6d9aa7c9b30cd03c165e6f/numpy-2.4.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:3869ea1ee1a1edc16c29bbe3a2f2a4e515cc3a44d43903ad41e0cacdbaf733dc", size = 16652046, upload-time = "2026-01-10T06:43:54.797Z" },
- { url = "https://files.pythonhosted.org/packages/8f/39/e378b3e3ca13477e5ac70293ec027c438d1927f18637e396fe90b1addd72/numpy-2.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e867df947d427cdd7a60e3e271729090b0f0df80f5f10ab7dd436f40811699c3", size = 12378858, upload-time = "2026-01-10T06:43:57.099Z" },
- { url = "https://files.pythonhosted.org/packages/c3/74/7ec6154f0006910ed1fdbb7591cf4432307033102b8a22041599935f8969/numpy-2.4.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:e3bd2cb07841166420d2fa7146c96ce00cb3410664cbc1a6be028e456c4ee220", size = 5207417, upload-time = "2026-01-10T06:43:59.037Z" },
- { url = "https://files.pythonhosted.org/packages/f7/b7/053ac11820d84e42f8feea5cb81cc4fcd1091499b45b1ed8c7415b1bf831/numpy-2.4.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:f0a90aba7d521e6954670550e561a4cb925713bd944445dbe9e729b71f6cabee", size = 6542643, upload-time = "2026-01-10T06:44:01.852Z" },
- { url = "https://files.pythonhosted.org/packages/c0/c4/2e7908915c0e32ca636b92e4e4a3bdec4cb1e7eb0f8aedf1ed3c68a0d8cd/numpy-2.4.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d558123217a83b2d1ba316b986e9248a1ed1971ad495963d555ccd75dcb1556", size = 14418963, upload-time = "2026-01-10T06:44:04.047Z" },
- { url = "https://files.pythonhosted.org/packages/eb/c0/3ed5083d94e7ffd7c404e54619c088e11f2e1939a9544f5397f4adb1b8ba/numpy-2.4.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2f44de05659b67d20499cbc96d49f2650769afcb398b79b324bb6e297bfe3844", size = 16363811, upload-time = "2026-01-10T06:44:06.207Z" },
- { url = "https://files.pythonhosted.org/packages/0e/68/42b66f1852bf525050a67315a4fb94586ab7e9eaa541b1bef530fab0c5dd/numpy-2.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:69e7419c9012c4aaf695109564e3387f1259f001b4326dfa55907b098af082d3", size = 16197643, upload-time = "2026-01-10T06:44:08.33Z" },
- { url = "https://files.pythonhosted.org/packages/d2/40/e8714fc933d85f82c6bfc7b998a0649ad9769a32f3494ba86598aaf18a48/numpy-2.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd257026eb1b34352e749d7cc1678b5eeec3e329ad8c9965a797e08ccba205", size = 18289601, upload-time = "2026-01-10T06:44:10.841Z" },
- { url = "https://files.pythonhosted.org/packages/80/9a/0d44b468cad50315127e884802351723daca7cf1c98d102929468c81d439/numpy-2.4.1-cp314-cp314-win32.whl", hash = "sha256:727c6c3275ddefa0dc078524a85e064c057b4f4e71ca5ca29a19163c607be745", size = 6005722, upload-time = "2026-01-10T06:44:13.332Z" },
- { url = "https://files.pythonhosted.org/packages/7e/bb/c6513edcce5a831810e2dddc0d3452ce84d208af92405a0c2e58fd8e7881/numpy-2.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:7d5d7999df434a038d75a748275cd6c0094b0ecdb0837342b332a82defc4dc4d", size = 12438590, upload-time = "2026-01-10T06:44:15.006Z" },
- { url = "https://files.pythonhosted.org/packages/e9/da/a598d5cb260780cf4d255102deba35c1d072dc028c4547832f45dd3323a8/numpy-2.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:ce9ce141a505053b3c7bce3216071f3bf5c182b8b28930f14cd24d43932cd2df", size = 10596180, upload-time = "2026-01-10T06:44:17.386Z" },
- { url = "https://files.pythonhosted.org/packages/de/bc/ea3f2c96fcb382311827231f911723aeff596364eb6e1b6d1d91128aa29b/numpy-2.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:4e53170557d37ae404bf8d542ca5b7c629d6efa1117dac6a83e394142ea0a43f", size = 12498774, upload-time = "2026-01-10T06:44:19.467Z" },
- { url = "https://files.pythonhosted.org/packages/aa/ab/ef9d939fe4a812648c7a712610b2ca6140b0853c5efea361301006c02ae5/numpy-2.4.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:a73044b752f5d34d4232f25f18160a1cc418ea4507f5f11e299d8ac36875f8a0", size = 5327274, upload-time = "2026-01-10T06:44:23.189Z" },
- { url = "https://files.pythonhosted.org/packages/bd/31/d381368e2a95c3b08b8cf7faac6004849e960f4a042d920337f71cef0cae/numpy-2.4.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:fb1461c99de4d040666ca0444057b06541e5642f800b71c56e6ea92d6a853a0c", size = 6648306, upload-time = "2026-01-10T06:44:25.012Z" },
- { url = "https://files.pythonhosted.org/packages/c8/e5/0989b44ade47430be6323d05c23207636d67d7362a1796ccbccac6773dd2/numpy-2.4.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423797bdab2eeefbe608d7c1ec7b2b4fd3c58d51460f1ee26c7500a1d9c9ee93", size = 14464653, upload-time = "2026-01-10T06:44:26.706Z" },
- { url = "https://files.pythonhosted.org/packages/10/a7/cfbe475c35371cae1358e61f20c5f075badc18c4797ab4354140e1d283cf/numpy-2.4.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:52b5f61bdb323b566b528899cc7db2ba5d1015bda7ea811a8bcf3c89c331fa42", size = 16405144, upload-time = "2026-01-10T06:44:29.378Z" },
- { url = "https://files.pythonhosted.org/packages/f8/a3/0c63fe66b534888fa5177cc7cef061541064dbe2b4b60dcc60ffaf0d2157/numpy-2.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42d7dd5fa36d16d52a84f821eb96031836fd405ee6955dd732f2023724d0aa01", size = 16247425, upload-time = "2026-01-10T06:44:31.721Z" },
- { url = "https://files.pythonhosted.org/packages/6b/2b/55d980cfa2c93bd40ff4c290bf824d792bd41d2fe3487b07707559071760/numpy-2.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7b6b5e28bbd47b7532698e5db2fe1db693d84b58c254e4389d99a27bb9b8f6b", size = 18330053, upload-time = "2026-01-10T06:44:34.617Z" },
- { url = "https://files.pythonhosted.org/packages/23/12/8b5fc6b9c487a09a7957188e0943c9ff08432c65e34567cabc1623b03a51/numpy-2.4.1-cp314-cp314t-win32.whl", hash = "sha256:5de60946f14ebe15e713a6f22850c2372fa72f4ff9a432ab44aa90edcadaa65a", size = 6152482, upload-time = "2026-01-10T06:44:36.798Z" },
- { url = "https://files.pythonhosted.org/packages/00/a5/9f8ca5856b8940492fc24fbe13c1bc34d65ddf4079097cf9e53164d094e1/numpy-2.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:8f085da926c0d491ffff3096f91078cc97ea67e7e6b65e490bc8dcda65663be2", size = 12627117, upload-time = "2026-01-10T06:44:38.828Z" },
- { url = "https://files.pythonhosted.org/packages/ad/0d/eca3d962f9eef265f01a8e0d20085c6dd1f443cbffc11b6dede81fd82356/numpy-2.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:6436cffb4f2bf26c974344439439c95e152c9a527013f26b3577be6c2ca64295", size = 10667121, upload-time = "2026-01-10T06:44:41.644Z" },
- { url = "https://files.pythonhosted.org/packages/1e/48/d86f97919e79314a1cdee4c832178763e6e98e623e123d0bada19e92c15a/numpy-2.4.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8ad35f20be147a204e28b6a0575fbf3540c5e5f802634d4258d55b1ff5facce1", size = 16822202, upload-time = "2026-01-10T06:44:43.738Z" },
- { url = "https://files.pythonhosted.org/packages/51/e9/1e62a7f77e0f37dcfb0ad6a9744e65df00242b6ea37dfafb55debcbf5b55/numpy-2.4.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8097529164c0f3e32bb89412a0905d9100bf434d9692d9fc275e18dcf53c9344", size = 12569985, upload-time = "2026-01-10T06:44:45.945Z" },
- { url = "https://files.pythonhosted.org/packages/c7/7e/914d54f0c801342306fdcdce3e994a56476f1b818c46c47fc21ae968088c/numpy-2.4.1-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:ea66d2b41ca4a1630aae5507ee0a71647d3124d1741980138aa8f28f44dac36e", size = 5398484, upload-time = "2026-01-10T06:44:48.012Z" },
- { url = "https://files.pythonhosted.org/packages/1c/d8/9570b68584e293a33474e7b5a77ca404f1dcc655e40050a600dee81d27fb/numpy-2.4.1-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:d3f8f0df9f4b8be57b3bf74a1d087fec68f927a2fab68231fdb442bf2c12e426", size = 6713216, upload-time = "2026-01-10T06:44:49.725Z" },
- { url = "https://files.pythonhosted.org/packages/33/9b/9dd6e2db8d49eb24f86acaaa5258e5f4c8ed38209a4ee9de2d1a0ca25045/numpy-2.4.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2023ef86243690c2791fd6353e5b4848eedaa88ca8a2d129f462049f6d484696", size = 14538937, upload-time = "2026-01-10T06:44:51.498Z" },
- { url = "https://files.pythonhosted.org/packages/53/87/d5bd995b0f798a37105b876350d346eea5838bd8f77ea3d7a48392f3812b/numpy-2.4.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8361ea4220d763e54cff2fbe7d8c93526b744f7cd9ddab47afeff7e14e8503be", size = 16479830, upload-time = "2026-01-10T06:44:53.931Z" },
- { url = "https://files.pythonhosted.org/packages/5b/c7/b801bf98514b6ae6475e941ac05c58e6411dd863ea92916bfd6d510b08c1/numpy-2.4.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4f1b68ff47680c2925f8063402a693ede215f0257f02596b1318ecdfb1d79e33", size = 12492579, upload-time = "2026-01-10T06:44:57.094Z" },
+sdist = { url = "https://files.pythonhosted.org/packages/57/fd/0005efbd0af48e55eb3c7208af93f2862d4b1a56cd78e84309a2d959208d/numpy-2.4.2.tar.gz", hash = "sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae", size = 20723651, upload-time = "2026-01-31T23:13:10.135Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d3/44/71852273146957899753e69986246d6a176061ea183407e95418c2aa4d9a/numpy-2.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7e88598032542bd49af7c4747541422884219056c268823ef6e5e89851c8825", size = 16955478, upload-time = "2026-01-31T23:10:25.623Z" },
+ { url = "https://files.pythonhosted.org/packages/74/41/5d17d4058bd0cd96bcbd4d9ff0fb2e21f52702aab9a72e4a594efa18692f/numpy-2.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7edc794af8b36ca37ef5fcb5e0d128c7e0595c7b96a2318d1badb6fcd8ee86b1", size = 14965467, upload-time = "2026-01-31T23:10:28.186Z" },
+ { url = "https://files.pythonhosted.org/packages/49/48/fb1ce8136c19452ed15f033f8aee91d5defe515094e330ce368a0647846f/numpy-2.4.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:6e9f61981ace1360e42737e2bae58b27bf28a1b27e781721047d84bd754d32e7", size = 5475172, upload-time = "2026-01-31T23:10:30.848Z" },
+ { url = "https://files.pythonhosted.org/packages/40/a9/3feb49f17bbd1300dd2570432961f5c8a4ffeff1db6f02c7273bd020a4c9/numpy-2.4.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:cb7bbb88aa74908950d979eeaa24dbdf1a865e3c7e45ff0121d8f70387b55f73", size = 6805145, upload-time = "2026-01-31T23:10:32.352Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/39/fdf35cbd6d6e2fcad42fcf85ac04a85a0d0fbfbf34b30721c98d602fd70a/numpy-2.4.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f069069931240b3fc703f1e23df63443dbd6390614c8c44a87d96cd0ec81eb1", size = 15966084, upload-time = "2026-01-31T23:10:34.502Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/46/6fa4ea94f1ddf969b2ee941290cca6f1bfac92b53c76ae5f44afe17ceb69/numpy-2.4.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c02ef4401a506fb60b411467ad501e1429a3487abca4664871d9ae0b46c8ba32", size = 16899477, upload-time = "2026-01-31T23:10:37.075Z" },
+ { url = "https://files.pythonhosted.org/packages/09/a1/2a424e162b1a14a5bd860a464ab4e07513916a64ab1683fae262f735ccd2/numpy-2.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2653de5c24910e49c2b106499803124dde62a5a1fe0eedeaecf4309a5f639390", size = 17323429, upload-time = "2026-01-31T23:10:39.704Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/a2/73014149ff250628df72c58204822ac01d768697913881aacf839ff78680/numpy-2.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1ae241bbfc6ae276f94a170b14785e561cb5e7f626b6688cf076af4110887413", size = 18635109, upload-time = "2026-01-31T23:10:41.924Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/0c/73e8be2f1accd56df74abc1c5e18527822067dced5ec0861b5bb882c2ce0/numpy-2.4.2-cp311-cp311-win32.whl", hash = "sha256:df1b10187212b198dd45fa943d8985a3c8cf854aed4923796e0e019e113a1bda", size = 6237915, upload-time = "2026-01-31T23:10:45.26Z" },
+ { url = "https://files.pythonhosted.org/packages/76/ae/e0265e0163cf127c24c3969d29f1c4c64551a1e375d95a13d32eab25d364/numpy-2.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:b9c618d56a29c9cb1c4da979e9899be7578d2e0b3c24d52079c166324c9e8695", size = 12607972, upload-time = "2026-01-31T23:10:47.021Z" },
+ { url = "https://files.pythonhosted.org/packages/29/a5/c43029af9b8014d6ea157f192652c50042e8911f4300f8f6ed3336bf437f/numpy-2.4.2-cp311-cp311-win_arm64.whl", hash = "sha256:47c5a6ed21d9452b10227e5e8a0e1c22979811cad7dcc19d8e3e2fb8fa03f1a3", size = 10485763, upload-time = "2026-01-31T23:10:50.087Z" },
+ { url = "https://files.pythonhosted.org/packages/51/6e/6f394c9c77668153e14d4da83bcc247beb5952f6ead7699a1a2992613bea/numpy-2.4.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:21982668592194c609de53ba4933a7471880ccbaadcc52352694a59ecc860b3a", size = 16667963, upload-time = "2026-01-31T23:10:52.147Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/f8/55483431f2b2fd015ae6ed4fe62288823ce908437ed49db5a03d15151678/numpy-2.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40397bda92382fcec844066efb11f13e1c9a3e2a8e8f318fb72ed8b6db9f60f1", size = 14693571, upload-time = "2026-01-31T23:10:54.789Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/20/18026832b1845cdc82248208dd929ca14c9d8f2bac391f67440707fff27c/numpy-2.4.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:b3a24467af63c67829bfaa61eecf18d5432d4f11992688537be59ecd6ad32f5e", size = 5203469, upload-time = "2026-01-31T23:10:57.343Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/33/2eb97c8a77daaba34eaa3fa7241a14ac5f51c46a6bd5911361b644c4a1e2/numpy-2.4.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:805cc8de9fd6e7a22da5aed858e0ab16be5a4db6c873dde1d7451c541553aa27", size = 6550820, upload-time = "2026-01-31T23:10:59.429Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/91/b97fdfd12dc75b02c44e26c6638241cc004d4079a0321a69c62f51470c4c/numpy-2.4.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d82351358ffbcdcd7b686b90742a9b86632d6c1c051016484fa0b326a0a1548", size = 15663067, upload-time = "2026-01-31T23:11:01.291Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/c6/a18e59f3f0b8071cc85cbc8d80cd02d68aa9710170b2553a117203d46936/numpy-2.4.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e35d3e0144137d9fdae62912e869136164534d64a169f86438bc9561b6ad49f", size = 16619782, upload-time = "2026-01-31T23:11:03.669Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/83/9751502164601a79e18847309f5ceec0b1446d7b6aa12305759b72cf98b2/numpy-2.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adb6ed2ad29b9e15321d167d152ee909ec73395901b70936f029c3bc6d7f4460", size = 17013128, upload-time = "2026-01-31T23:11:05.913Z" },
+ { url = "https://files.pythonhosted.org/packages/61/c4/c4066322256ec740acc1c8923a10047818691d2f8aec254798f3dd90f5f2/numpy-2.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8906e71fd8afcb76580404e2a950caef2685df3d2a57fe82a86ac8d33cc007ba", size = 18345324, upload-time = "2026-01-31T23:11:08.248Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/af/6157aa6da728fa4525a755bfad486ae7e3f76d4c1864138003eb84328497/numpy-2.4.2-cp312-cp312-win32.whl", hash = "sha256:ec055f6dae239a6299cace477b479cca2fc125c5675482daf1dd886933a1076f", size = 5960282, upload-time = "2026-01-31T23:11:10.497Z" },
+ { url = "https://files.pythonhosted.org/packages/92/0f/7ceaaeaacb40567071e94dbf2c9480c0ae453d5bb4f52bea3892c39dc83c/numpy-2.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:209fae046e62d0ce6435fcfe3b1a10537e858249b3d9b05829e2a05218296a85", size = 12314210, upload-time = "2026-01-31T23:11:12.176Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/a3/56c5c604fae6dd40fa2ed3040d005fca97e91bd320d232ac9931d77ba13c/numpy-2.4.2-cp312-cp312-win_arm64.whl", hash = "sha256:fbde1b0c6e81d56f5dccd95dd4a711d9b95df1ae4009a60887e56b27e8d903fa", size = 10220171, upload-time = "2026-01-31T23:11:14.684Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/22/815b9fe25d1d7ae7d492152adbc7226d3eff731dffc38fe970589fcaaa38/numpy-2.4.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25f2059807faea4b077a2b6837391b5d830864b3543627f381821c646f31a63c", size = 16663696, upload-time = "2026-01-31T23:11:17.516Z" },
+ { url = "https://files.pythonhosted.org/packages/09/f0/817d03a03f93ba9c6c8993de509277d84e69f9453601915e4a69554102a1/numpy-2.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bd3a7a9f5847d2fb8c2c6d1c862fa109c31a9abeca1a3c2bd5a64572955b2979", size = 14688322, upload-time = "2026-01-31T23:11:19.883Z" },
+ { url = "https://files.pythonhosted.org/packages/da/b4/f805ab79293c728b9a99438775ce51885fd4f31b76178767cfc718701a39/numpy-2.4.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8e4549f8a3c6d13d55041925e912bfd834285ef1dd64d6bc7d542583355e2e98", size = 5198157, upload-time = "2026-01-31T23:11:22.375Z" },
+ { url = "https://files.pythonhosted.org/packages/74/09/826e4289844eccdcd64aac27d13b0fd3f32039915dd5b9ba01baae1f436c/numpy-2.4.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:aea4f66ff44dfddf8c2cffd66ba6538c5ec67d389285292fe428cb2c738c8aef", size = 6546330, upload-time = "2026-01-31T23:11:23.958Z" },
+ { url = "https://files.pythonhosted.org/packages/19/fb/cbfdbfa3057a10aea5422c558ac57538e6acc87ec1669e666d32ac198da7/numpy-2.4.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3cd545784805de05aafe1dde61752ea49a359ccba9760c1e5d1c88a93bbf2b7", size = 15660968, upload-time = "2026-01-31T23:11:25.713Z" },
+ { url = "https://files.pythonhosted.org/packages/04/dc/46066ce18d01645541f0186877377b9371b8fa8017fa8262002b4ef22612/numpy-2.4.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0d9b7c93578baafcbc5f0b83eaf17b79d345c6f36917ba0c67f45226911d499", size = 16607311, upload-time = "2026-01-31T23:11:28.117Z" },
+ { url = "https://files.pythonhosted.org/packages/14/d9/4b5adfc39a43fa6bf918c6d544bc60c05236cc2f6339847fc5b35e6cb5b0/numpy-2.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f74f0f7779cc7ae07d1810aab8ac6b1464c3eafb9e283a40da7309d5e6e48fbb", size = 17012850, upload-time = "2026-01-31T23:11:30.888Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/20/adb6e6adde6d0130046e6fdfb7675cc62bc2f6b7b02239a09eb58435753d/numpy-2.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c7ac672d699bf36275c035e16b65539931347d68b70667d28984c9fb34e07fa7", size = 18334210, upload-time = "2026-01-31T23:11:33.214Z" },
+ { url = "https://files.pythonhosted.org/packages/78/0e/0a73b3dff26803a8c02baa76398015ea2a5434d9b8265a7898a6028c1591/numpy-2.4.2-cp313-cp313-win32.whl", hash = "sha256:8e9afaeb0beff068b4d9cd20d322ba0ee1cecfb0b08db145e4ab4dd44a6b5110", size = 5958199, upload-time = "2026-01-31T23:11:35.385Z" },
+ { url = "https://files.pythonhosted.org/packages/43/bc/6352f343522fcb2c04dbaf94cb30cca6fd32c1a750c06ad6231b4293708c/numpy-2.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:7df2de1e4fba69a51c06c28f5a3de36731eb9639feb8e1cf7e4a7b0daf4cf622", size = 12310848, upload-time = "2026-01-31T23:11:38.001Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/8d/6da186483e308da5da1cc6918ce913dcfe14ffde98e710bfeff2a6158d4e/numpy-2.4.2-cp313-cp313-win_arm64.whl", hash = "sha256:0fece1d1f0a89c16b03442eae5c56dc0be0c7883b5d388e0c03f53019a4bfd71", size = 10221082, upload-time = "2026-01-31T23:11:40.392Z" },
+ { url = "https://files.pythonhosted.org/packages/25/a1/9510aa43555b44781968935c7548a8926274f815de42ad3997e9e83680dd/numpy-2.4.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5633c0da313330fd20c484c78cdd3f9b175b55e1a766c4a174230c6b70ad8262", size = 14815866, upload-time = "2026-01-31T23:11:42.495Z" },
+ { url = "https://files.pythonhosted.org/packages/36/30/6bbb5e76631a5ae46e7923dd16ca9d3f1c93cfa8d4ed79a129814a9d8db3/numpy-2.4.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d9f64d786b3b1dd742c946c42d15b07497ed14af1a1f3ce840cce27daa0ce913", size = 5325631, upload-time = "2026-01-31T23:11:44.7Z" },
+ { url = "https://files.pythonhosted.org/packages/46/00/3a490938800c1923b567b3a15cd17896e68052e2145d8662aaf3e1ffc58f/numpy-2.4.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:b21041e8cb6a1eb5312dd1d2f80a94d91efffb7a06b70597d44f1bd2dfc315ab", size = 6646254, upload-time = "2026-01-31T23:11:46.341Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/e9/fac0890149898a9b609caa5af7455a948b544746e4b8fe7c212c8edd71f8/numpy-2.4.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00ab83c56211a1d7c07c25e3217ea6695e50a3e2f255053686b081dc0b091a82", size = 15720138, upload-time = "2026-01-31T23:11:48.082Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/5c/08887c54e68e1e28df53709f1893ce92932cc6f01f7c3d4dc952f61ffd4e/numpy-2.4.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2fb882da679409066b4603579619341c6d6898fc83a8995199d5249f986e8e8f", size = 16655398, upload-time = "2026-01-31T23:11:50.293Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/89/253db0fa0e66e9129c745e4ef25631dc37d5f1314dad2b53e907b8538e6d/numpy-2.4.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:66cb9422236317f9d44b67b4d18f44efe6e9c7f8794ac0462978513359461554", size = 17079064, upload-time = "2026-01-31T23:11:52.927Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/d5/cbade46ce97c59c6c3da525e8d95b7abe8a42974a1dc5c1d489c10433e88/numpy-2.4.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0f01dcf33e73d80bd8dc0f20a71303abbafa26a19e23f6b68d1aa9990af90257", size = 18379680, upload-time = "2026-01-31T23:11:55.22Z" },
+ { url = "https://files.pythonhosted.org/packages/40/62/48f99ae172a4b63d981babe683685030e8a3df4f246c893ea5c6ef99f018/numpy-2.4.2-cp313-cp313t-win32.whl", hash = "sha256:52b913ec40ff7ae845687b0b34d8d93b60cb66dcee06996dd5c99f2fc9328657", size = 6082433, upload-time = "2026-01-31T23:11:58.096Z" },
+ { url = "https://files.pythonhosted.org/packages/07/38/e054a61cfe48ad9f1ed0d188e78b7e26859d0b60ef21cd9de4897cdb5326/numpy-2.4.2-cp313-cp313t-win_amd64.whl", hash = "sha256:5eea80d908b2c1f91486eb95b3fb6fab187e569ec9752ab7d9333d2e66bf2d6b", size = 12451181, upload-time = "2026-01-31T23:11:59.782Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/a4/a05c3a6418575e185dd84d0b9680b6bb2e2dc3e4202f036b7b4e22d6e9dc/numpy-2.4.2-cp313-cp313t-win_arm64.whl", hash = "sha256:fd49860271d52127d61197bb50b64f58454e9f578cb4b2c001a6de8b1f50b0b1", size = 10290756, upload-time = "2026-01-31T23:12:02.438Z" },
+ { url = "https://files.pythonhosted.org/packages/18/88/b7df6050bf18fdcfb7046286c6535cabbdd2064a3440fca3f069d319c16e/numpy-2.4.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:444be170853f1f9d528428eceb55f12918e4fda5d8805480f36a002f1415e09b", size = 16663092, upload-time = "2026-01-31T23:12:04.521Z" },
+ { url = "https://files.pythonhosted.org/packages/25/7a/1fee4329abc705a469a4afe6e69b1ef7e915117747886327104a8493a955/numpy-2.4.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d1240d50adff70c2a88217698ca844723068533f3f5c5fa6ee2e3220e3bdb000", size = 14698770, upload-time = "2026-01-31T23:12:06.96Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/0b/f9e49ba6c923678ad5bc38181c08ac5e53b7a5754dbca8e581aa1a56b1ff/numpy-2.4.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:7cdde6de52fb6664b00b056341265441192d1291c130e99183ec0d4b110ff8b1", size = 5208562, upload-time = "2026-01-31T23:12:09.632Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/12/d7de8f6f53f9bb76997e5e4c069eda2051e3fe134e9181671c4391677bb2/numpy-2.4.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:cda077c2e5b780200b6b3e09d0b42205a3d1c68f30c6dceb90401c13bff8fe74", size = 6543710, upload-time = "2026-01-31T23:12:11.969Z" },
+ { url = "https://files.pythonhosted.org/packages/09/63/c66418c2e0268a31a4cf8a8b512685748200f8e8e8ec6c507ce14e773529/numpy-2.4.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d30291931c915b2ab5717c2974bb95ee891a1cf22ebc16a8006bd59cd210d40a", size = 15677205, upload-time = "2026-01-31T23:12:14.33Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/6c/7f237821c9642fb2a04d2f1e88b4295677144ca93285fd76eff3bcba858d/numpy-2.4.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bba37bc29d4d85761deed3954a1bc62be7cf462b9510b51d367b769a8c8df325", size = 16611738, upload-time = "2026-01-31T23:12:16.525Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/a7/39c4cdda9f019b609b5c473899d87abff092fc908cfe4d1ecb2fcff453b0/numpy-2.4.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b2f0073ed0868db1dcd86e052d37279eef185b9c8db5bf61f30f46adac63c909", size = 17028888, upload-time = "2026-01-31T23:12:19.306Z" },
+ { url = "https://files.pythonhosted.org/packages/da/b3/e84bb64bdfea967cc10950d71090ec2d84b49bc691df0025dddb7c26e8e3/numpy-2.4.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7f54844851cdb630ceb623dcec4db3240d1ac13d4990532446761baede94996a", size = 18339556, upload-time = "2026-01-31T23:12:21.816Z" },
+ { url = "https://files.pythonhosted.org/packages/88/f5/954a291bc1192a27081706862ac62bb5920fbecfbaa302f64682aa90beed/numpy-2.4.2-cp314-cp314-win32.whl", hash = "sha256:12e26134a0331d8dbd9351620f037ec470b7c75929cb8a1537f6bfe411152a1a", size = 6006899, upload-time = "2026-01-31T23:12:24.14Z" },
+ { url = "https://files.pythonhosted.org/packages/05/cb/eff72a91b2efdd1bc98b3b8759f6a1654aa87612fc86e3d87d6fe4f948c4/numpy-2.4.2-cp314-cp314-win_amd64.whl", hash = "sha256:068cdb2d0d644cdb45670810894f6a0600797a69c05f1ac478e8d31670b8ee75", size = 12443072, upload-time = "2026-01-31T23:12:26.33Z" },
+ { url = "https://files.pythonhosted.org/packages/37/75/62726948db36a56428fce4ba80a115716dc4fad6a3a4352487f8bb950966/numpy-2.4.2-cp314-cp314-win_arm64.whl", hash = "sha256:6ed0be1ee58eef41231a5c943d7d1375f093142702d5723ca2eb07db9b934b05", size = 10494886, upload-time = "2026-01-31T23:12:28.488Z" },
+ { url = "https://files.pythonhosted.org/packages/36/2f/ee93744f1e0661dc267e4b21940870cabfae187c092e1433b77b09b50ac4/numpy-2.4.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:98f16a80e917003a12c0580f97b5f875853ebc33e2eaa4bccfc8201ac6869308", size = 14818567, upload-time = "2026-01-31T23:12:30.709Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/24/6535212add7d76ff938d8bdc654f53f88d35cddedf807a599e180dcb8e66/numpy-2.4.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:20abd069b9cda45874498b245c8015b18ace6de8546bf50dfa8cea1696ed06ef", size = 5328372, upload-time = "2026-01-31T23:12:32.962Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/9d/c48f0a035725f925634bf6b8994253b43f2047f6778a54147d7e213bc5a7/numpy-2.4.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e98c97502435b53741540a5717a6749ac2ada901056c7db951d33e11c885cc7d", size = 6649306, upload-time = "2026-01-31T23:12:34.797Z" },
+ { url = "https://files.pythonhosted.org/packages/81/05/7c73a9574cd4a53a25907bad38b59ac83919c0ddc8234ec157f344d57d9a/numpy-2.4.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da6cad4e82cb893db4b69105c604d805e0c3ce11501a55b5e9f9083b47d2ffe8", size = 15722394, upload-time = "2026-01-31T23:12:36.565Z" },
+ { url = "https://files.pythonhosted.org/packages/35/fa/4de10089f21fc7d18442c4a767ab156b25c2a6eaf187c0db6d9ecdaeb43f/numpy-2.4.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e4424677ce4b47fe73c8b5556d876571f7c6945d264201180db2dc34f676ab5", size = 16653343, upload-time = "2026-01-31T23:12:39.188Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/f9/d33e4ffc857f3763a57aa85650f2e82486832d7492280ac21ba9efda80da/numpy-2.4.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2b8f157c8a6f20eb657e240f8985cc135598b2b46985c5bccbde7616dc9c6b1e", size = 17078045, upload-time = "2026-01-31T23:12:42.041Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/b8/54bdb43b6225badbea6389fa038c4ef868c44f5890f95dd530a218706da3/numpy-2.4.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5daf6f3914a733336dab21a05cdec343144600e964d2fcdabaac0c0269874b2a", size = 18380024, upload-time = "2026-01-31T23:12:44.331Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/55/6e1a61ded7af8df04016d81b5b02daa59f2ea9252ee0397cb9f631efe9e5/numpy-2.4.2-cp314-cp314t-win32.whl", hash = "sha256:8c50dd1fc8826f5b26a5ee4d77ca55d88a895f4e4819c7ecc2a9f5905047a443", size = 6153937, upload-time = "2026-01-31T23:12:47.229Z" },
+ { url = "https://files.pythonhosted.org/packages/45/aa/fa6118d1ed6d776b0983f3ceac9b1a5558e80df9365b1c3aa6d42bf9eee4/numpy-2.4.2-cp314-cp314t-win_amd64.whl", hash = "sha256:fcf92bee92742edd401ba41135185866f7026c502617f422eb432cfeca4fe236", size = 12631844, upload-time = "2026-01-31T23:12:48.997Z" },
+ { url = "https://files.pythonhosted.org/packages/32/0a/2ec5deea6dcd158f254a7b372fb09cfba5719419c8d66343bab35237b3fb/numpy-2.4.2-cp314-cp314t-win_arm64.whl", hash = "sha256:1f92f53998a17265194018d1cc321b2e96e900ca52d54c7c77837b71b9465181", size = 10565379, upload-time = "2026-01-31T23:12:51.345Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/f8/50e14d36d915ef64d8f8bc4a087fc8264d82c785eda6711f80ab7e620335/numpy-2.4.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:89f7268c009bc492f506abd6f5265defa7cb3f7487dc21d357c3d290add45082", size = 16833179, upload-time = "2026-01-31T23:12:53.5Z" },
+ { url = "https://files.pythonhosted.org/packages/17/17/809b5cad63812058a8189e91a1e2d55a5a18fd04611dbad244e8aeae465c/numpy-2.4.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6dee3bb76aa4009d5a912180bf5b2de012532998d094acee25d9cb8dee3e44a", size = 14889755, upload-time = "2026-01-31T23:12:55.933Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/ea/181b9bcf7627fc8371720316c24db888dcb9829b1c0270abf3d288b2e29b/numpy-2.4.2-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:cd2bd2bbed13e213d6b55dc1d035a4f91748a7d3edc9480c13898b0353708920", size = 5399500, upload-time = "2026-01-31T23:12:58.671Z" },
+ { url = "https://files.pythonhosted.org/packages/33/9f/413adf3fc955541ff5536b78fcf0754680b3c6d95103230252a2c9408d23/numpy-2.4.2-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:cf28c0c1d4c4bf00f509fa7eb02c58d7caf221b50b467bcb0d9bbf1584d5c821", size = 6714252, upload-time = "2026-01-31T23:13:00.518Z" },
+ { url = "https://files.pythonhosted.org/packages/91/da/643aad274e29ccbdf42ecd94dafe524b81c87bcb56b83872d54827f10543/numpy-2.4.2-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e04ae107ac591763a47398bb45b568fc38f02dbc4aa44c063f67a131f99346cb", size = 15797142, upload-time = "2026-01-31T23:13:02.219Z" },
+ { url = "https://files.pythonhosted.org/packages/66/27/965b8525e9cb5dc16481b30a1b3c21e50c7ebf6e9dbd48d0c4d0d5089c7e/numpy-2.4.2-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:602f65afdef699cda27ec0b9224ae5dc43e328f4c24c689deaf77133dbee74d0", size = 16727979, upload-time = "2026-01-31T23:13:04.62Z" },
+ { url = "https://files.pythonhosted.org/packages/de/e5/b7d20451657664b07986c2f6e3be564433f5dcaf3482d68eaecd79afaf03/numpy-2.4.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be71bf1edb48ebbbf7f6337b5bfd2f895d1902f6335a5830b20141fc126ffba0", size = 12502577, upload-time = "2026-01-31T23:13:07.08Z" },
]
[[package]]
@@ -4251,83 +4237,83 @@ wheels = [
[[package]]
name = "orjson"
-version = "3.11.6"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/70/a3/4e09c61a5f0c521cba0bb433639610ae037437669f1a4cbc93799e731d78/orjson-3.11.6.tar.gz", hash = "sha256:0a54c72259f35299fd033042367df781c2f66d10252955ca1efb7db309b954cb", size = 6175856, upload-time = "2026-01-29T15:13:07.942Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/30/3c/098ed0e49c565fdf1ccc6a75b190115d1ca74148bf5b6ab036554a550650/orjson-3.11.6-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a613fc37e007143d5b6286dccb1394cd114b07832417006a02b620ddd8279e37", size = 250411, upload-time = "2026-01-29T15:11:17.941Z" },
- { url = "https://files.pythonhosted.org/packages/15/7c/cb11a360fd228ceebade03b1e8e9e138dd4b1b3b11602b72dbdad915aded/orjson-3.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46ebee78f709d3ba7a65384cfe285bb0763157c6d2f836e7bde2f12d33a867a2", size = 138147, upload-time = "2026-01-29T15:11:19.659Z" },
- { url = "https://files.pythonhosted.org/packages/4e/4b/e57b5c45ffe69fbef7cbd56e9f40e2dc0d5de920caafefcc6981d1a7efc5/orjson-3.11.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a726fa86d2368cd57990f2bd95ef5495a6e613b08fc9585dfe121ec758fb08d1", size = 135110, upload-time = "2026-01-29T15:11:21.231Z" },
- { url = "https://files.pythonhosted.org/packages/b0/6e/4f21c6256f8cee3c0c69926cf7ac821cfc36f218512eedea2e2dc4a490c8/orjson-3.11.6-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:150f12e59d6864197770c78126e1a6e07a3da73d1728731bf3bc1e8b96ffdbe6", size = 140995, upload-time = "2026-01-29T15:11:22.902Z" },
- { url = "https://files.pythonhosted.org/packages/d0/78/92c36205ba2f6094ba1eea60c8e646885072abe64f155196833988c14b74/orjson-3.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a2d9746a5b5ce20c0908ada451eb56da4ffa01552a50789a0354d8636a02953", size = 144435, upload-time = "2026-01-29T15:11:24.124Z" },
- { url = "https://files.pythonhosted.org/packages/4d/52/1b518d164005811eb3fea92650e76e7d9deadb0b41e92c483373b1e82863/orjson-3.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd177f5dd91666d31e9019f1b06d2fcdf8a409a1637ddcb5915085dede85680", size = 142734, upload-time = "2026-01-29T15:11:25.708Z" },
- { url = "https://files.pythonhosted.org/packages/4b/11/60ea7885a2b7c1bf60ed8b5982356078a73785bd3bab392041a5bcf8de7c/orjson-3.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d777ec41a327bd3b7de97ba7bce12cc1007815ca398e4e4de9ec56c022c090b", size = 145802, upload-time = "2026-01-29T15:11:26.917Z" },
- { url = "https://files.pythonhosted.org/packages/41/7f/15a927e7958fd4f7560fb6dbb9346bee44a168e40168093c46020d866098/orjson-3.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f3a135f83185c87c13ff231fcb7dbb2fa4332a376444bd65135b50ff4cc5265c", size = 147504, upload-time = "2026-01-29T15:11:28.07Z" },
- { url = "https://files.pythonhosted.org/packages/66/1f/cabb9132a533f4f913e29294d0a1ca818b1a9a52e990526fe3f7ddd75f1c/orjson-3.11.6-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:2a8eeed7d4544cf391a142b0dd06029dac588e96cc692d9ab1c3f05b1e57c7f6", size = 421408, upload-time = "2026-01-29T15:11:29.314Z" },
- { url = "https://files.pythonhosted.org/packages/4c/b9/09bda9257a982e300313e4a9fc9b9c3aaff424d07bcf765bf045e4e3ed03/orjson-3.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9d576865a21e5cc6695be8fb78afc812079fd361ce6a027a7d41561b61b33a90", size = 155801, upload-time = "2026-01-29T15:11:30.575Z" },
- { url = "https://files.pythonhosted.org/packages/98/19/4e40ea3e5f4c6a8d51f31fd2382351ee7b396fecca915b17cd1af588175b/orjson-3.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:925e2df51f60aa50f8797830f2adfc05330425803f4105875bb511ced98b7f89", size = 147647, upload-time = "2026-01-29T15:11:31.856Z" },
- { url = "https://files.pythonhosted.org/packages/5a/73/ef4bd7dd15042cf33a402d16b87b9e969e71edb452b63b6e2b05025d1f7d/orjson-3.11.6-cp310-cp310-win32.whl", hash = "sha256:09dded2de64e77ac0b312ad59f35023548fb87393a57447e1bb36a26c181a90f", size = 139770, upload-time = "2026-01-29T15:11:33.031Z" },
- { url = "https://files.pythonhosted.org/packages/b4/ac/daab6e10467f7fffd7081ba587b492505b49313130ff5446a6fe28bf076e/orjson-3.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:3a63b5e7841ca8635214c6be7c0bf0246aa8c5cd4ef0c419b14362d0b2fb13de", size = 136783, upload-time = "2026-01-29T15:11:34.686Z" },
- { url = "https://files.pythonhosted.org/packages/f3/fd/d6b0a36854179b93ed77839f107c4089d91cccc9f9ba1b752b6e3bac5f34/orjson-3.11.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e259e85a81d76d9665f03d6129e09e4435531870de5961ddcd0bf6e3a7fde7d7", size = 250029, upload-time = "2026-01-29T15:11:35.942Z" },
- { url = "https://files.pythonhosted.org/packages/a3/bb/22902619826641cf3b627c24aab62e2ad6b571bdd1d34733abb0dd57f67a/orjson-3.11.6-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:52263949f41b4a4822c6b1353bcc5ee2f7109d53a3b493501d3369d6d0e7937a", size = 134518, upload-time = "2026-01-29T15:11:37.347Z" },
- { url = "https://files.pythonhosted.org/packages/72/90/7a818da4bba1de711a9653c420749c0ac95ef8f8651cbc1dca551f462fe0/orjson-3.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6439e742fa7834a24698d358a27346bb203bff356ae0402e7f5df8f749c621a8", size = 137917, upload-time = "2026-01-29T15:11:38.511Z" },
- { url = "https://files.pythonhosted.org/packages/59/0f/02846c1cac8e205cb3822dd8aa8f9114acda216f41fd1999ace6b543418d/orjson-3.11.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b81ffd68f084b4e993e3867acb554a049fa7787cc8710bbcc1e26965580d99be", size = 134923, upload-time = "2026-01-29T15:11:39.711Z" },
- { url = "https://files.pythonhosted.org/packages/94/cf/aeaf683001b474bb3c3c757073a4231dfdfe8467fceaefa5bfd40902c99f/orjson-3.11.6-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5a5468e5e60f7ef6d7f9044b06c8f94a3c56ba528c6e4f7f06ae95164b595ec", size = 140752, upload-time = "2026-01-29T15:11:41.347Z" },
- { url = "https://files.pythonhosted.org/packages/fc/fe/dad52d8315a65f084044a0819d74c4c9daf9ebe0681d30f525b0d29a31f0/orjson-3.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72c5005eb45bd2535632d4f3bec7ad392832cfc46b62a3021da3b48a67734b45", size = 144201, upload-time = "2026-01-29T15:11:42.537Z" },
- { url = "https://files.pythonhosted.org/packages/36/bc/ab070dd421565b831801077f1e390c4d4af8bfcecafc110336680a33866b/orjson-3.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b14dd49f3462b014455a28a4d810d3549bf990567653eb43765cd847df09145", size = 142380, upload-time = "2026-01-29T15:11:44.309Z" },
- { url = "https://files.pythonhosted.org/packages/e6/d8/4b581c725c3a308717f28bf45a9fdac210bca08b67e8430143699413ff06/orjson-3.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bb2c1ea30ef302f0f89f9bf3e7f9ab5e2af29dc9f80eb87aa99788e4e2d65", size = 145582, upload-time = "2026-01-29T15:11:45.506Z" },
- { url = "https://files.pythonhosted.org/packages/5b/a2/09aab99b39f9a7f175ea8fa29adb9933a3d01e7d5d603cdee7f1c40c8da2/orjson-3.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:825e0a85d189533c6bff7e2fc417a28f6fcea53d27125c4551979aecd6c9a197", size = 147270, upload-time = "2026-01-29T15:11:46.782Z" },
- { url = "https://files.pythonhosted.org/packages/b8/2f/5ef8eaf7829dc50da3bf497c7775b21ee88437bc8c41f959aa3504ca6631/orjson-3.11.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:b04575417a26530637f6ab4b1f7b4f666eb0433491091da4de38611f97f2fcf3", size = 421222, upload-time = "2026-01-29T15:11:48.106Z" },
- { url = "https://files.pythonhosted.org/packages/3b/b0/dd6b941294c2b5b13da5fdc7e749e58d0c55a5114ab37497155e83050e95/orjson-3.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b83eb2e40e8c4da6d6b340ee6b1d6125f5195eb1b0ebb7eac23c6d9d4f92d224", size = 155562, upload-time = "2026-01-29T15:11:49.408Z" },
- { url = "https://files.pythonhosted.org/packages/8e/09/43924331a847476ae2f9a16bd6d3c9dab301265006212ba0d3d7fd58763a/orjson-3.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1f42da604ee65a6b87eef858c913ce3e5777872b19321d11e6fc6d21de89b64f", size = 147432, upload-time = "2026-01-29T15:11:50.635Z" },
- { url = "https://files.pythonhosted.org/packages/5d/e9/d9865961081816909f6b49d880749dbbd88425afd7c5bbce0549e2290d77/orjson-3.11.6-cp311-cp311-win32.whl", hash = "sha256:5ae45df804f2d344cffb36c43fdf03c82fb6cd247f5faa41e21891b40dfbf733", size = 139623, upload-time = "2026-01-29T15:11:51.82Z" },
- { url = "https://files.pythonhosted.org/packages/b4/f9/6836edb92f76eec1082919101eb1145d2f9c33c8f2c5e6fa399b82a2aaa8/orjson-3.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:f4295948d65ace0a2d8f2c4ccc429668b7eb8af547578ec882e16bf79b0050b2", size = 136647, upload-time = "2026-01-29T15:11:53.454Z" },
- { url = "https://files.pythonhosted.org/packages/b3/0c/4954082eea948c9ae52ee0bcbaa2f99da3216a71bcc314ab129bde22e565/orjson-3.11.6-cp311-cp311-win_arm64.whl", hash = "sha256:314e9c45e0b81b547e3a1cfa3df3e07a815821b3dac9fe8cb75014071d0c16a4", size = 135327, upload-time = "2026-01-29T15:11:56.616Z" },
- { url = "https://files.pythonhosted.org/packages/14/ba/759f2879f41910b7e5e0cdbd9cf82a4f017c527fb0e972e9869ca7fe4c8e/orjson-3.11.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6f03f30cd8953f75f2a439070c743c7336d10ee940da918d71c6f3556af3ddcf", size = 249988, upload-time = "2026-01-29T15:11:58.294Z" },
- { url = "https://files.pythonhosted.org/packages/f0/70/54cecb929e6c8b10104fcf580b0cc7dc551aa193e83787dd6f3daba28bb5/orjson-3.11.6-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:af44baae65ef386ad971469a8557a0673bb042b0b9fd4397becd9c2dfaa02588", size = 134445, upload-time = "2026-01-29T15:11:59.819Z" },
- { url = "https://files.pythonhosted.org/packages/f2/6f/ec0309154457b9ba1ad05f11faa4441f76037152f75e1ac577db3ce7ca96/orjson-3.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c310a48542094e4f7dbb6ac076880994986dda8ca9186a58c3cb70a3514d3231", size = 137708, upload-time = "2026-01-29T15:12:01.488Z" },
- { url = "https://files.pythonhosted.org/packages/20/52/3c71b80840f8bab9cb26417302707b7716b7d25f863f3a541bcfa232fe6e/orjson-3.11.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8dfa7a5d387f15ecad94cb6b2d2d5f4aeea64efd8d526bfc03c9812d01e1cc0", size = 134798, upload-time = "2026-01-29T15:12:02.705Z" },
- { url = "https://files.pythonhosted.org/packages/30/51/b490a43b22ff736282360bd02e6bded455cf31dfc3224e01cd39f919bbd2/orjson-3.11.6-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba8daee3e999411b50f8b50dbb0a3071dd1845f3f9a1a0a6fa6de86d1689d84d", size = 140839, upload-time = "2026-01-29T15:12:03.956Z" },
- { url = "https://files.pythonhosted.org/packages/95/bc/4bcfe4280c1bc63c5291bb96f98298845b6355da2226d3400e17e7b51e53/orjson-3.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f89d104c974eafd7436d7a5fdbc57f7a1e776789959a2f4f1b2eab5c62a339f4", size = 144080, upload-time = "2026-01-29T15:12:05.151Z" },
- { url = "https://files.pythonhosted.org/packages/01/74/22970f9ead9ab1f1b5f8c227a6c3aa8d71cd2c5acd005868a1d44f2362fa/orjson-3.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2e2e2456788ca5ea75616c40da06fc885a7dc0389780e8a41bf7c5389ba257b", size = 142435, upload-time = "2026-01-29T15:12:06.641Z" },
- { url = "https://files.pythonhosted.org/packages/29/34/d564aff85847ab92c82ee43a7a203683566c2fca0723a5f50aebbe759603/orjson-3.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a42efebc45afabb1448001e90458c4020d5c64fbac8a8dc4045b777db76cb5a", size = 145631, upload-time = "2026-01-29T15:12:08.351Z" },
- { url = "https://files.pythonhosted.org/packages/e7/ef/016957a3890752c4aa2368326ea69fa53cdc1fdae0a94a542b6410dbdf52/orjson-3.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71b7cbef8471324966c3738c90ba38775563ef01b512feb5ad4805682188d1b9", size = 147058, upload-time = "2026-01-29T15:12:10.023Z" },
- { url = "https://files.pythonhosted.org/packages/56/cc/9a899c3972085645b3225569f91a30e221f441e5dc8126e6d060b971c252/orjson-3.11.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:f8515e5910f454fe9a8e13c2bb9dc4bae4c1836313e967e72eb8a4ad874f0248", size = 421161, upload-time = "2026-01-29T15:12:11.308Z" },
- { url = "https://files.pythonhosted.org/packages/21/a8/767d3fbd6d9b8fdee76974db40619399355fd49bf91a6dd2c4b6909ccf05/orjson-3.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:300360edf27c8c9bf7047345a94fddf3a8b8922df0ff69d71d854a170cb375cf", size = 155757, upload-time = "2026-01-29T15:12:12.776Z" },
- { url = "https://files.pythonhosted.org/packages/ad/0b/205cd69ac87e2272e13ef3f5f03a3d4657e317e38c1b08aaa2ef97060bbc/orjson-3.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:caaed4dad39e271adfadc106fab634d173b2bb23d9cf7e67bd645f879175ebfc", size = 147446, upload-time = "2026-01-29T15:12:14.166Z" },
- { url = "https://files.pythonhosted.org/packages/de/c5/dd9f22aa9f27c54c7d05cc32f4580c9ac9b6f13811eeb81d6c4c3f50d6b1/orjson-3.11.6-cp312-cp312-win32.whl", hash = "sha256:955368c11808c89793e847830e1b1007503a5923ddadc108547d3b77df761044", size = 139717, upload-time = "2026-01-29T15:12:15.7Z" },
- { url = "https://files.pythonhosted.org/packages/23/a1/e62fc50d904486970315a1654b8cfb5832eb46abb18cd5405118e7e1fc79/orjson-3.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:2c68de30131481150073d90a5d227a4a421982f42c025ecdfb66157f9579e06f", size = 136711, upload-time = "2026-01-29T15:12:17.055Z" },
- { url = "https://files.pythonhosted.org/packages/04/3d/b4fefad8bdf91e0fe212eb04975aeb36ea92997269d68857efcc7eb1dda3/orjson-3.11.6-cp312-cp312-win_arm64.whl", hash = "sha256:65dfa096f4e3a5e02834b681f539a87fbe85adc82001383c0db907557f666bfc", size = 135212, upload-time = "2026-01-29T15:12:18.3Z" },
- { url = "https://files.pythonhosted.org/packages/ae/45/d9c71c8c321277bc1ceebf599bc55ba826ae538b7c61f287e9a7e71bd589/orjson-3.11.6-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e4ae1670caabb598a88d385798692ce2a1b2f078971b3329cfb85253c6097f5b", size = 249828, upload-time = "2026-01-29T15:12:20.14Z" },
- { url = "https://files.pythonhosted.org/packages/ac/7e/4afcf4cfa9c2f93846d70eee9c53c3c0123286edcbeb530b7e9bd2aea1b2/orjson-3.11.6-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:2c6b81f47b13dac2caa5d20fbc953c75eb802543abf48403a4703ed3bff225f0", size = 134339, upload-time = "2026-01-29T15:12:22.01Z" },
- { url = "https://files.pythonhosted.org/packages/40/10/6d2b8a064c8d2411d3d0ea6ab43125fae70152aef6bea77bb50fa54d4097/orjson-3.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:647d6d034e463764e86670644bdcaf8e68b076e6e74783383b01085ae9ab334f", size = 137662, upload-time = "2026-01-29T15:12:23.307Z" },
- { url = "https://files.pythonhosted.org/packages/5a/50/5804ea7d586baf83ee88969eefda97a24f9a5bdba0727f73e16305175b26/orjson-3.11.6-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8523b9cc4ef174ae52414f7699e95ee657c16aa18b3c3c285d48d7966cce9081", size = 134626, upload-time = "2026-01-29T15:12:25.099Z" },
- { url = "https://files.pythonhosted.org/packages/9e/2e/f0492ed43e376722bb4afd648e06cc1e627fc7ec8ff55f6ee739277813ea/orjson-3.11.6-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:313dfd7184cde50c733fc0d5c8c0e2f09017b573afd11dc36bd7476b30b4cb17", size = 140873, upload-time = "2026-01-29T15:12:26.369Z" },
- { url = "https://files.pythonhosted.org/packages/10/15/6f874857463421794a303a39ac5494786ad46a4ab46d92bda6705d78c5aa/orjson-3.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:905ee036064ff1e1fd1fb800055ac477cdcb547a78c22c1bc2bbf8d5d1a6fb42", size = 144044, upload-time = "2026-01-29T15:12:28.082Z" },
- { url = "https://files.pythonhosted.org/packages/d2/c7/b7223a3a70f1d0cc2d86953825de45f33877ee1b124a91ca1f79aa6e643f/orjson-3.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce374cb98411356ba906914441fc993f271a7a666d838d8de0e0900dd4a4bc12", size = 142396, upload-time = "2026-01-29T15:12:30.529Z" },
- { url = "https://files.pythonhosted.org/packages/87/e3/aa1b6d3ad3cd80f10394134f73ae92a1d11fdbe974c34aa199cc18bb5fcf/orjson-3.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cded072b9f65fcfd188aead45efa5bd528ba552add619b3ad2a81f67400ec450", size = 145600, upload-time = "2026-01-29T15:12:31.848Z" },
- { url = "https://files.pythonhosted.org/packages/f6/cf/e4aac5a46cbd39d7e769ef8650efa851dfce22df1ba97ae2b33efe893b12/orjson-3.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7ab85bdbc138e1f73a234db6bb2e4cc1f0fcec8f4bd2bd2430e957a01aadf746", size = 146967, upload-time = "2026-01-29T15:12:33.203Z" },
- { url = "https://files.pythonhosted.org/packages/0b/04/975b86a4bcf6cfeda47aad15956d52fbeda280811206e9967380fa9355c8/orjson-3.11.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:351b96b614e3c37a27b8ab048239ebc1e0be76cc17481a430d70a77fb95d3844", size = 421003, upload-time = "2026-01-29T15:12:35.097Z" },
- { url = "https://files.pythonhosted.org/packages/28/d1/0369d0baf40eea5ff2300cebfe209883b2473ab4aa4c4974c8bd5ee42bb2/orjson-3.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f9959c85576beae5cdcaaf39510b15105f1ee8b70d5dacd90152617f57be8c83", size = 155695, upload-time = "2026-01-29T15:12:36.589Z" },
- { url = "https://files.pythonhosted.org/packages/ab/1f/d10c6d6ae26ff1d7c3eea6fd048280ef2e796d4fb260c5424fd021f68ecf/orjson-3.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:75682d62b1b16b61a30716d7a2ec1f4c36195de4a1c61f6665aedd947b93a5d5", size = 147392, upload-time = "2026-01-29T15:12:37.876Z" },
- { url = "https://files.pythonhosted.org/packages/8d/43/7479921c174441a0aa5277c313732e20713c0969ac303be9f03d88d3db5d/orjson-3.11.6-cp313-cp313-win32.whl", hash = "sha256:40dc277999c2ef227dcc13072be879b4cfd325502daeb5c35ed768f706f2bf30", size = 139718, upload-time = "2026-01-29T15:12:39.274Z" },
- { url = "https://files.pythonhosted.org/packages/88/bc/9ffe7dfbf8454bc4e75bb8bf3a405ed9e0598df1d3535bb4adcd46be07d0/orjson-3.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:f0f6e9f8ff7905660bc3c8a54cd4a675aa98f7f175cf00a59815e2ff42c0d916", size = 136635, upload-time = "2026-01-29T15:12:40.593Z" },
- { url = "https://files.pythonhosted.org/packages/6f/7e/51fa90b451470447ea5023b20d83331ec741ae28d1e6d8ed547c24e7de14/orjson-3.11.6-cp313-cp313-win_arm64.whl", hash = "sha256:1608999478664de848e5900ce41f25c4ecdfc4beacbc632b6fd55e1a586e5d38", size = 135175, upload-time = "2026-01-29T15:12:41.997Z" },
- { url = "https://files.pythonhosted.org/packages/31/9f/46ca908abaeeec7560638ff20276ab327b980d73b3cc2f5b205b4a1c60b3/orjson-3.11.6-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6026db2692041d2a23fe2545606df591687787825ad5821971ef0974f2c47630", size = 249823, upload-time = "2026-01-29T15:12:43.332Z" },
- { url = "https://files.pythonhosted.org/packages/ff/78/ca478089818d18c9cd04f79c43f74ddd031b63c70fa2a946eb5e85414623/orjson-3.11.6-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:132b0ab2e20c73afa85cf142e547511feb3d2f5b7943468984658f3952b467d4", size = 134328, upload-time = "2026-01-29T15:12:45.171Z" },
- { url = "https://files.pythonhosted.org/packages/39/5e/cbb9d830ed4e47f4375ad8eef8e4fff1bf1328437732c3809054fc4e80be/orjson-3.11.6-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b376fb05f20a96ec117d47987dd3b39265c635725bda40661b4c5b73b77b5fde", size = 137651, upload-time = "2026-01-29T15:12:46.602Z" },
- { url = "https://files.pythonhosted.org/packages/7c/3a/35df6558c5bc3a65ce0961aefee7f8364e59af78749fc796ea255bfa0cf5/orjson-3.11.6-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:954dae4e080574672a1dfcf2a840eddef0f27bd89b0e94903dd0824e9c1db060", size = 134596, upload-time = "2026-01-29T15:12:47.95Z" },
- { url = "https://files.pythonhosted.org/packages/cd/8e/3d32dd7b7f26a19cc4512d6ed0ae3429567c71feef720fe699ff43c5bc9e/orjson-3.11.6-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe515bb89d59e1e4b48637a964f480b35c0a2676de24e65e55310f6016cca7ce", size = 140923, upload-time = "2026-01-29T15:12:49.333Z" },
- { url = "https://files.pythonhosted.org/packages/6c/9c/1efbf5c99b3304f25d6f0d493a8d1492ee98693637c10ce65d57be839d7b/orjson-3.11.6-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:380f9709c275917af28feb086813923251e11ee10687257cd7f1ea188bcd4485", size = 144068, upload-time = "2026-01-29T15:12:50.927Z" },
- { url = "https://files.pythonhosted.org/packages/82/83/0d19eeb5be797de217303bbb55dde58dba26f996ed905d301d98fd2d4637/orjson-3.11.6-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8173e0d3f6081e7034c51cf984036d02f6bab2a2126de5a759d79f8e5a140e7", size = 142493, upload-time = "2026-01-29T15:12:52.432Z" },
- { url = "https://files.pythonhosted.org/packages/32/a7/573fec3df4dc8fc259b7770dc6c0656f91adce6e19330c78d23f87945d1e/orjson-3.11.6-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dddf9ba706294906c56ef5150a958317b09aa3a8a48df1c52ccf22ec1907eac", size = 145616, upload-time = "2026-01-29T15:12:53.903Z" },
- { url = "https://files.pythonhosted.org/packages/c2/0e/23551b16f21690f7fd5122e3cf40fdca5d77052a434d0071990f97f5fe2f/orjson-3.11.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cbae5c34588dc79938dffb0b6fbe8c531f4dc8a6ad7f39759a9eb5d2da405ef2", size = 146951, upload-time = "2026-01-29T15:12:55.698Z" },
- { url = "https://files.pythonhosted.org/packages/b8/63/5e6c8f39805c39123a18e412434ea364349ee0012548d08aa586e2bd6aa9/orjson-3.11.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:f75c318640acbddc419733b57f8a07515e587a939d8f54363654041fd1f4e465", size = 421024, upload-time = "2026-01-29T15:12:57.434Z" },
- { url = "https://files.pythonhosted.org/packages/1d/4d/724975cf0087f6550bd01fd62203418afc0ea33fd099aed318c5bcc52df8/orjson-3.11.6-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:e0ab8d13aa2a3e98b4a43487c9205b2c92c38c054b4237777484d503357c8437", size = 155774, upload-time = "2026-01-29T15:12:59.397Z" },
- { url = "https://files.pythonhosted.org/packages/a8/a3/f4c4e3f46b55db29e0a5f20493b924fc791092d9a03ff2068c9fe6c1002f/orjson-3.11.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f884c7fb1020d44612bd7ac0db0babba0e2f78b68d9a650c7959bf99c783773f", size = 147393, upload-time = "2026-01-29T15:13:00.769Z" },
- { url = "https://files.pythonhosted.org/packages/ee/86/6f5529dd27230966171ee126cecb237ed08e9f05f6102bfaf63e5b32277d/orjson-3.11.6-cp314-cp314-win32.whl", hash = "sha256:8d1035d1b25732ec9f971e833a3e299d2b1a330236f75e6fd945ad982c76aaf3", size = 139760, upload-time = "2026-01-29T15:13:02.173Z" },
- { url = "https://files.pythonhosted.org/packages/d3/b5/91ae7037b2894a6b5002fb33f4fbccec98424a928469835c3837fbb22a9b/orjson-3.11.6-cp314-cp314-win_amd64.whl", hash = "sha256:931607a8865d21682bb72de54231655c86df1870502d2962dbfd12c82890d077", size = 136633, upload-time = "2026-01-29T15:13:04.267Z" },
- { url = "https://files.pythonhosted.org/packages/55/74/f473a3ec7a0a7ebc825ca8e3c86763f7d039f379860c81ba12dcdd456547/orjson-3.11.6-cp314-cp314-win_arm64.whl", hash = "sha256:fe71f6b283f4f1832204ab8235ce07adad145052614f77c876fcf0dac97bc06f", size = 135168, upload-time = "2026-01-29T15:13:05.932Z" },
+version = "3.11.7"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/53/45/b268004f745ede84e5798b48ee12b05129d19235d0e15267aa57dcdb400b/orjson-3.11.7.tar.gz", hash = "sha256:9b1a67243945819ce55d24a30b59d6a168e86220452d2c96f4d1f093e71c0c49", size = 6144992, upload-time = "2026-02-02T15:38:49.29Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/de/1a/a373746fa6d0e116dd9e54371a7b54622c44d12296d5d0f3ad5e3ff33490/orjson-3.11.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a02c833f38f36546ba65a452127633afce4cf0dd7296b753d3bb54e55e5c0174", size = 229140, upload-time = "2026-02-02T15:37:06.082Z" },
+ { url = "https://files.pythonhosted.org/packages/52/a2/fa129e749d500f9b183e8a3446a193818a25f60261e9ce143ad61e975208/orjson-3.11.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63c6e6738d7c3470ad01601e23376aa511e50e1f3931395b9f9c722406d1a67", size = 128670, upload-time = "2026-02-02T15:37:08.002Z" },
+ { url = "https://files.pythonhosted.org/packages/08/93/1e82011cd1e0bd051ef9d35bed1aa7fb4ea1f0a055dc2c841b46b43a9ebd/orjson-3.11.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:043d3006b7d32c7e233b8cfb1f01c651013ea079e08dcef7189a29abd8befe11", size = 123832, upload-time = "2026-02-02T15:37:09.191Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/d8/a26b431ef962c7d55736674dddade876822f3e33223c1f47a36879350d04/orjson-3.11.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57036b27ac8a25d81112eb0cc9835cd4833c5b16e1467816adc0015f59e870dc", size = 129171, upload-time = "2026-02-02T15:37:11.112Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/19/f47819b84a580f490da260c3ee9ade214cf4cf78ac9ce8c1c758f80fdfc9/orjson-3.11.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:733ae23ada68b804b222c44affed76b39e30806d38660bf1eb200520d259cc16", size = 141967, upload-time = "2026-02-02T15:37:12.282Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/cd/37ece39a0777ba077fdcdbe4cccae3be8ed00290c14bf8afdc548befc260/orjson-3.11.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5fdfad2093bdd08245f2e204d977facd5f871c88c4a71230d5bcbd0e43bf6222", size = 130991, upload-time = "2026-02-02T15:37:13.465Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/ed/f2b5d66aa9b6b5c02ff5f120efc7b38c7c4962b21e6be0f00fd99a5c348e/orjson-3.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cededd6738e1c153530793998e31c05086582b08315db48ab66649768f326baa", size = 133674, upload-time = "2026-02-02T15:37:14.694Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/6e/baa83e68d1aa09fa8c3e5b2c087d01d0a0bd45256de719ed7bc22c07052d/orjson-3.11.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:14f440c7268c8f8633d1b3d443a434bd70cb15686117ea6beff8fdc8f5917a1e", size = 138722, upload-time = "2026-02-02T15:37:16.501Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/47/7f8ef4963b772cd56999b535e553f7eb5cd27e9dd6c049baee6f18bfa05d/orjson-3.11.7-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:3a2479753bbb95b0ebcf7969f562cdb9668e6d12416a35b0dda79febf89cdea2", size = 409056, upload-time = "2026-02-02T15:37:17.895Z" },
+ { url = "https://files.pythonhosted.org/packages/38/eb/2df104dd2244b3618f25325a656f85cc3277f74bbd91224752410a78f3c7/orjson-3.11.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:71924496986275a737f38e3f22b4e0878882b3f7a310d2ff4dc96e812789120c", size = 144196, upload-time = "2026-02-02T15:37:19.349Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/2a/ee41de0aa3a6686598661eae2b4ebdff1340c65bfb17fcff8b87138aab21/orjson-3.11.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b4a9eefdc70bf8bf9857f0290f973dec534ac84c35cd6a7f4083be43e7170a8f", size = 134979, upload-time = "2026-02-02T15:37:20.906Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/fa/92fc5d3d402b87a8b28277a9ed35386218a6a5287c7fe5ee9b9f02c53fb2/orjson-3.11.7-cp310-cp310-win32.whl", hash = "sha256:ae9e0b37a834cef7ce8f99de6498f8fad4a2c0bf6bfc3d02abd8ed56aa15b2de", size = 127968, upload-time = "2026-02-02T15:37:23.178Z" },
+ { url = "https://files.pythonhosted.org/packages/07/29/a576bf36d73d60df06904d3844a9df08e25d59eba64363aaf8ec2f9bff41/orjson-3.11.7-cp310-cp310-win_amd64.whl", hash = "sha256:d772afdb22555f0c58cfc741bdae44180122b3616faa1ecadb595cd526e4c993", size = 125128, upload-time = "2026-02-02T15:37:24.329Z" },
+ { url = "https://files.pythonhosted.org/packages/37/02/da6cb01fc6087048d7f61522c327edf4250f1683a58a839fdcc435746dd5/orjson-3.11.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9487abc2c2086e7c8eb9a211d2ce8855bae0e92586279d0d27b341d5ad76c85c", size = 228664, upload-time = "2026-02-02T15:37:25.542Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/c2/5885e7a5881dba9a9af51bc564e8967225a642b3e03d089289a35054e749/orjson-3.11.7-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:79cacb0b52f6004caf92405a7e1f11e6e2de8bdf9019e4f76b44ba045125cd6b", size = 125344, upload-time = "2026-02-02T15:37:26.92Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/1d/4e7688de0a92d1caf600dfd5fb70b4c5bfff51dfa61ac555072ef2d0d32a/orjson-3.11.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e85fe4698b6a56d5e2ebf7ae87544d668eb6bde1ad1226c13f44663f20ec9e", size = 128404, upload-time = "2026-02-02T15:37:28.108Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/b2/ec04b74ae03a125db7bd69cffd014b227b7f341e3261bf75b5eb88a1aa92/orjson-3.11.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8d14b71c0b12963fe8a62aac87119f1afdf4cb88a400f61ca5ae581449efcb5", size = 123677, upload-time = "2026-02-02T15:37:30.287Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/69/f95bdf960605f08f827f6e3291fe243d8aa9c5c9ff017a8d7232209184c3/orjson-3.11.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91c81ef070c8f3220054115e1ef468b1c9ce8497b4e526cb9f68ab4dc0a7ac62", size = 128950, upload-time = "2026-02-02T15:37:31.595Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/1b/de59c57bae1d148ef298852abd31909ac3089cff370dfd4cd84cc99cbc42/orjson-3.11.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:411ebaf34d735e25e358a6d9e7978954a9c9d58cfb47bc6683cdc3964cd2f910", size = 141756, upload-time = "2026-02-02T15:37:32.985Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/9e/9decc59f4499f695f65c650f6cfa6cd4c37a3fbe8fa235a0a3614cb54386/orjson-3.11.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a16bcd08ab0bcdfc7e8801d9c4a9cc17e58418e4d48ddc6ded4e9e4b1a94062b", size = 130812, upload-time = "2026-02-02T15:37:34.204Z" },
+ { url = "https://files.pythonhosted.org/packages/28/e6/59f932bcabd1eac44e334fe8e3281a92eacfcb450586e1f4bde0423728d8/orjson-3.11.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c0b51672e466fd7e56230ffbae7f1639e18d0ce023351fb75da21b71bc2c960", size = 133444, upload-time = "2026-02-02T15:37:35.446Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/36/b0f05c0eaa7ca30bc965e37e6a2956b0d67adb87a9872942d3568da846ae/orjson-3.11.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:136dcd6a2e796dfd9ffca9fc027d778567b0b7c9968d092842d3c323cef88aa8", size = 138609, upload-time = "2026-02-02T15:37:36.657Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/03/58ec7d302b8d86944c60c7b4b82975d5161fcce4c9bc8c6cb1d6741b6115/orjson-3.11.7-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:7ba61079379b0ae29e117db13bda5f28d939766e410d321ec1624afc6a0b0504", size = 408918, upload-time = "2026-02-02T15:37:38.076Z" },
+ { url = "https://files.pythonhosted.org/packages/06/3a/868d65ef9a8b99be723bd510de491349618abd9f62c826cf206d962db295/orjson-3.11.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0527a4510c300e3b406591b0ba69b5dc50031895b0a93743526a3fc45f59d26e", size = 143998, upload-time = "2026-02-02T15:37:39.706Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/c7/1e18e1c83afe3349f4f6dc9e14910f0ae5f82eac756d1412ea4018938535/orjson-3.11.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a709e881723c9b18acddcfb8ba357322491ad553e277cf467e1e7e20e2d90561", size = 134802, upload-time = "2026-02-02T15:37:41.002Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/0b/ccb7ee1a65b37e8eeb8b267dc953561d72370e85185e459616d4345bab34/orjson-3.11.7-cp311-cp311-win32.whl", hash = "sha256:c43b8b5bab288b6b90dac410cca7e986a4fa747a2e8f94615aea407da706980d", size = 127828, upload-time = "2026-02-02T15:37:42.241Z" },
+ { url = "https://files.pythonhosted.org/packages/af/9e/55c776dffda3f381e0f07d010a4f5f3902bf48eaba1bb7684d301acd4924/orjson-3.11.7-cp311-cp311-win_amd64.whl", hash = "sha256:6543001328aa857187f905308a028935864aefe9968af3848401b6fe80dbb471", size = 124941, upload-time = "2026-02-02T15:37:43.444Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/8e/424a620fa7d263b880162505fb107ef5e0afaa765b5b06a88312ac291560/orjson-3.11.7-cp311-cp311-win_arm64.whl", hash = "sha256:1ee5cc7160a821dfe14f130bc8e63e7611051f964b463d9e2a3a573204446a4d", size = 126245, upload-time = "2026-02-02T15:37:45.18Z" },
+ { url = "https://files.pythonhosted.org/packages/80/bf/76f4f1665f6983385938f0e2a5d7efa12a58171b8456c252f3bae8a4cf75/orjson-3.11.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bd03ea7606833655048dab1a00734a2875e3e86c276e1d772b2a02556f0d895f", size = 228545, upload-time = "2026-02-02T15:37:46.376Z" },
+ { url = "https://files.pythonhosted.org/packages/79/53/6c72c002cb13b5a978a068add59b25a8bdf2800ac1c9c8ecdb26d6d97064/orjson-3.11.7-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:89e440ebc74ce8ab5c7bc4ce6757b4a6b1041becb127df818f6997b5c71aa60b", size = 125224, upload-time = "2026-02-02T15:37:47.697Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/83/10e48852865e5dd151bdfe652c06f7da484578ed02c5fca938e3632cb0b8/orjson-3.11.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ede977b5fe5ac91b1dffc0a517ca4542d2ec8a6a4ff7b2652d94f640796342a", size = 128154, upload-time = "2026-02-02T15:37:48.954Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/52/a66e22a2b9abaa374b4a081d410edab6d1e30024707b87eab7c734afe28d/orjson-3.11.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b7b1dae39230a393df353827c855a5f176271c23434cfd2db74e0e424e693e10", size = 123548, upload-time = "2026-02-02T15:37:50.187Z" },
+ { url = "https://files.pythonhosted.org/packages/de/38/605d371417021359f4910c496f764c48ceb8997605f8c25bf1dfe58c0ebe/orjson-3.11.7-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed46f17096e28fb28d2975834836a639af7278aa87c84f68ab08fbe5b8bd75fa", size = 129000, upload-time = "2026-02-02T15:37:51.426Z" },
+ { url = "https://files.pythonhosted.org/packages/44/98/af32e842b0ffd2335c89714d48ca4e3917b42f5d6ee5537832e069a4b3ac/orjson-3.11.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3726be79e36e526e3d9c1aceaadbfb4a04ee80a72ab47b3f3c17fefb9812e7b8", size = 141686, upload-time = "2026-02-02T15:37:52.607Z" },
+ { url = "https://files.pythonhosted.org/packages/96/0b/fc793858dfa54be6feee940c1463370ece34b3c39c1ca0aa3845f5ba9892/orjson-3.11.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0724e265bc548af1dedebd9cb3d24b4e1c1e685a343be43e87ba922a5c5fff2f", size = 130812, upload-time = "2026-02-02T15:37:53.944Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/91/98a52415059db3f374757d0b7f0f16e3b5cd5976c90d1c2b56acaea039e6/orjson-3.11.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7745312efa9e11c17fbd3cb3097262d079da26930ae9ae7ba28fb738367cbad", size = 133440, upload-time = "2026-02-02T15:37:55.615Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/b6/cb540117bda61791f46381f8c26c8f93e802892830a6055748d3bb1925ab/orjson-3.11.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f904c24bdeabd4298f7a977ef14ca2a022ca921ed670b92ecd16ab6f3d01f867", size = 138386, upload-time = "2026-02-02T15:37:56.814Z" },
+ { url = "https://files.pythonhosted.org/packages/63/1a/50a3201c334a7f17c231eee5f841342190723794e3b06293f26e7cf87d31/orjson-3.11.7-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b9fc4d0f81f394689e0814617aadc4f2ea0e8025f38c226cbf22d3b5ddbf025d", size = 408853, upload-time = "2026-02-02T15:37:58.291Z" },
+ { url = "https://files.pythonhosted.org/packages/87/cd/8de1c67d0be44fdc22701e5989c0d015a2adf391498ad42c4dc589cd3013/orjson-3.11.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:849e38203e5be40b776ed2718e587faf204d184fc9a008ae441f9442320c0cab", size = 144130, upload-time = "2026-02-02T15:38:00.163Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/fe/d605d700c35dd55f51710d159fc54516a280923cd1b7e47508982fbb387d/orjson-3.11.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4682d1db3bcebd2b64757e0ddf9e87ae5f00d29d16c5cdf3a62f561d08cc3dd2", size = 134818, upload-time = "2026-02-02T15:38:01.507Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/e4/15ecc67edb3ddb3e2f46ae04475f2d294e8b60c1825fbe28a428b93b3fbd/orjson-3.11.7-cp312-cp312-win32.whl", hash = "sha256:f4f7c956b5215d949a1f65334cf9d7612dde38f20a95f2315deef167def91a6f", size = 127923, upload-time = "2026-02-02T15:38:02.75Z" },
+ { url = "https://files.pythonhosted.org/packages/34/70/2e0855361f76198a3965273048c8e50a9695d88cd75811a5b46444895845/orjson-3.11.7-cp312-cp312-win_amd64.whl", hash = "sha256:bf742e149121dc5648ba0a08ea0871e87b660467ef168a3a5e53bc1fbd64bb74", size = 125007, upload-time = "2026-02-02T15:38:04.032Z" },
+ { url = "https://files.pythonhosted.org/packages/68/40/c2051bd19fc467610fed469dc29e43ac65891571138f476834ca192bc290/orjson-3.11.7-cp312-cp312-win_arm64.whl", hash = "sha256:26c3b9132f783b7d7903bf1efb095fed8d4a3a85ec0d334ee8beff3d7a4749d5", size = 126089, upload-time = "2026-02-02T15:38:05.297Z" },
+ { url = "https://files.pythonhosted.org/packages/89/25/6e0e52cac5aab51d7b6dcd257e855e1dec1c2060f6b28566c509b4665f62/orjson-3.11.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1d98b30cc1313d52d4af17d9c3d307b08389752ec5f2e5febdfada70b0f8c733", size = 228390, upload-time = "2026-02-02T15:38:06.8Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/29/a77f48d2fc8a05bbc529e5ff481fb43d914f9e383ea2469d4f3d51df3d00/orjson-3.11.7-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:d897e81f8d0cbd2abb82226d1860ad2e1ab3ff16d7b08c96ca00df9d45409ef4", size = 125189, upload-time = "2026-02-02T15:38:08.181Z" },
+ { url = "https://files.pythonhosted.org/packages/89/25/0a16e0729a0e6a1504f9d1a13cdd365f030068aab64cec6958396b9969d7/orjson-3.11.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:814be4b49b228cfc0b3c565acf642dd7d13538f966e3ccde61f4f55be3e20785", size = 128106, upload-time = "2026-02-02T15:38:09.41Z" },
+ { url = "https://files.pythonhosted.org/packages/66/da/a2e505469d60666a05ab373f1a6322eb671cb2ba3a0ccfc7d4bc97196787/orjson-3.11.7-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d06e5c5fed5caedd2e540d62e5b1c25e8c82431b9e577c33537e5fa4aa909539", size = 123363, upload-time = "2026-02-02T15:38:10.73Z" },
+ { url = "https://files.pythonhosted.org/packages/23/bf/ed73f88396ea35c71b38961734ea4a4746f7ca0768bf28fd551d37e48dd0/orjson-3.11.7-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31c80ce534ac4ea3739c5ee751270646cbc46e45aea7576a38ffec040b4029a1", size = 129007, upload-time = "2026-02-02T15:38:12.138Z" },
+ { url = "https://files.pythonhosted.org/packages/73/3c/b05d80716f0225fc9008fbf8ab22841dcc268a626aa550561743714ce3bf/orjson-3.11.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f50979824bde13d32b4320eedd513431c921102796d86be3eee0b58e58a3ecd1", size = 141667, upload-time = "2026-02-02T15:38:13.398Z" },
+ { url = "https://files.pythonhosted.org/packages/61/e8/0be9b0addd9bf86abfc938e97441dcd0375d494594b1c8ad10fe57479617/orjson-3.11.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e54f3808e2b6b945078c41aa8d9b5834b28c50843846e97807e5adb75fa9705", size = 130832, upload-time = "2026-02-02T15:38:14.698Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/ec/c68e3b9021a31d9ec15a94931db1410136af862955854ed5dd7e7e4f5bff/orjson-3.11.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12b80df61aab7b98b490fe9e4879925ba666fccdfcd175252ce4d9035865ace", size = 133373, upload-time = "2026-02-02T15:38:16.109Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/45/f3466739aaafa570cc8e77c6dbb853c48bf56e3b43738020e2661e08b0ac/orjson-3.11.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:996b65230271f1a97026fd0e6a753f51fbc0c335d2ad0c6201f711b0da32693b", size = 138307, upload-time = "2026-02-02T15:38:17.453Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/84/9f7f02288da1ffb31405c1be07657afd1eecbcb4b64ee2817b6fe0f785fa/orjson-3.11.7-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ab49d4b2a6a1d415ddb9f37a21e02e0d5dbfe10b7870b21bf779fc21e9156157", size = 408695, upload-time = "2026-02-02T15:38:18.831Z" },
+ { url = "https://files.pythonhosted.org/packages/18/07/9dd2f0c0104f1a0295ffbe912bc8d63307a539b900dd9e2c48ef7810d971/orjson-3.11.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:390a1dce0c055ddf8adb6aa94a73b45a4a7d7177b5c584b8d1c1947f2ba60fb3", size = 144099, upload-time = "2026-02-02T15:38:20.28Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/66/857a8e4a3292e1f7b1b202883bcdeb43a91566cf59a93f97c53b44bd6801/orjson-3.11.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1eb80451a9c351a71dfaf5b7ccc13ad065405217726b59fdbeadbcc544f9d223", size = 134806, upload-time = "2026-02-02T15:38:22.186Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/5b/6ebcf3defc1aab3a338ca777214966851e92efb1f30dc7fc8285216e6d1b/orjson-3.11.7-cp313-cp313-win32.whl", hash = "sha256:7477aa6a6ec6139c5cb1cc7b214643592169a5494d200397c7fc95d740d5fcf3", size = 127914, upload-time = "2026-02-02T15:38:23.511Z" },
+ { url = "https://files.pythonhosted.org/packages/00/04/c6f72daca5092e3117840a1b1e88dfc809cc1470cf0734890d0366b684a1/orjson-3.11.7-cp313-cp313-win_amd64.whl", hash = "sha256:b9f95dcdea9d4f805daa9ddf02617a89e484c6985fa03055459f90e87d7a0757", size = 124986, upload-time = "2026-02-02T15:38:24.836Z" },
+ { url = "https://files.pythonhosted.org/packages/03/ba/077a0f6f1085d6b806937246860fafbd5b17f3919c70ee3f3d8d9c713f38/orjson-3.11.7-cp313-cp313-win_arm64.whl", hash = "sha256:800988273a014a0541483dc81021247d7eacb0c845a9d1a34a422bc718f41539", size = 126045, upload-time = "2026-02-02T15:38:26.216Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/1e/745565dca749813db9a093c5ebc4bac1a9475c64d54b95654336ac3ed961/orjson-3.11.7-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:de0a37f21d0d364954ad5de1970491d7fbd0fb1ef7417d4d56a36dc01ba0c0a0", size = 228391, upload-time = "2026-02-02T15:38:27.757Z" },
+ { url = "https://files.pythonhosted.org/packages/46/19/e40f6225da4d3aa0c8dc6e5219c5e87c2063a560fe0d72a88deb59776794/orjson-3.11.7-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:c2428d358d85e8da9d37cba18b8c4047c55222007a84f97156a5b22028dfbfc0", size = 125188, upload-time = "2026-02-02T15:38:29.241Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/7e/c4de2babef2c0817fd1f048fd176aa48c37bec8aef53d2fa932983032cce/orjson-3.11.7-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c4bc6c6ac52cdaa267552544c73e486fecbd710b7ac09bc024d5a78555a22f6", size = 128097, upload-time = "2026-02-02T15:38:30.618Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/74/233d360632bafd2197f217eee7fb9c9d0229eac0c18128aee5b35b0014fe/orjson-3.11.7-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd0d68edd7dfca1b2eca9361a44ac9f24b078de3481003159929a0573f21a6bf", size = 123364, upload-time = "2026-02-02T15:38:32.363Z" },
+ { url = "https://files.pythonhosted.org/packages/79/51/af79504981dd31efe20a9e360eb49c15f06df2b40e7f25a0a52d9ae888e8/orjson-3.11.7-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:623ad1b9548ef63886319c16fa317848e465a21513b31a6ad7b57443c3e0dcf5", size = 129076, upload-time = "2026-02-02T15:38:33.68Z" },
+ { url = "https://files.pythonhosted.org/packages/67/e2/da898eb68b72304f8de05ca6715870d09d603ee98d30a27e8a9629abc64b/orjson-3.11.7-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6e776b998ac37c0396093d10290e60283f59cfe0fc3fccbd0ccc4bd04dd19892", size = 141705, upload-time = "2026-02-02T15:38:34.989Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/89/15364d92acb3d903b029e28d834edb8780c2b97404cbf7929aa6b9abdb24/orjson-3.11.7-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c6c3af76716f4a9c290371ba2e390ede06f6603edb277b481daf37f6f464e", size = 130855, upload-time = "2026-02-02T15:38:36.379Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a56df3239294ea5964adf074c54bcc4f0ccd21636049a2cf3ca9cf03b5d03cf1", size = 133386, upload-time = "2026-02-02T15:38:37.704Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/0e/45e1dcf10e17d0924b7c9162f87ec7b4ca79e28a0548acf6a71788d3e108/orjson-3.11.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:bda117c4148e81f746655d5a3239ae9bd00cb7bc3ca178b5fc5a5997e9744183", size = 138295, upload-time = "2026-02-02T15:38:39.096Z" },
+ { url = "https://files.pythonhosted.org/packages/63/d7/4d2e8b03561257af0450f2845b91fbd111d7e526ccdf737267108075e0ba/orjson-3.11.7-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:23d6c20517a97a9daf1d48b580fcdc6f0516c6f4b5038823426033690b4d2650", size = 408720, upload-time = "2026-02-02T15:38:40.634Z" },
+ { url = "https://files.pythonhosted.org/packages/78/cf/d45343518282108b29c12a65892445fc51f9319dc3c552ceb51bb5905ed2/orjson-3.11.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:8ff206156006da5b847c9304b6308a01e8cdbc8cce824e2779a5ba71c3def141", size = 144152, upload-time = "2026-02-02T15:38:42.262Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/3a/d6001f51a7275aacd342e77b735c71fa04125a3f93c36fee4526bc8c654e/orjson-3.11.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:962d046ee1765f74a1da723f4b33e3b228fe3a48bd307acce5021dfefe0e29b2", size = 134814, upload-time = "2026-02-02T15:38:43.627Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/d3/f19b47ce16820cc2c480f7f1723e17f6d411b3a295c60c8ad3aa9ff1c96a/orjson-3.11.7-cp314-cp314-win32.whl", hash = "sha256:89e13dd3f89f1c38a9c9eba5fbf7cdc2d1feca82f5f290864b4b7a6aac704576", size = 127997, upload-time = "2026-02-02T15:38:45.06Z" },
+ { url = "https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl", hash = "sha256:845c3e0d8ded9c9271cd79596b9b552448b885b97110f628fb687aee2eed11c1", size = 124985, upload-time = "2026-02-02T15:38:46.388Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/1c/f2a8d8a1b17514660a614ce5f7aac74b934e69f5abc2700cc7ced882a009/orjson-3.11.7-cp314-cp314-win_arm64.whl", hash = "sha256:4a2e9c5be347b937a2e0203866f12bba36082e89b402ddb9e927d5822e43088d", size = 126038, upload-time = "2026-02-02T15:38:47.703Z" },
]

[[package]]
@@ -4424,7 +4410,7 @@ resolution-markers = [
"python_full_version == '3.11.*' and sys_platform == 'win32'",
]
dependencies = [
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "python-dateutil", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "tzdata", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" },
]
@@ -4597,11 +4583,11 @@ wheels = [
[[package]]
name = "pip"
-version = "25.3"
+version = "26.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/fe/6e/74a3f0179a4a73a53d66ce57fdb4de0080a8baa1de0063de206d6167acc2/pip-25.3.tar.gz", hash = "sha256:8d0538dbbd7babbd207f261ed969c65de439f6bc9e5dbd3b3b9a77f25d95f343", size = 1803014, upload-time = "2025-10-25T00:55:41.394Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/44/c2/65686a7783a7c27a329706207147e82f23c41221ee9ae33128fc331670a0/pip-26.0.tar.gz", hash = "sha256:3ce220a0a17915972fbf1ab451baae1521c4539e778b28127efa79b974aff0fa", size = 1812654, upload-time = "2026-01-31T01:40:54.361Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/44/3c/d717024885424591d5376220b5e836c2d5293ce2011523c9de23ff7bf068/pip-25.3-py3-none-any.whl", hash = "sha256:9655943313a94722b7774661c21049070f6bbb0a1516bf02f7c8d5d9201514cd", size = 1778622, upload-time = "2025-10-25T00:55:39.247Z" },
+ { url = "https://files.pythonhosted.org/packages/69/00/5ac7aa77688ec4d34148b423d34dc0c9bc4febe0d872a9a1ad9860b2f6f1/pip-26.0-py3-none-any.whl", hash = "sha256:98436feffb9e31bc9339cf369fd55d3331b1580b6a6f1173bacacddcf9c34754", size = 1787564, upload-time = "2026-01-31T01:40:52.252Z" },
]

[[package]]
@@ -4700,7 +4686,7 @@ wheels = [
[[package]]
name = "posthog"
-version = "7.7.0"
+version = "7.8.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "backoff", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -4710,9 +4696,9 @@ dependencies = [
{ name = "six", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/23/dd/ca6d5a79614af27ededc0dca85e77f42f7704e29f8314819d7ce92b9a7f3/posthog-7.7.0.tar.gz", hash = "sha256:b4f2b1a616e099961f6ab61a5a2f88de62082c26801699e556927d21c00737ef", size = 160766, upload-time = "2026-01-27T21:15:41.63Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/67/39/613f56a5d469e4c4f4e9616f533bd0451ae1b7b70d033201227b9229bf17/posthog-7.8.0.tar.gz", hash = "sha256:5f46730090be503a9d4357905d3260178ed6be4c1f6c666e8d7b44189e11fbb8", size = 167014, upload-time = "2026-01-30T13:43:29.829Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/41/3f/41b426ed9ab161d630edec84bacb6664ae62b6e63af1165919c7e11c17d1/posthog-7.7.0-py3-none-any.whl", hash = "sha256:955f42097bf147459653b9102e5f7f9a22e4b6fc9f15003447bd1137fafbc505", size = 185353, upload-time = "2026-01-27T21:15:40.051Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/f6/c3118de9b52fd442c0de92e4ad68326f5ecb327c1d354e0b9a8d0213ce45/posthog-7.8.0-py3-none-any.whl", hash = "sha256:fefa48c560c51ca0acc6261c92a8f61a067a8aa977c8820d0f149eaa4500e4da", size = 192427, upload-time = "2026-01-30T13:43:28.774Z" },
]

[[package]]
@@ -4860,14 +4846,14 @@ wheels = [
[[package]]
name = "proto-plus"
-version = "1.27.0"
+version = "1.27.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/01/89/9cbe2f4bba860e149108b683bc2efec21f14d5f7ed6e25562ad86acbc373/proto_plus-1.27.0.tar.gz", hash = "sha256:873af56dd0d7e91836aee871e5799e1c6f1bda86ac9a983e0bb9f0c266a568c4", size = 56158, upload-time = "2025-12-16T13:46:25.729Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/3a/02/8832cde80e7380c600fbf55090b6ab7b62bd6825dbedde6d6657c15a1f8e/proto_plus-1.27.1.tar.gz", hash = "sha256:912a7460446625b792f6448bade9e55cd4e41e6ac10e27009ef71a7f317fa147", size = 56929, upload-time = "2026-02-02T17:34:49.035Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/cd/24/3b7a0818484df9c28172857af32c2397b6d8fcd99d9468bd4684f98ebf0a/proto_plus-1.27.0-py3-none-any.whl", hash = "sha256:1baa7f81cf0f8acb8bc1f6d085008ba4171eaf669629d1b6d1673b21ed1c0a82", size = 50205, upload-time = "2025-12-16T13:46:24.76Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/79/ac273cbbf744691821a9cca88957257f41afe271637794975ca090b9588b/proto_plus-1.27.1-py3-none-any.whl", hash = "sha256:e4643061f3a4d0de092d62aa4ad09fa4756b2cbb89d4627f3985018216f9fefc", size = 50480, upload-time = "2026-02-02T17:34:47.339Z" },
]

[[package]]
@@ -5174,11 +5160,11 @@ wheels = [
[[package]]
name = "pyjwt"
-version = "2.10.1"
+version = "2.11.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/5c/5a/b46fa56bf322901eee5b0454a34343cdbdae202cd421775a8ee4e42fd519/pyjwt-2.11.0.tar.gz", hash = "sha256:35f95c1f0fbe5d5ba6e43f00271c275f7a1a4db1dab27bf708073b75318ea623", size = 98019, upload-time = "2026-01-30T19:59:55.694Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/01/c26ce75ba460d5cd503da9e13b21a33804d38c2165dec7b716d06b13010c/pyjwt-2.11.0-py3-none-any.whl", hash = "sha256:94a6bde30eb5c8e04fee991062b534071fd1439ef58d2adc9ccb823e7bcd0469", size = 28224, upload-time = "2026-01-30T19:59:54.539Z" },
]

[package.optional-dependencies]
@@ -5499,7 +5485,7 @@ dependencies = [
{ name = "grpcio", version = "1.76.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.14' and sys_platform == 'darwin') or (python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')" },
{ name = "httpx", extra = ["http2"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "portalocker", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -5530,7 +5516,7 @@ dependencies = [
{ name = "jsonpath-ng", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "ml-dtypes", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "python-ulid", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "pyyaml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -5960,7 +5946,7 @@ resolution-markers = [
]
dependencies = [
{ name = "joblib", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "scipy", version = "1.17.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "threadpoolctl", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
]
@@ -6084,7 +6070,7 @@ resolution-markers = [
"python_full_version == '3.11.*' and sys_platform == 'win32'",
]
dependencies = [
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/56/3e/9cca699f3486ce6bc12ff46dc2031f1ec8eb9ccc9a320fdaf925f1417426/scipy-1.17.0.tar.gz", hash = "sha256:2591060c8e648d8b96439e111ac41fd8342fdeff1876be2e19dea3fe8930454e", size = 30396830, upload-time = "2026-01-10T21:34:23.009Z" }
wheels = [
@@ -6157,7 +6143,7 @@ source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "matplotlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
+ { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
{ name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" },
{ name = "pandas", version = "3.0.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
]
@@ -6723,14 +6709,14 @@ wheels = [
[[package]]
name = "tqdm"
-version = "4.67.1"
+version = "4.67.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/27/89/4b0001b2dab8df0a5ee2787dcbe771de75ded01f18f1f8d53dedeea2882b/tqdm-4.67.2.tar.gz", hash = "sha256:649aac53964b2cb8dec76a14b405a4c0d13612cb8933aae547dd144eacc99653", size = 169514, upload-time = "2026-01-30T23:12:06.555Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/e2/31eac96de2915cf20ccaed0225035db149dfb9165a9ed28d4b252ef3f7f7/tqdm-4.67.2-py3-none-any.whl", hash = "sha256:9a12abcbbff58b6036b2167d9d3853042b9d436fe7330f06ae047867f2f8e0a7", size = 78354, upload-time = "2026-01-30T23:12:04.368Z" },
]

[[package]]