From 38dbee7d5661df1d3d18352de11ae045ae400a69 Mon Sep 17 00:00:00 2001 From: Wauplin Date: Thu, 26 Sep 2024 11:15:35 +0200 Subject: [PATCH 1/7] Add code snippets for image-text-to-text --- packages/tasks/src/snippets/curl.ts | 27 ++++++++++++++++++++++++ packages/tasks/src/snippets/js.ts | 30 +++++++++++++++++++++++++++ packages/tasks/src/snippets/python.ts | 27 ++++++++++++++++++++++++ 3 files changed, 84 insertions(+) diff --git a/packages/tasks/src/snippets/curl.ts b/packages/tasks/src/snippets/curl.ts index 2104a0c29c..79b38bb1bd 100644 --- a/packages/tasks/src/snippets/curl.ts +++ b/packages/tasks/src/snippets/curl.ts @@ -27,6 +27,32 @@ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: stri } }; +export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => { + if (model.config?.tokenizer_config?.chat_template) { + // Conversational model detected, so we display a code snippet that features the Messages API + return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\ +-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\ +-H 'Content-Type: application/json' \\ +-d '{ + "model": "${model.id}", + "messages": [ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}}, + {"type": "text", "text": "Describe this image in one sentence."}, + ], + } + ], + "max_tokens": 500, + "stream": false +}' +`; + } else { + return snippetBasic(model, accessToken); + } +}; + export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string => `curl https://api-inference.huggingface.co/models/${model.id} \\ -X POST \\ @@ -51,6 +77,7 @@ export const curlSnippets: Partial { + if (model.config?.tokenizer_config?.chat_template) { + // Conversational model detected, so we display a code snippet that features the Messages API + return `import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference("${accessToken || `{API_TOKEN}`}"); +const image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" + +for await (const chunk of inference.chatCompletionStream({ + model: "${model.id}", + messages: [ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "Describe this image in one sentence."}, + ], + } + ], + max_tokens: 500, +})) { + process.stdout.write(chunk.choices[0]?.delta?.content || ""); +}`; + } else { + return snippetBasic(model, accessToken); + } +}; + export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string => `async function query(data) { const response = await fetch( @@ -156,6 +185,7 @@ export const jsSnippets: Partial + `from huggingface_hub import InferenceClient + +client = InferenceClient( + "${model.id}", + token="${accessToken || "{API_TOKEN}"}", +) +image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" + +for message in client.chat_completion( + messages=[ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "Describe this image in one sentence."}, + ], + } + ], + max_tokens=500, + stream=True, +): + print(message.choices[0].delta.content, end="")`; + export const snippetZeroShotClassification = (model: ModelDataMinimal): string => 
`def query(payload): response = requests.post(API_URL, headers=headers, json=payload) @@ -156,6 +180,9 @@ export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) { // Conversational model detected, so we display a code snippet that features the Messages API return snippetConversational(model, accessToken); + } else if (model.pipeline_tag === "image-text-to-text" && model.config?.tokenizer_config?.chat_template) { + // Example sending an image to the Message API + return snippetConversationalWithImage(model, accessToken); } else { const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets From fd712c1d56593b80ad62fb1cc60d0a71965f552c Mon Sep 17 00:00:00 2001 From: Wauplin Date: Thu, 26 Sep 2024 11:19:30 +0200 Subject: [PATCH 2/7] fix curl --- packages/tasks/src/snippets/curl.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/tasks/src/snippets/curl.ts b/packages/tasks/src/snippets/curl.ts index 79b38bb1bd..475c8d4e5f 100644 --- a/packages/tasks/src/snippets/curl.ts +++ b/packages/tasks/src/snippets/curl.ts @@ -40,8 +40,8 @@ export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, access "role": "user", "content": [ {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}}, - {"type": "text", "text": "Describe this image in one sentence."}, - ], + {"type": "text", "text": "Describe this image in one sentence."} + ] } ], "max_tokens": 500, From 9cc3d868ec3fca027ec94cce3026f37bfc8c14ba Mon Sep 17 00:00:00 2001 From: Wauplin Date: Mon, 30 Sep 2024 12:01:00 +0200 Subject: [PATCH 3/7] rely on conversational tag --- packages/tasks/src/snippets/curl.ts | 4 ++-- packages/tasks/src/snippets/js.ts | 4 ++-- packages/tasks/src/snippets/python.ts | 4 ++-- packages/tasks/src/snippets/types.ts | 5 ++++- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/packages/tasks/src/snippets/curl.ts b/packages/tasks/src/snippets/curl.ts index 475c8d4e5f..c8f39b677e 100644 --- a/packages/tasks/src/snippets/curl.ts +++ b/packages/tasks/src/snippets/curl.ts @@ -10,7 +10,7 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`; export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => { - if (model.config?.tokenizer_config?.chat_template) { + if (model.tags.includes("conversational")) { // Conversational model detected, so we display a code snippet that features the Messages API return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\ @@ -28,7 +28,7 @@ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: stri }; export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => { - if (model.config?.tokenizer_config?.chat_template) { + if (model.tags.includes("conversational")) { // Conversational model detected, so we display a code snippet that features the Messages API return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\ diff --git a/packages/tasks/src/snippets/js.ts b/packages/tasks/src/snippets/js.ts index 40c828e58e..f8b50bf257 100644 --- 
a/packages/tasks/src/snippets/js.ts +++ b/packages/tasks/src/snippets/js.ts @@ -24,7 +24,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { });`; export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => { - if (model.config?.tokenizer_config?.chat_template) { + if (model.tags.includes("conversational")) { // Conversational model detected, so we display a code snippet that features the Messages API return `import { HfInference } from "@huggingface/inference"; @@ -43,7 +43,7 @@ for await (const chunk of inference.chatCompletionStream({ }; export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => { - if (model.config?.tokenizer_config?.chat_template) { + if (model.tags.includes("conversational")) { // Conversational model detected, so we display a code snippet that features the Messages API return `import { HfInference } from "@huggingface/inference"; diff --git a/packages/tasks/src/snippets/python.ts b/packages/tasks/src/snippets/python.ts index 92cb252cac..370544e61a 100644 --- a/packages/tasks/src/snippets/python.ts +++ b/packages/tasks/src/snippets/python.ts @@ -177,10 +177,10 @@ export const pythonSnippets: Partial; +export type ModelDataMinimal = Pick< + ModelData, + "id" | "pipeline_tag" | "mask_token" | "library_name" | "config" | "tags" +>; From f6d7ff8a2be24abe60ded03ee4e7ec4eb2ed33f8 Mon Sep 17 00:00:00 2001 From: Wauplin Date: Mon, 30 Sep 2024 15:44:52 +0200 Subject: [PATCH 4/7] javascript casing --- packages/tasks/src/snippets/js.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/tasks/src/snippets/js.ts b/packages/tasks/src/snippets/js.ts index f8b50bf257..e2afcc724e 100644 --- a/packages/tasks/src/snippets/js.ts +++ b/packages/tasks/src/snippets/js.ts @@ -48,7 +48,7 @@ export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, access return `import { HfInference } from "@huggingface/inference"; const inference = new HfInference("${accessToken || `{API_TOKEN}`}"); -const image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" +const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" for await (const chunk of inference.chatCompletionStream({ model: "${model.id}", @@ -56,7 +56,7 @@ for await (const chunk of inference.chatCompletionStream({ { "role": "user", "content": [ - {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "image_url", "image_url": {"url": imageUrl}}, {"type": "text", "text": "Describe this image in one sentence."}, ], } From d9d5a4353d5ccf8c4fe47fe92217a4e869cecb48 Mon Sep 17 00:00:00 2001 From: Wauplin Date: Mon, 30 Sep 2024 15:45:34 +0200 Subject: [PATCH 5/7] ; --- packages/tasks/src/snippets/js.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/tasks/src/snippets/js.ts b/packages/tasks/src/snippets/js.ts index e2afcc724e..746a4de377 100644 --- a/packages/tasks/src/snippets/js.ts +++ b/packages/tasks/src/snippets/js.ts @@ -48,7 +48,7 @@ export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, access return `import { HfInference } from "@huggingface/inference"; const inference = new HfInference("${accessToken || `{API_TOKEN}`}"); -const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" +const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"; 
for await (const chunk of inference.chatCompletionStream({ model: "${model.id}", From e4c6cba687930534def4819f54484700781f8644 Mon Sep 17 00:00:00 2001 From: Wauplin Date: Mon, 30 Sep 2024 15:56:34 +0200 Subject: [PATCH 6/7] update python snippets --- packages/tasks/src/snippets/python.ts | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/packages/tasks/src/snippets/python.ts b/packages/tasks/src/snippets/python.ts index 370544e61a..aadd3e97ab 100644 --- a/packages/tasks/src/snippets/python.ts +++ b/packages/tasks/src/snippets/python.ts @@ -5,12 +5,10 @@ import type { ModelDataMinimal } from "./types.js"; export const snippetConversational = (model: ModelDataMinimal, accessToken: string): string => `from huggingface_hub import InferenceClient -client = InferenceClient( - "${model.id}", - token="${accessToken || "{API_TOKEN}"}", -) +client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}") for message in client.chat_completion( + model="${model.id}", messages=[{"role": "user", "content": "What is the capital of France?"}], max_tokens=500, stream=True, @@ -20,13 +18,12 @@ for message in client.chat_completion( export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): string => `from huggingface_hub import InferenceClient -client = InferenceClient( - "${model.id}", - token="${accessToken || "{API_TOKEN}"}", -) +client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}") + image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" for message in client.chat_completion( + model="${model.id}", messages=[ { "role": "user", From 0f8452c3f8199cb3d0e4c9fd6a21b81c0a86e39b Mon Sep 17 00:00:00 2001 From: Wauplin Date: Mon, 30 Sep 2024 16:01:00 +0200 Subject: [PATCH 7/7] indent --- packages/tasks/src/snippets/python.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/tasks/src/snippets/python.ts b/packages/tasks/src/snippets/python.ts index aadd3e97ab..9ffeed54c2 100644 --- a/packages/tasks/src/snippets/python.ts +++ b/packages/tasks/src/snippets/python.ts @@ -8,7 +8,7 @@ export const snippetConversational = (model: ModelDataMinimal, accessToken: stri client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}") for message in client.chat_completion( - model="${model.id}", + model="${model.id}", messages=[{"role": "user", "content": "What is the capital of France?"}], max_tokens=500, stream=True, @@ -23,7 +23,7 @@ client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}") image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" for message in client.chat_completion( - model="${model.id}", + model="${model.id}", messages=[ { "role": "user",
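
---

Editor's note (not part of the patches): for readers who want to see the new helper in action once the series is applied, here is a minimal TypeScript sketch. The model id, access token, import paths, and chat_template value are placeholders, and the model object is cast because it only carries the fields the snippet helpers actually read; treat this as an illustration, not code from the PR.

	import { snippetConversationalWithImage } from "./python.js";
	import type { ModelDataMinimal } from "./types.js";

	// Hypothetical model metadata: the id, tags, and chat_template below are placeholders.
	const model = {
		id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
		pipeline_tag: "image-text-to-text",
		tags: ["conversational"],
		config: { tokenizer_config: { chat_template: "..." } },
	} as ModelDataMinimal;

	// Renders the Python code snippet string introduced in PATCH 1/7 and reworked in PATCH 6/7 and 7/7.
	console.log(snippetConversationalWithImage(model, "hf_xxx"));

The curl and JS counterparts (`snippetImageTextToTextGeneration` in curl.ts and js.ts) take the same `(model, accessToken)` arguments and, per PATCH 3/7, all three now branch on `model.tags.includes("conversational")` rather than inspecting the tokenizer config directly.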