From b3f6bb15a51e46080ac52eede4c7a7c120a83b68 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Wed, 7 Feb 2024 19:40:13 -0800 Subject: [PATCH] docs: use PromptTemplate.from_template --- docs/docs/guides/safety/amazon_comprehend_chain.ipynb | 10 +++++----- docs/docs/guides/safety/moderation.mdx | 4 ++-- docs/docs/integrations/llms/aleph_alpha.ipynb | 2 +- .../llms/alibabacloud_pai_eas_endpoint.ipynb | 2 +- docs/docs/integrations/llms/anyscale.ipynb | 2 +- docs/docs/integrations/llms/aphrodite.ipynb | 2 +- docs/docs/integrations/llms/banana.ipynb | 2 +- docs/docs/integrations/llms/bittensor.ipynb | 2 +- docs/docs/integrations/llms/cerebriumai.ipynb | 2 +- docs/docs/integrations/llms/chatglm.ipynb | 4 ++-- docs/docs/integrations/llms/clarifai.ipynb | 2 +- docs/docs/integrations/llms/cloudflare_workersai.ipynb | 2 +- docs/docs/integrations/llms/ctransformers.ipynb | 2 +- docs/docs/integrations/llms/ctranslate2.ipynb | 2 +- docs/docs/integrations/llms/deepinfra.ipynb | 2 +- docs/docs/integrations/llms/forefrontai.ipynb | 2 +- docs/docs/integrations/llms/gigachat.ipynb | 2 +- docs/docs/integrations/llms/gooseai.ipynb | 2 +- docs/docs/integrations/llms/gpt4all.ipynb | 2 +- docs/docs/integrations/llms/gradient.ipynb | 2 +- docs/docs/integrations/llms/huggingface_hub.ipynb | 2 +- docs/docs/integrations/llms/llamacpp.ipynb | 2 +- docs/docs/integrations/llms/manifest.ipynb | 2 +- docs/docs/integrations/llms/minimax.ipynb | 2 +- docs/docs/integrations/llms/modal.ipynb | 2 +- docs/docs/integrations/llms/mosaicml.ipynb | 2 +- docs/docs/integrations/llms/nlpcloud.ipynb | 2 +- docs/docs/integrations/llms/octoai.ipynb | 2 +- docs/docs/integrations/llms/openai.ipynb | 2 +- docs/docs/integrations/llms/openllm.ipynb | 2 +- docs/docs/integrations/llms/openlm.ipynb | 2 +- docs/docs/integrations/llms/petals.ipynb | 2 +- docs/docs/integrations/llms/pipelineai.ipynb | 2 +- docs/docs/integrations/llms/predictionguard.ipynb | 6 +++--- docs/docs/integrations/llms/runhouse.ipynb | 2 +- docs/docs/integrations/llms/stochasticai.ipynb | 2 +- docs/docs/integrations/llms/textgen.ipynb | 4 ++-- docs/docs/integrations/llms/titan_takeoff.ipynb | 2 +- docs/docs/integrations/llms/tongyi.ipynb | 2 +- docs/docs/integrations/llms/vllm.ipynb | 2 +- docs/docs/integrations/llms/writer.ipynb | 2 +- docs/docs/integrations/llms/xinference.ipynb | 2 +- docs/docs/integrations/llms/yandex.ipynb | 2 +- docs/docs/integrations/providers/predictionguard.mdx | 4 ++-- docs/docs/integrations/providers/ray_serve.ipynb | 2 +- docs/docs/integrations/providers/shaleprotocol.md | 2 +- docs/docs/integrations/text_embedding/clarifai.ipynb | 2 +- docs/docs/modules/model_io/prompts/partial.ipynb | 2 +- 48 files changed, 58 insertions(+), 58 deletions(-) diff --git a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb index 0a509cf337d56e..b9548546ef9d4a 100644 --- a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb +++ b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb @@ -115,7 +115,7 @@ "\n", "Answer:\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "responses = [\n", " \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. 
John Doe's phone number is (999)253-9876.\",\n", @@ -249,7 +249,7 @@ "\n", "Answer:\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "responses = [\n", " \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n", @@ -412,7 +412,7 @@ "\n", "Answer:\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "responses = [\n", " \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n", @@ -571,7 +571,7 @@ "\n", "template = \"\"\"{question}\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "llm = HuggingFaceHub(\n", " repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 256}\n", ")" @@ -724,7 +724,7 @@ "\"\"\"\n", "\n", "# prompt template for input text\n", - "llm_prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "llm_prompt = PromptTemplate.from_template(template)\n", "\n", "llm = SagemakerEndpoint(\n", " endpoint_name=endpoint_name,\n", diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx index 94b6a7dc642e16..a43579dfef5616 100644 --- a/docs/docs/guides/safety/moderation.mdx +++ b/docs/docs/guides/safety/moderation.mdx @@ -180,7 +180,7 @@ we will prompt the model, so it says something harmful. ```python -prompt = PromptTemplate(template="{text}", input_variables=["text"]) +prompt = PromptTemplate.from_template("{text}") llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt) text = """We are playing a game of repeat after me. @@ -223,7 +223,7 @@ Now let's walk through an example of using it with an LLMChain which has multipl ```python -prompt = PromptTemplate(template="{setup}{new_input}Person2:", input_variables=["setup", "new_input"]) +prompt = PromptTemplate.from_template("{setup}{new_input}Person2:") llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt) setup = """We are playing a game of repeat after me. 
diff --git a/docs/docs/integrations/llms/aleph_alpha.ipynb b/docs/docs/integrations/llms/aleph_alpha.ipynb index 7657324d439e56..3d7fb662338294 100644 --- a/docs/docs/integrations/llms/aleph_alpha.ipynb +++ b/docs/docs/integrations/llms/aleph_alpha.ipynb @@ -75,7 +75,7 @@ "\n", "A:\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb b/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb index db4d8d588ac68b..0c51a6a4f9f269 100644 --- a/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb +++ b/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb @@ -23,7 +23,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/anyscale.ipynb b/docs/docs/integrations/llms/anyscale.ipynb index e297cfd9492b2c..37f26c66ad2d3d 100644 --- a/docs/docs/integrations/llms/anyscale.ipynb +++ b/docs/docs/integrations/llms/anyscale.ipynb @@ -66,7 +66,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/aphrodite.ipynb b/docs/docs/integrations/llms/aphrodite.ipynb index 85f2a1c4572e14..5cbbfb1ce84220 100644 --- a/docs/docs/integrations/llms/aphrodite.ipynb +++ b/docs/docs/integrations/llms/aphrodite.ipynb @@ -151,7 +151,7 @@ "template = \"\"\"Question: {question}\n", "\n", "Answer: Let's think step by step.\"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", diff --git a/docs/docs/integrations/llms/banana.ipynb b/docs/docs/integrations/llms/banana.ipynb index f66f15c0a8e1b4..7fbdc2921d587d 100644 --- a/docs/docs/integrations/llms/banana.ipynb +++ b/docs/docs/integrations/llms/banana.ipynb @@ -66,7 +66,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/bittensor.ipynb b/docs/docs/integrations/llms/bittensor.ipynb index 191a84df7ca9d3..92ebb9b7ac6b8e 100644 --- a/docs/docs/integrations/llms/bittensor.ipynb +++ b/docs/docs/integrations/llms/bittensor.ipynb @@ -92,7 +92,7 @@ "Answer: Let's think step by step.\"\"\"\n", "\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "# System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model\n", "llm = NIBittensorLLM(\n", diff --git a/docs/docs/integrations/llms/cerebriumai.ipynb b/docs/docs/integrations/llms/cerebriumai.ipynb index a557a7c2a7c371..e062e4ad496cf2 100644 --- a/docs/docs/integrations/llms/cerebriumai.ipynb +++ b/docs/docs/integrations/llms/cerebriumai.ipynb @@ -101,7 +101,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git 
a/docs/docs/integrations/llms/chatglm.ipynb b/docs/docs/integrations/llms/chatglm.ipynb index 12de26dacfe474..c004219061f766 100644 --- a/docs/docs/integrations/llms/chatglm.ipynb +++ b/docs/docs/integrations/llms/chatglm.ipynb @@ -53,7 +53,7 @@ "outputs": [], "source": [ "template = \"\"\"{question}\"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { @@ -130,7 +130,7 @@ "outputs": [], "source": [ "template = \"\"\"{question}\"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/clarifai.ipynb b/docs/docs/integrations/llms/clarifai.ipynb index 023bd3ac501dac..952263de0250d1 100644 --- a/docs/docs/integrations/llms/clarifai.ipynb +++ b/docs/docs/integrations/llms/clarifai.ipynb @@ -114,7 +114,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/cloudflare_workersai.ipynb b/docs/docs/integrations/llms/cloudflare_workersai.ipynb index a4cf57440a0c6b..030b192d093eb6 100644 --- a/docs/docs/integrations/llms/cloudflare_workersai.ipynb +++ b/docs/docs/integrations/llms/cloudflare_workersai.ipynb @@ -26,7 +26,7 @@ "\n", "AI Assistant: \"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/ctransformers.ipynb b/docs/docs/integrations/llms/ctransformers.ipynb index fa9d604cd16442..6231c0a2e1b567 100644 --- a/docs/docs/integrations/llms/ctransformers.ipynb +++ b/docs/docs/integrations/llms/ctransformers.ipynb @@ -109,7 +109,7 @@ "\n", "Answer:\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", diff --git a/docs/docs/integrations/llms/ctranslate2.ipynb b/docs/docs/integrations/llms/ctranslate2.ipynb index 33d41d4233b52c..f80d320bf22cd3 100644 --- a/docs/docs/integrations/llms/ctranslate2.ipynb +++ b/docs/docs/integrations/llms/ctranslate2.ipynb @@ -201,7 +201,7 @@ "template = \"\"\"{question}\n", "\n", "Let's think step by step. 
\"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", diff --git a/docs/docs/integrations/llms/deepinfra.ipynb b/docs/docs/integrations/llms/deepinfra.ipynb index 14d2730a093244..871a0f4d9ded96 100644 --- a/docs/docs/integrations/llms/deepinfra.ipynb +++ b/docs/docs/integrations/llms/deepinfra.ipynb @@ -146,7 +146,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/forefrontai.ipynb b/docs/docs/integrations/llms/forefrontai.ipynb index 075436f7d8ba4a..eef4fcb8e9803b 100644 --- a/docs/docs/integrations/llms/forefrontai.ipynb +++ b/docs/docs/integrations/llms/forefrontai.ipynb @@ -97,7 +97,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/gigachat.ipynb b/docs/docs/integrations/llms/gigachat.ipynb index 20a708726883c3..8e1e4a43d07f90 100644 --- a/docs/docs/integrations/llms/gigachat.ipynb +++ b/docs/docs/integrations/llms/gigachat.ipynb @@ -80,7 +80,7 @@ "\n", "template = \"What is capital of {country}?\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", diff --git a/docs/docs/integrations/llms/gooseai.ipynb b/docs/docs/integrations/llms/gooseai.ipynb index e417f613657c2b..b665106ebe7242 100644 --- a/docs/docs/integrations/llms/gooseai.ipynb +++ b/docs/docs/integrations/llms/gooseai.ipynb @@ -111,7 +111,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/gpt4all.ipynb b/docs/docs/integrations/llms/gpt4all.ipynb index 3fd8b51a76a689..a1593774587935 100644 --- a/docs/docs/integrations/llms/gpt4all.ipynb +++ b/docs/docs/integrations/llms/gpt4all.ipynb @@ -73,7 +73,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/gradient.ipynb b/docs/docs/integrations/llms/gradient.ipynb index 8d46fa089684cd..d1bfe21e658308 100644 --- a/docs/docs/integrations/llms/gradient.ipynb +++ b/docs/docs/integrations/llms/gradient.ipynb @@ -175,7 +175,7 @@ "\n", "Answer: \"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/huggingface_hub.ipynb b/docs/docs/integrations/llms/huggingface_hub.ipynb index b05e3b15ad6f13..67dbe3c41f3855 100644 --- a/docs/docs/integrations/llms/huggingface_hub.ipynb +++ b/docs/docs/integrations/llms/huggingface_hub.ipynb @@ -118,7 +118,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/llamacpp.ipynb 
b/docs/docs/integrations/llms/llamacpp.ipynb index a3a22acb7bb9db..9868fab6ae9116 100644 --- a/docs/docs/integrations/llms/llamacpp.ipynb +++ b/docs/docs/integrations/llms/llamacpp.ipynb @@ -234,7 +234,7 @@ "\n", "Answer: Let's work this out in a step by step way to be sure we have the right answer.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/manifest.ipynb b/docs/docs/integrations/llms/manifest.ipynb index a4a09fe1804387..005141cf9add6e 100644 --- a/docs/docs/integrations/llms/manifest.ipynb +++ b/docs/docs/integrations/llms/manifest.ipynb @@ -91,7 +91,7 @@ "\n", "\n", "CONCISE SUMMARY:\"\"\"\n", - "prompt = PromptTemplate(template=_prompt, input_variables=[\"text\"])\n", + "prompt = PromptTemplate.from_template(_prompt)\n", "\n", "text_splitter = CharacterTextSplitter()\n", "\n", diff --git a/docs/docs/integrations/llms/minimax.ipynb b/docs/docs/integrations/llms/minimax.ipynb index efb3a924b5db4e..b4ed78c2e173a2 100644 --- a/docs/docs/integrations/llms/minimax.ipynb +++ b/docs/docs/integrations/llms/minimax.ipynb @@ -113,7 +113,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/modal.ipynb b/docs/docs/integrations/llms/modal.ipynb index f81abf6f434312..de601cf8e60508 100644 --- a/docs/docs/integrations/llms/modal.ipynb +++ b/docs/docs/integrations/llms/modal.ipynb @@ -122,7 +122,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/mosaicml.ipynb b/docs/docs/integrations/llms/mosaicml.ipynb index c20bf8c4f2cfac..48307b409d01bc 100644 --- a/docs/docs/integrations/llms/mosaicml.ipynb +++ b/docs/docs/integrations/llms/mosaicml.ipynb @@ -55,7 +55,7 @@ "source": [ "template = \"\"\"Question: {question}\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/nlpcloud.ipynb b/docs/docs/integrations/llms/nlpcloud.ipynb index 342e8df591f894..dd93614efbdc34 100644 --- a/docs/docs/integrations/llms/nlpcloud.ipynb +++ b/docs/docs/integrations/llms/nlpcloud.ipynb @@ -90,7 +90,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/octoai.ipynb b/docs/docs/integrations/llms/octoai.ipynb index 589880f293f5c7..242588d07f8e13 100644 --- a/docs/docs/integrations/llms/octoai.ipynb +++ b/docs/docs/integrations/llms/octoai.ipynb @@ -61,7 +61,7 @@ "outputs": [], "source": [ "template = \"\"\"Below is an instruction that describes a task. 
Write a response that appropriately completes the request.\\n Instruction:\\n{question}\\n Response: \"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/openai.ipynb b/docs/docs/integrations/llms/openai.ipynb index cbaab6002f7492..8e072675373c7a 100644 --- a/docs/docs/integrations/llms/openai.ipynb +++ b/docs/docs/integrations/llms/openai.ipynb @@ -84,7 +84,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/openllm.ipynb b/docs/docs/integrations/llms/openllm.ipynb index 6fc14b3d469701..0bcd3a9bb14696 100644 --- a/docs/docs/integrations/llms/openllm.ipynb +++ b/docs/docs/integrations/llms/openllm.ipynb @@ -119,7 +119,7 @@ "\n", "template = \"What is a good name for a company that makes {product}?\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"product\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", diff --git a/docs/docs/integrations/llms/openlm.ipynb b/docs/docs/integrations/llms/openlm.ipynb index 766a4419c3ba4b..5d800e130f6f11 100644 --- a/docs/docs/integrations/llms/openlm.ipynb +++ b/docs/docs/integrations/llms/openlm.ipynb @@ -97,7 +97,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "for model in [\"text-davinci-003\", \"huggingface.co/gpt2\"]:\n", " llm = OpenLM(model=model)\n", diff --git a/docs/docs/integrations/llms/petals.ipynb b/docs/docs/integrations/llms/petals.ipynb index 4d0900a53ee025..779a8d9e2bf476 100644 --- a/docs/docs/integrations/llms/petals.ipynb +++ b/docs/docs/integrations/llms/petals.ipynb @@ -133,7 +133,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/pipelineai.ipynb b/docs/docs/integrations/llms/pipelineai.ipynb index 7ed359465630f6..ed97a58e00cd88 100644 --- a/docs/docs/integrations/llms/pipelineai.ipynb +++ b/docs/docs/integrations/llms/pipelineai.ipynb @@ -107,7 +107,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/predictionguard.ipynb b/docs/docs/integrations/llms/predictionguard.ipynb index c6daa2d51851b1..1200680cd9bef1 100644 --- a/docs/docs/integrations/llms/predictionguard.ipynb +++ b/docs/docs/integrations/llms/predictionguard.ipynb @@ -118,7 +118,7 @@ "Query: {query}\n", "\n", "Result: \"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"query\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { @@ -191,7 +191,7 @@ "template = \"\"\"Question: {question}\n", "\n", "Answer: Let's think step by step.\"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n", "\n", "question = \"What NFL team won the Super Bowl 
in the year Justin Beiber was born?\"\n", @@ -209,7 +209,7 @@ "outputs": [], "source": [ "template = \"\"\"Write a {adjective} poem about {subject}.\"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"adjective\", \"subject\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n", "\n", "llm_chain.predict(adjective=\"sad\", subject=\"ducks\")" diff --git a/docs/docs/integrations/llms/runhouse.ipynb b/docs/docs/integrations/llms/runhouse.ipynb index 30fb5654946f42..fe44389d52aa6b 100644 --- a/docs/docs/integrations/llms/runhouse.ipynb +++ b/docs/docs/integrations/llms/runhouse.ipynb @@ -83,7 +83,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/stochasticai.ipynb b/docs/docs/integrations/llms/stochasticai.ipynb index 1f3ecd98d0dadf..6a58aae7361508 100644 --- a/docs/docs/integrations/llms/stochasticai.ipynb +++ b/docs/docs/integrations/llms/stochasticai.ipynb @@ -96,7 +96,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/textgen.ipynb b/docs/docs/integrations/llms/textgen.ipynb index b7bf0941c0551a..1b4aed8320a16e 100644 --- a/docs/docs/integrations/llms/textgen.ipynb +++ b/docs/docs/integrations/llms/textgen.ipynb @@ -53,7 +53,7 @@ "Answer: Let's think step by step.\"\"\"\n", "\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "llm = TextGen(model_url=model_url)\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n", @@ -104,7 +104,7 @@ "Answer: Let's think step by step.\"\"\"\n", "\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "llm = TextGen(\n", " model_url=model_url, streaming=True, callbacks=[StreamingStdOutCallbackHandler()]\n", ")\n", diff --git a/docs/docs/integrations/llms/titan_takeoff.ipynb b/docs/docs/integrations/llms/titan_takeoff.ipynb index ce2ad3179745a4..b7df1bb00167a2 100644 --- a/docs/docs/integrations/llms/titan_takeoff.ipynb +++ b/docs/docs/integrations/llms/titan_takeoff.ipynb @@ -146,7 +146,7 @@ "\n", "template = \"What is the capital of {country}\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(llm=llm, prompt=prompt)\n", "\n", diff --git a/docs/docs/integrations/llms/tongyi.ipynb b/docs/docs/integrations/llms/tongyi.ipynb index 1e3da253910549..7b57e4c462d32a 100644 --- a/docs/docs/integrations/llms/tongyi.ipynb +++ b/docs/docs/integrations/llms/tongyi.ipynb @@ -95,7 +95,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/vllm.ipynb b/docs/docs/integrations/llms/vllm.ipynb index 6abf9a1376bc46..4d88a2714fa9ba 100644 --- a/docs/docs/integrations/llms/vllm.ipynb +++ b/docs/docs/integrations/llms/vllm.ipynb @@ 
-135,7 +135,7 @@ "template = \"\"\"Question: {question}\n", "\n", "Answer: Let's think step by step.\"\"\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", diff --git a/docs/docs/integrations/llms/writer.ipynb b/docs/docs/integrations/llms/writer.ipynb index fed4af1669065d..5c2206d1f56b66 100644 --- a/docs/docs/integrations/llms/writer.ipynb +++ b/docs/docs/integrations/llms/writer.ipynb @@ -72,7 +72,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/llms/xinference.ipynb b/docs/docs/integrations/llms/xinference.ipynb index a290744670576f..5643750a4b99c7 100644 --- a/docs/docs/integrations/llms/xinference.ipynb +++ b/docs/docs/integrations/llms/xinference.ipynb @@ -126,7 +126,7 @@ "\n", "template = \"Where can we visit in the capital of {country}?\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n", + "prompt = PromptTemplate.from_template(template)\n", "\n", "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", diff --git a/docs/docs/integrations/llms/yandex.ipynb b/docs/docs/integrations/llms/yandex.ipynb index 0dfa53bd61cca7..42093fec89660f 100644 --- a/docs/docs/integrations/llms/yandex.ipynb +++ b/docs/docs/integrations/llms/yandex.ipynb @@ -56,7 +56,7 @@ "outputs": [], "source": [ "template = \"What is the capital of {country}?\"\n", - "prompt = PromptTemplate(template=template, input_variables=[\"country\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/integrations/providers/predictionguard.mdx b/docs/docs/integrations/providers/predictionguard.mdx index 72d36bc0193e90..fdb0f0a397cc21 100644 --- a/docs/docs/integrations/providers/predictionguard.mdx +++ b/docs/docs/integrations/providers/predictionguard.mdx @@ -55,7 +55,7 @@ Head to stories to get ALL the deets on each box! 👆 BONUS: Save 50% on your f Query: {query} Result: """ -prompt = PromptTemplate(template=template, input_variables=["query"]) +prompt = PromptTemplate.from_template(template) # With "guarding" or controlling the output of the LLM. See the # Prediction Guard docs (https://docs.predictionguard.com) to learn how to @@ -93,7 +93,7 @@ pgllm = PredictionGuard(model="OpenAI-gpt-3.5-turbo-instruct") template = """Question: {question} Answer: Let's think step by step.""" -prompt = PromptTemplate(template=template, input_variables=["question"]) +prompt = PromptTemplate.from_template(template) llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True) question = "What NFL team won the Super Bowl in the year Justin Beiber was born?" 
diff --git a/docs/docs/integrations/providers/ray_serve.ipynb b/docs/docs/integrations/providers/ray_serve.ipynb index 144fb5723346de..b48e76710d09b5 100644 --- a/docs/docs/integrations/providers/ray_serve.ipynb +++ b/docs/docs/integrations/providers/ray_serve.ipynb @@ -135,7 +135,7 @@ " # We initialize the LLM, template and the chain here\n", " llm = OpenAI(openai_api_key=OPENAI_API_KEY)\n", " template = \"Question: {question}\\n\\nAnswer: Let's think step by step.\"\n", - " prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", + " prompt = PromptTemplate.from_template(template)\n", " self.chain = LLMChain(llm=llm, prompt=prompt)\n", "\n", " def _run_chain(self, text: str):\n", diff --git a/docs/docs/integrations/providers/shaleprotocol.md b/docs/docs/integrations/providers/shaleprotocol.md index 2aced9cb6383a4..dbdd3caa6cf236 100644 --- a/docs/docs/integrations/providers/shaleprotocol.md +++ b/docs/docs/integrations/providers/shaleprotocol.md @@ -33,7 +33,7 @@ template = """Question: {question} # Answer: Let's think step by step.""" -prompt = PromptTemplate(template=template, input_variables=["question"]) +prompt = PromptTemplate.from_template(template) llm_chain = LLMChain(prompt=prompt, llm=llm) diff --git a/docs/docs/integrations/text_embedding/clarifai.ipynb b/docs/docs/integrations/text_embedding/clarifai.ipynb index ab006c48b353ce..f10a9a463a671c 100644 --- a/docs/docs/integrations/text_embedding/clarifai.ipynb +++ b/docs/docs/integrations/text_embedding/clarifai.ipynb @@ -101,7 +101,7 @@ "\n", "Answer: Let's think step by step.\"\"\"\n", "\n", - "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" + "prompt = PromptTemplate.from_template(template)" ] }, { diff --git a/docs/docs/modules/model_io/prompts/partial.ipynb b/docs/docs/modules/model_io/prompts/partial.ipynb index 4c937ba7286770..274cc6fb720455 100644 --- a/docs/docs/modules/model_io/prompts/partial.ipynb +++ b/docs/docs/modules/model_io/prompts/partial.ipynb @@ -37,7 +37,7 @@ "source": [ "from langchain.prompts import PromptTemplate\n", "\n", - "prompt = PromptTemplate(template=\"{foo}{bar}\", input_variables=[\"foo\", \"bar\"])\n", + "prompt = PromptTemplate.from_template(\"{foo}{bar}\")\n", "partial_prompt = prompt.partial(foo=\"foo\")\n", "print(partial_prompt.format(bar=\"baz\"))" ]
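For anyone applying the same migration in their own code, here is a minimal sketch of the pattern this patch repeats across the docs (assumes `langchain` is installed; the template text is illustrative rather than taken from any one notebook). `PromptTemplate.from_template` infers the input variables from the `{placeholder}` names in the template string, so the explicit `input_variables` list becomes redundant and can no longer drift out of sync with the template:

```python
from langchain.prompts import PromptTemplate

template = """Question: {question}

Answer: Let's think step by step."""

# Before: input variables listed explicitly, and they must match the template.
prompt = PromptTemplate(template=template, input_variables=["question"])

# After: from_template infers {question} from the template string itself.
prompt = PromptTemplate.from_template(template)

assert prompt.input_variables == ["question"]
print(prompt.format(question="What NFL team won the Super Bowl in 1994?"))

# Prompts built either way behave identically downstream, e.g. with .partial():
partial_prompt = PromptTemplate.from_template("{foo}{bar}").partial(foo="foo")
print(partial_prompt.format(bar="baz"))  # -> "foobaz"
```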