Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion packages/component_code_gen/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ cp .env.example .env
- OPENAI_API_KEY=azure-api-key
- OPENAI_MODEL=gpt-4-32k

4. Create a file named `instructions.md` with the same structure as the `instructions.md.example` file:
5. Create a file named `instructions.md` with the same structure as the `instructions.md.example` file:

```
## Prompt
Expand Down
44 changes: 18 additions & 26 deletions packages/component_code_gen/code_gen/generate_component_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,19 +10,19 @@


def generate_code(app, prompt, templates, tries):
validate_inputs(app, prompt, templates, tries)
db = supabase_helpers.SupabaseConnector()
docs_meta = db.get_app_docs_meta(app)
results = []

auth_example = None
auth_meta = db.get_app_auth_meta(app)
if auth_meta.get('component_code_scaffold_raw'):
auth_example = f"Here's how authentication is done in {app}:\n\n{auth_meta['component_code_scaffold_raw']}\n\n"

for i in range(tries):
logger.debug(f'Attempt {i+1} of {tries}')
validate_inputs(app, prompt, templates, tries)

db = supabase_helpers.SupabaseConnector()

auth_meta = db.get_app_auth_meta(app)
# TODO: is this needed only for actions?
# add_code_example(templates, auth_meta['component_code_scaffold_raw'])

docs_meta = db.get_app_docs_meta(app)
# Initialize a flag to track if we obtained any results with docs
has_docs_result = False

Expand All @@ -31,20 +31,20 @@ def generate_code(app, prompt, templates, tries):
if contents:
docs = {row['url']: row['content'] for row in contents}
results.append(call_langchain(
app, prompt, templates, docs, 'api reference'))
app, prompt, templates, auth_example, docs, 'api reference'))
has_docs_result = True

if 'openapi_url' in docs_meta:
contents = db.get_openapi_contents(app)
if contents:
docs = {row['path']: row['content'] for row in contents}
results.append(call_langchain(
app, prompt, templates, docs, 'openapi'))
app, prompt, templates, auth_example, docs, 'openapi'))
has_docs_result = True

# If we haven't obtained any results using docs
if not has_docs_result:
results.append(call_langchain(app, prompt, templates))
results.append(call_langchain(app, prompt, templates, auth_example))

# Create a new prompt string
new_prompt = "I've asked other GPT agents to generate the following code based on the prompt and the instructions below. One set of code (or all) may not follow the rules laid out in the prompt or in the instructions below, so you'll need to review it for accuracy. Try to evaluate the examples according to the rules, combine the best parts of each example, and generate a final set of code that solves the problem posed by the prompt and follows all of the rules below. Here are the attempts + code:\n\n---\n\n"
Expand All @@ -54,42 +54,34 @@ def generate_code(app, prompt, templates, tries):

# Call the model again with the new prompt to get the final result
logger.debug(f"Calling the model a final time to summarize the attempts")
return call_langchain(app, new_prompt, templates)
return call_langchain(app, new_prompt, templates, auth_example)


def call_langchain(app, prompt, templates, docs=None, docs_type=None, attempts=0, max_attempts=3):
def call_langchain(app, prompt, templates, auth_example, docs=None, docs_type=None, attempts=0, max_attempts=3):
logger.debug(f"Calling langchain")
# If we don't have docs, or if we can't reach OpenAI to get the parsed docs
if not docs:
logger.debug('No docs available, calling the model directly')
return langchain_helpers.no_docs(app, prompt, templates)
return langchain_helpers.no_docs(app, prompt, templates, auth_example)

if attempts >= max_attempts:
logger.debug('Max attempts reached, calling the model directly')
return langchain_helpers.no_docs(app, prompt, templates)
return langchain_helpers.no_docs(app, prompt, templates, auth_example)

# else if we have docs, call the model with docs
logger.debug(f"Using {docs_type} docs")

result = langchain_helpers.ask_agent(prompt, docs, templates)
result = langchain_helpers.ask_agent(prompt, docs, templates, auth_example)

if result != "I don't know":
return result

logger.debug("Trying again without docs")
return call_langchain(app, prompt, templates, attempts=attempts+1)


def add_code_example(templates, example):
return templates.no_docs_system_instructions % example
return call_langchain(app, prompt, templates, auth_example, attempts=attempts+1)


def validate_inputs(app, prompt, templates, tries):
assert app and type(app) == str
assert prompt and type(prompt) == str
assert tries and type(tries) == int
assert templates.no_docs_user_prompt
assert templates.no_docs_system_instructions
assert templates.with_docs_system_instructions
assert templates.suffix
assert templates.format_instructions
assert templates.system_instructions
34 changes: 12 additions & 22 deletions packages/component_code_gen/config/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,37 +3,28 @@
load_dotenv()


DEFAULTS = {
"OPENAI_API_TYPE": "openai",
"OPENAI_MODEL": "gpt-4",
"OPENAI_API_VERSION": "2023-05-15",
"LOGGING_LEVEL": "WARN",
"ENABLE_DOCS": False
}


def get_env_var(var_name, required=False):
def get_env_var(var_name, required=False, default=None):
if os.environ.get(var_name):
return os.environ.get(var_name)
if required and var_name not in DEFAULTS:
if default is not None:
return default
if required:
raise Exception(f"Environment variable {var_name} is required")
if var_name in DEFAULTS:
return DEFAULTS[var_name]


config = {
"temperature": get_env_var("OPENAI_TEMPERATURE") or 0.5,
"openai_api_type": get_env_var("OPENAI_API_TYPE"),
"temperature": get_env_var("OPENAI_TEMPERATURE", default=0.5),
"openai_api_type": get_env_var("OPENAI_API_TYPE", default="azure"),
"openai": {
"api_key": get_env_var("OPENAI_API_KEY", required=True),
"model": get_env_var("OPENAI_MODEL"),
"model": get_env_var("OPENAI_MODEL", default="gpt-4"),
},
"azure": {
"deployment_name": get_env_var("OPENAI_DEPLOYMENT_NAME"),
"api_version": get_env_var("OPENAI_API_VERSION"),
"api_base": get_env_var("OPENAI_API_BASE"),
"deployment_name": get_env_var("OPENAI_DEPLOYMENT_NAME", required=True),
"api_version": get_env_var("OPENAI_API_VERSION", default="2023-05-15"),
"api_base": get_env_var("OPENAI_API_BASE", required=True),
"api_key": get_env_var("OPENAI_API_KEY", required=True),
"model": get_env_var("OPENAI_MODEL"),
"model": get_env_var("OPENAI_MODEL", default="gpt-4-32k"),
},
"browserless": {
"api_key": get_env_var("BROWSERLESS_API_KEY"),
Expand All @@ -43,7 +34,6 @@ def get_env_var(var_name, required=False):
"api_key": get_env_var("SUPABASE_API_KEY", required=True),
},
"logging": {
"level": get_env_var("LOGGING_LEVEL"),
"level": get_env_var("LOGGING_LEVEL", default="WARN"),
},
"enable_docs": get_env_var("ENABLE_DOCS") or False,
}
38 changes: 24 additions & 14 deletions packages/component_code_gen/helpers/langchain_helpers.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
from templates.common.suffix import suffix
from templates.common.format_instructions import format_instructions
from templates.common.docs_system_instructions import docs_system_instructions
from langchain.schema import (
# AIMessage,
HumanMessage,
Expand All @@ -24,19 +27,25 @@ def create_tools(docs):


class PipedreamOpenAPIAgent:
def __init__(self, docs, templates):
def __init__(self, docs, templates, auth_example):
system_instructions = format_template(
f"{templates.system_instructions(auth_example)}\n{docs_system_instructions}")

tools = OpenAPIExplorerTool.create_tools(docs)
tool_names = [tool.name for tool in tools]

prompt_template = ZeroShotAgent.create_prompt(
tools=tools,
prefix=format_template(templates.with_docs_system_instructions),
suffix=templates.suffix,
format_instructions=templates.format_instructions,
prefix=system_instructions,
suffix=suffix,
format_instructions=format_instructions,
input_variables=['input', 'agent_scratchpad']
)

llm_chain = LLMChain(llm=get_llm(), prompt=prompt_template)
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
verbose = True if config['logging']['level'] == 'DEBUG' else False

self.agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=verbose)

Expand Down Expand Up @@ -66,26 +75,27 @@ def format_result(result):
def get_llm():
if config['openai_api_type'] == "azure":
azure_config = config["azure"]
llm = AzureChatOpenAI(deployment_name=azure_config['deployment_name'],
model_name=azure_config["model"], temperature=config["temperature"], request_timeout=300)
return AzureChatOpenAI(deployment_name=azure_config['deployment_name'],
model_name=azure_config["model"], temperature=config["temperature"], request_timeout=300)
else:
openai_config = config["openai"]
llm = ChatOpenAI(
return ChatOpenAI(
model_name=openai_config["model"], temperature=config["temperature"], request_timeout=300)
return llm


def ask_agent(user_prompt, docs, templates):
agent = PipedreamOpenAPIAgent(docs, templates)
def ask_agent(user_prompt, docs, templates, auth_example):
agent = PipedreamOpenAPIAgent(docs, templates, auth_example)
result = agent.run(user_prompt)
return result


def no_docs(app, prompt, templates):
def no_docs(app, prompt, templates, auth_example):
user_prompt = f"{prompt}.The app is {app}."
system_instructions = format_template(templates.system_instructions(auth_example))

result = get_llm()(messages=[
SystemMessage(content=format_template(
templates.no_docs_system_instructions)),
HumanMessage(content=templates.no_docs_user_prompt % (prompt, app)),
SystemMessage(content=system_instructions),
HumanMessage(content=user_prompt),
])

return format_result(result.content)
32 changes: 32 additions & 0 deletions packages/component_code_gen/templates/actions/additional_rules.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Prompt fragment appended to the system instructions when generating
# Pipedream *action* components. This text is consumed verbatim by the
# prompt builder, so edits here directly change model behavior.
# NOTE(review): rule numbering jumps from 2 to 9 — presumably the numbers
# align with a master rule list assembled elsewhere; confirm before renumbering.
additional_rules = """## Additional rules for actions

1. `return` the final value from the step. The data returned from steps must be JSON-serializable. The returned data is displayed in Pipedream. Think about it: if you don't return the data, the user won't see it.

2. Always use this signature for the run method:

async run({steps, $}) {
// your code here
}

Always pass {steps, $}, even if you don't use them in the code. Think about it: the user needs to access the steps and $ context when they edit the code.

9. Remember that `@pipedream/platform` axios returns a Promise that resolves to the HTTP response data. There is NO `data` property in the response that contains the data. The data from the HTTP response is returned directly in the response, not in the `data` property. Think about it: if you try to extract a data property that doesn't exist, the variable will hold the value `undefined`. You must return the data from the response directly and extract the proper data in the format provided by the API docs.

For example, do this:

const response = await axios(this, {
url: `https://api.stability.ai/v1/engines/list`,
headers: {
Authorization: `Bearer ${this.dreamstudio.$auth.api_key}`,
},
});
// process the response data. response.data is undefined

not this:

const { data } = await axios(this, {
url: `https://api.stability.ai/v1/engines/list`,
headers: {
Authorization: `Bearer ${this.dreamstudio.$auth.api_key}`,
},
});"""
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
export_summary = """A short summary should be exported before the end so that the user can quickly read what has happened. This is done by calling `$.export("$summary", "Your summary here")`. This is not optional."""
12 changes: 12 additions & 0 deletions packages/component_code_gen/templates/actions/introduction.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Experimenting with prompt from an AI researcher: https://twitter.com/jeremyphoward/status/1689464589191454720
# Opening section of the system prompt for action-component generation:
# frames the model's role, states the goal (Pipedream Action Components),
# and defines what a Pipedream component is (`defineComponent` default export).
introduction = """## Instructions

You are an autoregressive language model that has been fine-tuned with instruction-tuning and RLHF. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning.

Your goal is to create Pipedream Action Components. Your code should solve the requirements provided below.

Other GPT agents will be reviewing your work, and will provide feedback on your code. You will be rewarded for code that is accurate, factual, thoughtful, nuanced, and solves the requirements provided in the instructions.

## Pipedream components

All Pipedream components are Node.js modules that have a default export: `defineComponent`. `defineComponent` is provided to the environment as a global — you do not need to import `defineComponent`. `defineComponent` is a function that takes an object — a Pipedream component — as its single argument."""
28 changes: 28 additions & 0 deletions packages/component_code_gen/templates/actions/main_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Canonical few-shot example embedded in the prompt: a complete action
# component (OpenAI "List Models") showing props, `run({steps, $})`,
# platform-axios usage, `$.export("$summary", ...)`, and the final return.
main_example = """Here's an example component:

```javascript
import { axios } from "@pipedream/platform"
export default defineComponent({
key: "openai-list-models",
name: "List Models",
description: "Lists all models available to the user.",
version: "0.0.{{ts}}",
type: "action",
props: {
openai: {
type: "app",
app: "openai",
}
},
async run({steps, $}) {
const response = await axios($, {
url: `https://api.openai.com/v1/models`,
headers: {
Authorization: `Bearer ${this.openai.$auth.api_key}`,
},
})
$.export("$summary", "Successfully listed models")
return response
},
})
```"""
69 changes: 69 additions & 0 deletions packages/component_code_gen/templates/actions/other_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# Second few-shot example for the prompt: a Slack "Send Message" action that
# demonstrates user-configurable props (`channel`, `text`) being passed as
# request params — the behavior the rules insist on — plus a skeleton showing
# where generated code belongs inside `run`.
other_example = """Here's an example Pipedream component that makes a test request against the Slack API:

export default defineComponent({
key: "slack-send-message",
name: "Send Message",
version: "0.0.{{ts}}",
description: "Sends a message to a channel. [See docs here]()",
type: "action",
props: {
slack: {
type: "app",
app: "slack",
},
channel: {
type: "string",
label: "Channel",
description: "The channel to post the message to",
},
text: {
type: "string",
label: "Message Text",
description: "The text of the message to post",
},
},
async run({ steps, $ }) {
const response = await axios($, {
method: "POST",
url: `https://slack.com/api/chat.postMessage`,
headers: {
Authorization: `Bearer ${this.slack.$auth.oauth_access_token}`,
},
data: {
channel: this.channel,
text: this.text,
},
})
$.export("$summary", "Sent message successfully")
return response
},
});

Notice this section:

data: {
channel: this.channel,
text: this.text,
},

This shows you how to pass the values of props (e.g. this.channel and this.text) as params to the API. This is one of the most important things to know: you MUST generate code that adds inputs as props so that users can enter their own values when making the API request. You MUST NOT pass static values. See rule #2 below for more detail.

The code you generate should be placed within the `run` method of the Pipedream component:

import { axios } from "@pipedream/platform";

export default defineComponent({
props: {
the_app_name_slug: {
type: "app",
app: "the_app_name_slug",
},
},
async run({ steps, $ }) {
const response = await axios($, {
// Add the axios configuration object to make the HTTP request here
})
$.export("$summary", "Your summary here")
return response
},
});"""
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
additional_rules = """"""
Loading