
Commit a19eb1a

Rename gen code to query llm (#480)
The main reason for this is that `generate_code` does not really generate any code; rather, it queries a given LLM using a specified prompt. Since we now have prompts of various sorts, I feel the name is a bit misplaced. The rename should also make it clearer which API to use if you're working on using LLMs for tasks other than explicit code generation. This came up while doing #479, where one consideration was to have the LLM generate a corpus explicitly without going through a seed-corpus-by-way-of-Python generation.

Ref: #482

Signed-off-by: David Korczynski <david@adalogics.com>
1 parent eb42759 commit a19eb1a
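
For callers the change is purely mechanical: the parameters and behavior stay the same, only the method name moves from `generate_code` to `query_llm`. A minimal migration sketch, assuming a hypothetical call site (the `model`, `prompt`, and `response_dir` names below are illustrative stand-ins; only the method name and its keyword argument come from the diffs in this commit):

    # Illustrative helper; `model` is any llm_toolkit model object and
    # `prompt` is the prompts.Prompt built for it.
    def run_query(model, prompt, response_dir: str) -> None:
      # Before this commit:
      #   model.generate_code(prompt, response_dir=response_dir)
      # After this commit (same arguments, new name):
      model.query_llm(prompt, response_dir=response_dir)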

File tree

5 files changed: 23 additions (+), 25 deletions (−)

  experimental/manual/prompter.py
  llm_toolkit/code_fixer.py
  llm_toolkit/crash_triager.py
  llm_toolkit/models.py
  run_one_experiment.py

experimental/manual/prompter.py

Lines changed: 1 addition & 1 deletion
@@ -72,4 +72,4 @@ def construct_prompt() -> prompts.Prompt:
   model = setup_model()
   prompt = construct_prompt()
   os.makedirs(args.response_dir, exist_ok=True)
-  model.generate_code(prompt, response_dir=args.response_dir)
+  model.query_llm(prompt, response_dir=args.response_dir)

llm_toolkit/code_fixer.py

Lines changed: 1 addition & 1 deletion
@@ -424,7 +424,7 @@ def apply_llm_fix(ai_binary: str,
                                          error_desc, errors, context, instruction)
   prompt.save(prompt_path)
 
-  fixer_model.generate_code(prompt, response_dir)
+  fixer_model.query_llm(prompt, response_dir)
 
 
 def _collect_context(benchmark: benchmarklib.Benchmark,

llm_toolkit/crash_triager.py

Lines changed: 1 addition & 1 deletion
@@ -107,4 +107,4 @@ def apply_llm_triage(
                                         crash_func)
   prompt.save(prompt_path)
 
-  triage_model.generate_code(prompt, response_dir)
+  triage_model.query_llm(prompt, response_dir)

llm_toolkit/models.py

Lines changed: 19 additions & 19 deletions
@@ -125,11 +125,11 @@ def estimate_token_num(self, text) -> int:
 
   # ============================== Generation ============================== #
   @abstractmethod
-  def generate_code(self,
-                    prompt: prompts.Prompt,
-                    response_dir: str,
-                    log_output: bool = False) -> None:
-    """Generates fuzz targets to the |response_dir|."""
+  def query_llm(self,
+                prompt: prompts.Prompt,
+                response_dir: str,
+                log_output: bool = False) -> None:
+    """Queries the LLM and stores responses in |response_dir|."""
 
   @abstractmethod
   def prompt_type(self) -> type[prompts.Prompt]:
@@ -222,11 +222,11 @@ def prompt_type(self) -> type[prompts.Prompt]:
     return prompts.OpenAIPrompt
 
   # ============================== Generation ============================== #
-  def generate_code(self,
-                    prompt: prompts.Prompt,
-                    response_dir: str,
-                    log_output: bool = False) -> None:
-    """Generates code with OpenAI's API."""
+  def query_llm(self,
+                prompt: prompts.Prompt,
+                response_dir: str,
+                log_output: bool = False) -> None:
+    """Queries OpenAI's API and stores response in |response_dir|."""
     if self.ai_binary:
       print(f'OpenAI does not use local AI binary: {self.ai_binary}')
     if self.temperature_list:
@@ -273,11 +273,11 @@ def estimate_token_num(self, text) -> int:
     return int(len(re.split('[^a-zA-Z0-9]+', text)) * 1.5 + 0.5)
 
   # ============================== Generation ============================== #
-  def generate_code(self,
-                    prompt: prompts.Prompt,
-                    response_dir: str,
-                    log_output: bool = False) -> None:
-    """Generates code with internal LLM."""
+  def query_llm(self,
+                prompt: prompts.Prompt,
+                response_dir: str,
+                log_output: bool = False) -> None:
+    """Queries a Google LLM and stores results in |response_dir|."""
     if not self.ai_binary:
       print(f'Error: This model requires a local AI binary: {self.ai_binary}')
       sys.exit(1)
@@ -349,10 +349,10 @@ def _prepare_parameters(self) -> list[dict]:
             self._max_output_tokens
     } for index in range(self.num_samples)]
 
-  def generate_code(self,
-                    prompt: prompts.Prompt,
-                    response_dir: str,
-                    log_output: bool = False) -> None:
+  def query_llm(self,
+                prompt: prompts.Prompt,
+                response_dir: str,
+                log_output: bool = False) -> None:
     del log_output
     if self.ai_binary:
       print(f'VertexAI does not use local AI binary: {self.ai_binary}')
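
All three concrete models in models.py (the OpenAI model, the internal Google model, and the Vertex AI model) are renamed in lockstep with the abstract method, so a model implementation now overrides `query_llm` instead of `generate_code`. A minimal sketch of an implementation of the renamed method follows; the class name, the response file name, and the echo behavior are invented for illustration, and it deliberately does not inherit from the real abstract base class (whose name is not shown in this diff). Only the method signature matches the hunks above.

    import os


    class EchoModel:
      """Toy stand-in that writes the prompt text back as its 'response'."""

      def query_llm(self,
                    prompt,
                    response_dir: str,
                    log_output: bool = False) -> None:
        """Stores one fake response in |response_dir|."""
        os.makedirs(response_dir, exist_ok=True)
        # The file name below is illustrative, not the repository's convention.
        response_path = os.path.join(response_dir, 'response-0.txt')
        with open(response_path, 'w') as f:
          # A real model would send the prompt to an LLM here.
          f.write(str(prompt))
        if log_output:
          print(f'Wrote response to {response_path}')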

run_one_experiment.py

Lines changed: 1 addition & 3 deletions
@@ -87,9 +87,7 @@ def generate_targets(benchmark: Benchmark,
   """Generates fuzz target with LLM."""
   print(f'Generating targets for {benchmark.project} '
         f'{benchmark.function_signature} using {model.name}..')
-  model.generate_code(prompt,
-                      response_dir=work_dirs.raw_targets,
-                      log_output=debug)
+  model.query_llm(prompt, response_dir=work_dirs.raw_targets, log_output=debug)
 
   _, target_ext = os.path.splitext(benchmark.target_path)
   generated_targets = []
