diff --git a/webui/app.py b/webui/app.py
index 20116fc0..ebb0f909 100644
--- a/webui/app.py
+++ b/webui/app.py
@@ -19,6 +19,8 @@
 from graphgen.graphgen import GraphGen
 from graphgen.models import OpenAIModel, Tokenizer, TraverseStrategy
 from graphgen.models.llm.limitter import RPM, TPM
+from graphgen.utils import set_logger
+
 
 css = """
 .center-row {
@@ -30,8 +32,9 @@
 def init_graph_gen(config: dict, env: dict) -> GraphGen:
     # Set up working directory
-    working_dir = setup_workspace(os.path.join(root_dir, "cache"))
+    log_file, working_dir = setup_workspace(os.path.join(root_dir, "cache"))
+    set_logger(log_file, if_stream=False)
 
     graph_gen = GraphGen(
         working_dir=working_dir
     )

@@ -86,7 +89,7 @@ def sum_tokens(client):
         "tokenizer": arguments[2],
         "qa_form": arguments[3],
         "web_search": False,
-        "quiz_samples": 2,
+        "quiz_samples": arguments[19],
         "traverse_strategy": {
             "bidirectional": arguments[4],
             "expand_method": arguments[5],
@@ -159,7 +162,7 @@ def sum_tokens(client):
     if config['if_trainee_model']:
         # Generate quiz
-        graph_gen.quiz(max_samples=quiz_samples)
+        graph_gen.quiz(max_samples=config['quiz_samples'])
 
         # Judge statements
         graph_gen.judge()

@@ -472,7 +475,7 @@ def sum_tokens(client):
                 bidirectional, expand_method, max_extra_edges, max_tokens,
                 max_depth, edge_sampling, isolated_node_strategy, loss_strategy,
                 base_url, synthesizer_model, trainee_model,
-                api_key, chunk_size, rpm, tpm, token_counter
+                api_key, chunk_size, rpm, tpm, quiz_samples, token_counter
             ],
             outputs=[output, token_counter],
         )
diff --git a/webui/cache_utils.py b/webui/cache_utils.py
index 43ec9b9d..96c7d4d9 100644
--- a/webui/cache_utils.py
+++ b/webui/cache_utils.py
@@ -9,7 +9,11 @@ def setup_workspace(folder):
     working_dir = os.path.join(folder, request_id)
     os.makedirs(working_dir, exist_ok=True)
 
-    return working_dir
+    log_dir = os.path.join(folder, "logs")
+    os.makedirs(log_dir, exist_ok=True)
+    log_file = os.path.join(log_dir, f"{request_id}.log")
+
+    return log_file, working_dir
 
 
 def cleanup_workspace(folder):