11"""Callback classes for logging, saving, and tracking optimization progress."""
22
33import os
4- import time
4+ from datetime import datetime
55from typing import Literal
66
77import numpy as np
@@ -64,7 +64,8 @@ def __init__(self, logger):
     def on_step_end(self, optimizer):
         """Log information about the current step."""
         self.step += 1
-        self.logger.critical(f"✨Step {self.step} ended✨")
+        time = datetime.now().strftime("%d-%m-%y %H:%M:%S:%f")
+        self.logger.critical(f"{time} - ✨Step {self.step} ended✨")
         for i, (prompt, score) in enumerate(zip(optimizer.prompts, optimizer.scores)):
             self.logger.critical(f"*** Prompt {i}: Score: {score}")
             self.logger.critical(f"{prompt}")
@@ -78,10 +79,11 @@ def on_train_end(self, optimizer, logs=None):
             optimizer: The optimizer object that called the callback.
             logs: Additional information to log.
         """
+        time = datetime.now().strftime("%d-%m-%y %H:%M:%S:%f")
         if logs is None:
-            self.logger.critical("Training ended")
+            self.logger.critical(f"{time} - Training ended")
         else:
-            self.logger.critical(f"Training ended - {logs}")
+            self.logger.critical(f"{time} - Training ended - {logs}")
 
         return True
 
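(Not part of the diff: a minimal standalone sketch of the log prefix the strftime call above produces; "%f" renders microseconds, so the trailing field has six digits.)

# Sketch only; uses nothing beyond the standard library and the format string from the diff.
from datetime import datetime

time = datetime.now().strftime("%d-%m-%y %H:%M:%S:%f")
print(f"{time} - ✨Step 1 ended✨")
# e.g. 05-03-24 14:30:45:123456 - ✨Step 1 ended✨
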
@@ -109,8 +111,8 @@ def __init__(self, dir):
         self.step = 0
         self.input_tokens = 0
         self.output_tokens = 0
-        self.start_time = time.time()
-        self.step_time = time.time()
+        self.start_time = datetime.now()
+        self.step_time = datetime.now()
 
     def on_step_end(self, optimizer):
         """Save prompts and scores to csv.
@@ -124,12 +126,12 @@ def on_step_end(self, optimizer):
124126 "step" : [self .step ] * len (optimizer .prompts ),
125127 "input_tokens" : [optimizer .meta_llm .input_token_count - self .input_tokens ] * len (optimizer .prompts ),
126128 "output_tokens" : [optimizer .meta_llm .output_token_count - self .output_tokens ] * len (optimizer .prompts ),
127- "time_elapsed" : [time . time () - self .step_time ] * len (optimizer .prompts ),
129+ "time_elapsed" : [( datetime . now () - self .step_time ). total_seconds () ] * len (optimizer .prompts ),
128130 "score" : optimizer .scores ,
129131 "prompt" : optimizer .prompts ,
130132 }
131133 )
132- self .step_time = time . time ()
134+ self .step_time = datetime . now ()
133135 self .input_tokens = optimizer .meta_llm .input_token_count
134136 self .output_tokens = optimizer .meta_llm .output_token_count
135137
@@ -151,7 +153,8 @@ def on_train_end(self, optimizer):
                 steps=self.step,
                 input_tokens=optimizer.meta_llm.input_token_count,
                 output_tokens=optimizer.meta_llm.output_token_count,
-                time_elapsed=time.time() - self.start_time,
+                time_elapsed=(datetime.now() - self.start_time).total_seconds(),
+                time=datetime.now(),
                 score=np.array(optimizer.scores).mean(),
                 best_prompts=str(optimizer.prompts),
             ),
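
(Not part of the diff: a minimal standalone sketch, standard library only, showing that switching from time.time() arithmetic to timedelta.total_seconds() keeps the elapsed-time values as plain floats in seconds.)

# Sketch only; the 0.5 s sleep is just for illustration.
from datetime import datetime
import time

start = datetime.now()
time.sleep(0.5)
elapsed = (datetime.now() - start).total_seconds()
print(type(elapsed), round(elapsed, 1))  # <class 'float'> 0.5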