From 71d8b1dd616fd6437adf2abfcc970fa526057849 Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Mon, 24 Nov 2025 15:44:24 +0000 Subject: [PATCH 1/8] add job price estimation --- src/together/lib/cli/api/fine_tuning.py | 53 +++++++++-- src/together/lib/resources/fine_tuning.py | 46 ++++++++++ src/together/lib/types/fine_tuning.py | 33 +++++++ src/together/resources/fine_tuning.py | 107 +++++++++++++++++++++- 4 files changed, 229 insertions(+), 10 deletions(-) diff --git a/src/together/lib/cli/api/fine_tuning.py b/src/together/lib/cli/api/fine_tuning.py index c116d352..bc8cfba3 100644 --- a/src/together/lib/cli/api/fine_tuning.py +++ b/src/together/lib/cli/api/fine_tuning.py @@ -31,6 +31,15 @@ "Do you want to proceed?" ) +_CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS = ( + "The estimated price of the fine-tuning job is {} which is significantly " + "greater than your current credit limit and balance. " + "It will likely fail due to insufficient funds. " + "Please consider increasing your credit limit at https://api.together.xyz/settings/profile\n" + "You can pass `-y` or `--confirm` to your command to skip this message.\n\n" + "Do you want to proceed?" +) + _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$" @@ -324,16 +333,44 @@ def create( raise click.BadParameter("You have specified a number of evaluation loops but no validation file.") if confirm or click.confirm(_CONFIRMATION_MESSAGE, default=True, show_default=True): - response = client.fine_tuning.create( - **training_args, - verbose=True, + finetune_price_estimation_result = client.fine_tuning.estimate_price( + training_file=training_file, + validation_file=validation_file, + model=model, + n_epochs=n_epochs, + n_evals=n_evals, + training_type="lora" if lora else "full", + training_method=training_method, ) - report_string = f"Successfully submitted a fine-tuning job {response.id}" - # created_at reports UTC time, we use .astimezone() to convert to local time - formatted_time = response.created_at.astimezone().strftime("%m/%d/%Y, %H:%M:%S") - report_string += f" at {formatted_time}" - rprint(report_string) + proceed = \ + confirm or \ + finetune_price_estimation_result.allowed_to_proceed or \ + ( + not finetune_price_estimation_result.allowed_to_proceed and \ + click.confirm( + click.style( + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format( + finetune_price_estimation_result.estimated_total_price + ), + fg="red", + ), + default=True, + show_default=True, + ) + ) + + if proceed: + response = client.fine_tuning.create( + **training_args, + verbose=True, + ) + + report_string = f"Successfully submitted a fine-tuning job {response.id}" + # created_at reports UTC time, we use .astimezone() to convert to local time + formatted_time = response.created_at.astimezone().strftime("%m/%d/%Y, %H:%M:%S") + report_string += f" at {formatted_time}" + rprint(report_string) else: click.echo("No confirmation received, stopping job launch") diff --git a/src/together/lib/resources/fine_tuning.py b/src/together/lib/resources/fine_tuning.py index f4779191..5fc18697 100644 --- a/src/together/lib/resources/fine_tuning.py +++ b/src/together/lib/resources/fine_tuning.py @@ -21,6 +21,7 @@ CosineLRSchedulerArgs, LinearLRSchedulerArgs, FinetuneTrainingLimits, + FinetunePriceEstimationRequest, ) AVAILABLE_TRAINING_METHODS = { @@ -236,6 +237,51 @@ def create_finetune_request( return finetune_request +def create_finetune_price_estimation_request( + training_file: str, + validation_file: str | None = None, + model: str | None = None, + n_epochs: int = 1, + n_evals: int | 
None = 0, + training_type: str | None = "lora", + training_method: str | None = "sft", +) -> FinetunePriceEstimationRequest: + """ + Create a fine-tune price estimation request + """ + + if training_method == "sft": + training_method_cls = TrainingMethodSFT(train_on_inputs="auto") + elif training_method == "dpo": + training_method_cls = TrainingMethodDPO( + dpo_beta=None, + dpo_normalize_logratios_by_length=False, + dpo_reference_free=False, + rpo_alpha=None, + simpo_gamma=None, + ) + else: + raise ValueError(f"Invalid training method: {training_method}. Must be 'sft' or 'dpo'") + + if training_type == "full": + training_type_cls = FullTrainingType(type="Full") + elif training_type == "lora": + # lora parameters do not matter for price estimation + training_type_cls = LoRATrainingType(type="Lora", lora_r=10, lora_alpha=10) + else: + raise ValueError(f"Invalid training type: {training_type}. Must be 'full' or 'lora'") + + return FinetunePriceEstimationRequest( + training_file=training_file, + validation_file=validation_file, + model=model, + n_epochs=n_epochs, + n_evals=n_evals, + training_type=training_type_cls, + training_method=training_method_cls, + ) + + def get_model_limits(client: Together, model: str) -> FinetuneTrainingLimits: """ Requests training limits for a specific model diff --git a/src/together/lib/types/fine_tuning.py b/src/together/lib/types/fine_tuning.py index 55327e5a..4173009e 100644 --- a/src/together/lib/types/fine_tuning.py +++ b/src/together/lib/types/fine_tuning.py @@ -395,3 +395,36 @@ class FinetuneRequest(BaseModel): # hf related fields hf_api_token: Union[str, None] = None hf_output_repo_name: Union[str, None] = None + + +class FinetunePriceEstimationRequest(BaseModel): + """ + Fine-tune price estimation request type + """ + + # training file ID + training_file: str + # validation file id + validation_file: Union[str, None] = None + # base model string + model: Union[str, None] = None + # number of epochs to train for + n_epochs: int + # number of evaluation loops to run + n_evals: Union[int, None] = None + # training type + training_type: Union[TrainingType, None] = None + # training method + training_method: Union[TrainingMethodSFT, TrainingMethodDPO] = Field(default_factory=TrainingMethodSFT) + + +class FinetunePriceEstimationResponse(BaseModel): + """ + Fine-tune price estimation request type + """ + + allowed_to_proceed: bool + estimated_train_token_count: int + estimated_eval_token_count: int + user_limit: float + estimated_total_price: float diff --git a/src/together/resources/fine_tuning.py b/src/together/resources/fine_tuning.py index 72ce96dc..eb80a891 100644 --- a/src/together/resources/fine_tuning.py +++ b/src/together/resources/fine_tuning.py @@ -28,14 +28,21 @@ ) from .._base_client import make_request_options from ..types.fine_tune import FineTune -from ..lib.types.fine_tuning import FinetuneResponse, FinetuneTrainingLimits -from ..lib.resources.fine_tuning import get_model_limits, async_get_model_limits, create_finetune_request +from ..lib.types.fine_tuning import FinetuneResponse, FinetuneTrainingLimits, FinetunePriceEstimationResponse +from ..lib.resources.fine_tuning import get_model_limits, async_get_model_limits, create_finetune_request, create_finetune_price_estimation_request from ..types.fine_tuning_list_response import FineTuningListResponse from ..types.fine_tuning_cancel_response import FineTuningCancelResponse from ..types.fine_tuning_delete_response import FineTuningDeleteResponse from ..types.fine_tuning_list_events_response import 
FineTuningListEventsResponse from ..types.fine_tuning_list_checkpoints_response import FineTuningListCheckpointsResponse +_CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS = ( + "The estimated price of the fine-tuning job is {} which is significantly " + "greater than your current credit limit and balance. " + "It will likely fail due to insufficient funds. " + "Please proceed at your own risk." +) + __all__ = ["FineTuningResource", "AsyncFineTuningResource"] @@ -217,12 +224,28 @@ def create( hf_api_token=hf_api_token, hf_output_repo_name=hf_output_repo_name, ) + + price_estimation_result = self.estimate_price( + training_file=training_file, + validation_file=validation_file, + model=model, + n_epochs=n_epochs, + n_evals=n_evals, + training_type="lora" if lora else "full", + training_method=training_method, + ) if verbose: rprint( "Submitting a fine-tuning job with the following parameters:", finetune_request, ) + if not price_estimation_result.allowed_to_proceed: + rprint( + "[red]" + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format( + price_estimation_result.estimated_total_price + ) + "[/red]", + ) parameter_payload = finetune_request.model_dump(exclude_none=True) return self._client.post( @@ -231,6 +254,38 @@ def create( cast_to=FinetuneResponse, ) + def estimate_price( + self, + *, + training_file: str, + validation_file: str | None = None, + model: str | None = None, + n_epochs: int = 1, + n_evals: int | None = 0, + training_type: str | None = "lora", + training_method: str | None = "sft", + ) -> FinetunePriceEstimationResponse: + """ + Estimate the price of a fine-tuning job + """ + + + finetune_price_estimation_request = create_finetune_price_estimation_request( + training_file=training_file, + validation_file=validation_file, + model=model, + n_epochs=n_epochs, + n_evals=n_evals, + training_type=training_type, + training_method=training_method, + ) + parameter_payload = finetune_price_estimation_request.model_dump(exclude_none=True) + return self._client.post( + "/fine-tunes/estimate-price", + body=parameter_payload, + cast_to=FinetunePriceEstimationResponse, + ) + def retrieve( self, id: str, @@ -659,11 +714,27 @@ async def create( hf_output_repo_name=hf_output_repo_name, ) + price_estimation_result = await self.estimate_price( + training_file=training_file, + validation_file=validation_file, + model=model, + n_epochs=n_epochs, + n_evals=n_evals, + training_type="lora" if lora else "full", + training_method=training_method, + ) + if verbose: rprint( "Submitting a fine-tuning job with the following parameters:", finetune_request, ) + if not price_estimation_result.allowed_to_proceed: + rprint( + "[red]" + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format( + price_estimation_result.estimated_total_price + ) + "[/red]", + ) parameter_payload = finetune_request.model_dump(exclude_none=True) return await self._client.post( @@ -672,6 +743,38 @@ async def create( cast_to=FinetuneResponse, ) + async def estimate_price( + self, + *, + training_file: str, + validation_file: str | None = None, + model: str | None = None, + n_epochs: int = 1, + n_evals: int | None = 0, + training_type: str | None = "lora", + training_method: str | None = "sft", + ) -> FinetunePriceEstimationResponse: + """ + Estimate the price of a fine-tuning job + """ + + + finetune_price_estimation_request = create_finetune_price_estimation_request( + training_file=training_file, + validation_file=validation_file, + model=model, + n_epochs=n_epochs, + n_evals=n_evals, + training_type=training_type, + training_method=training_method, 
+ ) + parameter_payload = finetune_price_estimation_request.model_dump(exclude_none=True) + return await self._client.post( + "/fine-tunes/estimate-price", + body=parameter_payload, + cast_to=FinetunePriceEstimationResponse, + ) + async def retrieve( self, id: str, From abacabaafd315866ee1b522029fe0461e5bad1ad Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Mon, 24 Nov 2025 16:05:22 +0000 Subject: [PATCH 2/8] Update fine_tuning.py --- src/together/resources/fine_tuning.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/together/resources/fine_tuning.py b/src/together/resources/fine_tuning.py index eb80a891..69dcee62 100644 --- a/src/together/resources/fine_tuning.py +++ b/src/together/resources/fine_tuning.py @@ -29,7 +29,12 @@ from .._base_client import make_request_options from ..types.fine_tune import FineTune from ..lib.types.fine_tuning import FinetuneResponse, FinetuneTrainingLimits, FinetunePriceEstimationResponse -from ..lib.resources.fine_tuning import get_model_limits, async_get_model_limits, create_finetune_request, create_finetune_price_estimation_request +from ..lib.resources.fine_tuning import ( + get_model_limits, + async_get_model_limits, + create_finetune_price_estimation_request, + create_finetune_request, +) from ..types.fine_tuning_list_response import FineTuningListResponse from ..types.fine_tuning_cancel_response import FineTuningCancelResponse from ..types.fine_tuning_delete_response import FineTuningDeleteResponse From 0bb518c30d933740de508644e927b5a503b1fe95 Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Mon, 24 Nov 2025 16:15:35 +0000 Subject: [PATCH 3/8] code style --- src/together/lib/cli/api/fine_tuning.py | 13 +++++++------ src/together/resources/fine_tuning.py | 20 +++++++++----------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/together/lib/cli/api/fine_tuning.py b/src/together/lib/cli/api/fine_tuning.py index bc8cfba3..2374a489 100644 --- a/src/together/lib/cli/api/fine_tuning.py +++ b/src/together/lib/cli/api/fine_tuning.py @@ -343,12 +343,12 @@ def create( training_method=training_method, ) - proceed = \ - confirm or \ - finetune_price_estimation_result.allowed_to_proceed or \ - ( - not finetune_price_estimation_result.allowed_to_proceed and \ - click.confirm( + proceed = ( + confirm + or finetune_price_estimation_result.allowed_to_proceed + or ( + not finetune_price_estimation_result.allowed_to_proceed + and click.confirm( click.style( _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format( finetune_price_estimation_result.estimated_total_price @@ -359,6 +359,7 @@ def create( show_default=True, ) ) + ) if proceed: response = client.fine_tuning.create( diff --git a/src/together/resources/fine_tuning.py b/src/together/resources/fine_tuning.py index 69dcee62..f28e27c8 100644 --- a/src/together/resources/fine_tuning.py +++ b/src/together/resources/fine_tuning.py @@ -30,10 +30,10 @@ from ..types.fine_tune import FineTune from ..lib.types.fine_tuning import FinetuneResponse, FinetuneTrainingLimits, FinetunePriceEstimationResponse from ..lib.resources.fine_tuning import ( - get_model_limits, + get_model_limits, async_get_model_limits, - create_finetune_price_estimation_request, create_finetune_request, + create_finetune_price_estimation_request, ) from ..types.fine_tuning_list_response import FineTuningListResponse from ..types.fine_tuning_cancel_response import FineTuningCancelResponse @@ -229,7 +229,7 @@ def create( hf_api_token=hf_api_token, hf_output_repo_name=hf_output_repo_name, ) - + 
price_estimation_result = self.estimate_price( training_file=training_file, validation_file=validation_file, @@ -247,9 +247,9 @@ def create( ) if not price_estimation_result.allowed_to_proceed: rprint( - "[red]" + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format( - price_estimation_result.estimated_total_price - ) + "[/red]", + "[red]" + + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format(price_estimation_result.estimated_total_price) + + "[/red]", ) parameter_payload = finetune_request.model_dump(exclude_none=True) @@ -274,7 +274,6 @@ def estimate_price( Estimate the price of a fine-tuning job """ - finetune_price_estimation_request = create_finetune_price_estimation_request( training_file=training_file, validation_file=validation_file, @@ -736,9 +735,9 @@ async def create( ) if not price_estimation_result.allowed_to_proceed: rprint( - "[red]" + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format( - price_estimation_result.estimated_total_price - ) + "[/red]", + "[red]" + + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format(price_estimation_result.estimated_total_price) + + "[/red]", ) parameter_payload = finetune_request.model_dump(exclude_none=True) @@ -763,7 +762,6 @@ async def estimate_price( Estimate the price of a fine-tuning job """ - finetune_price_estimation_request = create_finetune_price_estimation_request( training_file=training_file, validation_file=validation_file, From 0ce35a14fb2133239941483ed2d7466118559bb4 Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Mon, 24 Nov 2025 16:18:06 +0000 Subject: [PATCH 4/8] Update fine_tuning.py --- src/together/lib/resources/fine_tuning.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/together/lib/resources/fine_tuning.py b/src/together/lib/resources/fine_tuning.py index 5fc18697..5d4682c0 100644 --- a/src/together/lib/resources/fine_tuning.py +++ b/src/together/lib/resources/fine_tuning.py @@ -250,6 +250,7 @@ def create_finetune_price_estimation_request( Create a fine-tune price estimation request """ + training_method_cls: TrainingMethodSFT | TrainingMethodDPO if training_method == "sft": training_method_cls = TrainingMethodSFT(train_on_inputs="auto") elif training_method == "dpo": From 0912438b9e24989cdf20010ad0f8909204f5ffa0 Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Mon, 24 Nov 2025 16:20:46 +0000 Subject: [PATCH 5/8] Update fine_tuning.py --- src/together/lib/resources/fine_tuning.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/together/lib/resources/fine_tuning.py b/src/together/lib/resources/fine_tuning.py index 5d4682c0..002f9bfc 100644 --- a/src/together/lib/resources/fine_tuning.py +++ b/src/together/lib/resources/fine_tuning.py @@ -264,6 +264,7 @@ def create_finetune_price_estimation_request( else: raise ValueError(f"Invalid training method: {training_method}. 
Must be 'sft' or 'dpo'") + training_type_cls: FullTrainingType | LoRATrainingType if training_type == "full": training_type_cls = FullTrainingType(type="Full") elif training_type == "lora": From ba0643e075be5ecb5ce4307ce044f731277e47f5 Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Wed, 26 Nov 2025 09:45:08 +0000 Subject: [PATCH 6/8] Update fine_tuning.py --- src/together/resources/fine_tuning.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/together/resources/fine_tuning.py b/src/together/resources/fine_tuning.py index a1b20579..a81ce263 100644 --- a/src/together/resources/fine_tuning.py +++ b/src/together/resources/fine_tuning.py @@ -27,8 +27,8 @@ async_to_custom_streamed_response_wrapper, ) from .._base_client import make_request_options -from ..types.fine_tune import FineTune -from ..lib.types.fine_tuning import FinetuneResponse, FinetuneTrainingLimits, FinetunePriceEstimationResponse +from ..lib.types.fine_tuning import FinetuneResponse as FinetuneResponseLib, FinetuneTrainingLimits, FinetunePriceEstimationResponse +from ..types.finetune_response import FinetuneResponse from ..lib.resources.fine_tuning import ( get_model_limits, async_get_model_limits, From db072edd1e5dc80a7676650ec6d855853b1a010d Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Wed, 26 Nov 2025 09:46:38 +0000 Subject: [PATCH 7/8] code style --- src/together/resources/fine_tuning.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/together/resources/fine_tuning.py b/src/together/resources/fine_tuning.py index a81ce263..ad7b2153 100644 --- a/src/together/resources/fine_tuning.py +++ b/src/together/resources/fine_tuning.py @@ -27,7 +27,11 @@ async_to_custom_streamed_response_wrapper, ) from .._base_client import make_request_options -from ..lib.types.fine_tuning import FinetuneResponse as FinetuneResponseLib, FinetuneTrainingLimits, FinetunePriceEstimationResponse +from ..lib.types.fine_tuning import ( + FinetuneResponse as FinetuneResponseLib, + FinetuneTrainingLimits, + FinetunePriceEstimationResponse, +) from ..types.finetune_response import FinetuneResponse from ..lib.resources.fine_tuning import ( get_model_limits, @@ -35,7 +39,6 @@ create_finetune_request, create_finetune_price_estimation_request, ) - from ..types.fine_tuning_list_response import FineTuningListResponse from ..types.fine_tuning_cancel_response import FineTuningCancelResponse from ..types.fine_tuning_delete_response import FineTuningDeleteResponse From d0243f9a55373026f7062b52bd00074cc745f09a Mon Sep 17 00:00:00 2001 From: newokaerinasai Date: Wed, 26 Nov 2025 12:27:37 +0000 Subject: [PATCH 8/8] clause revamp --- src/together/lib/cli/api/fine_tuning.py | 83 ++++++++++++------------- src/together/resources/fine_tuning.py | 6 +- 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/src/together/lib/cli/api/fine_tuning.py b/src/together/lib/cli/api/fine_tuning.py index 7558ca3c..c7190d75 100644 --- a/src/together/lib/cli/api/fine_tuning.py +++ b/src/together/lib/cli/api/fine_tuning.py @@ -24,20 +24,19 @@ _CONFIRMATION_MESSAGE = ( "You are about to create a fine-tuning job. " - "The cost of your job will be determined by the model size, the number of tokens " + "The estimated price of this job is {price}. " + "The actual cost of your job will be determined by the model size, the number of tokens " "in the training file, the number of tokens in the validation file, the number of epochs, and " - "the number of evaluations. 
Visit https://www.together.ai/pricing to get a price estimate.\n" + "the number of evaluations. Visit https://www.together.ai/pricing to learn more about pricing.\n" + "{warning}" "You can pass `-y` or `--confirm` to your command to skip this message.\n\n" "Do you want to proceed?" ) -_CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS = ( - "The estimated price of the fine-tuning job is {} which is significantly " - "greater than your current credit limit and balance. " +_WARNING_MESSAGE_INSUFFICIENT_FUNDS = ( + "The estimated price of this job is significantly greater than your current credit limit and balance. " "It will likely fail due to insufficient funds. " "Please consider increasing your credit limit at https://api.together.xyz/settings/profile\n" - "You can pass `-y` or `--confirm` to your command to skip this message.\n\n" - "Do you want to proceed?" ) _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$" @@ -332,46 +331,42 @@ def create( elif n_evals > 0 and not validation_file: raise click.BadParameter("You have specified a number of evaluation loops but no validation file.") - if confirm or click.confirm(_CONFIRMATION_MESSAGE, default=True, show_default=True): - finetune_price_estimation_result = client.fine_tuning.estimate_price( - training_file=training_file, - validation_file=validation_file, - model=model, - n_epochs=n_epochs, - n_evals=n_evals, - training_type="lora" if lora else "full", - training_method=training_method, - ) + finetune_price_estimation_result = client.fine_tuning.estimate_price( + training_file=training_file, + validation_file=validation_file, + model=model, + n_epochs=n_epochs, + n_evals=n_evals, + training_type="lora" if lora else "full", + training_method=training_method, + ) - proceed = ( - confirm - or finetune_price_estimation_result.allowed_to_proceed - or ( - not finetune_price_estimation_result.allowed_to_proceed - and click.confirm( - click.style( - _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format( - finetune_price_estimation_result.estimated_total_price - ), - fg="red", - ), - default=True, - show_default=True, - ) - ) - ) + price = click.style( + f"${finetune_price_estimation_result.estimated_total_price:.2f}", + bold=True, + ) - if proceed: - response = client.fine_tuning.create( - **training_args, - verbose=True, - ) + if not finetune_price_estimation_result.allowed_to_proceed: + warning = click.style(_WARNING_MESSAGE_INSUFFICIENT_FUNDS, fg="red", bold=True) + else: + warning = "" + + confirmation_message = _CONFIRMATION_MESSAGE.format( + price=price, + warning=warning, + ) + + if confirm or click.confirm(confirmation_message, default=True, show_default=True): + response = client.fine_tuning.create( + **training_args, + verbose=True, + ) - report_string = f"Successfully submitted a fine-tuning job {response.id}" - # created_at reports UTC time, we use .astimezone() to convert to local time - formatted_time = response.created_at.astimezone().strftime("%m/%d/%Y, %H:%M:%S") - report_string += f" at {formatted_time}" - rprint(report_string) + report_string = f"Successfully submitted a fine-tuning job {response.id}" + # created_at reports UTC time, we use .astimezone() to convert to local time + formatted_time = response.created_at.astimezone().strftime("%m/%d/%Y, %H:%M:%S") + report_string += f" at {formatted_time}" + rprint(report_string) else: click.echo("No confirmation received, stopping job launch") diff --git a/src/together/resources/fine_tuning.py b/src/together/resources/fine_tuning.py index ad7b2153..7b1df510 100644 --- 
a/src/together/resources/fine_tuning.py +++ b/src/together/resources/fine_tuning.py @@ -45,7 +45,7 @@ from ..types.fine_tuning_list_events_response import FineTuningListEventsResponse from ..types.fine_tuning_list_checkpoints_response import FineTuningListCheckpointsResponse -_CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS = ( +_WARNING_MESSAGE_INSUFFICIENT_FUNDS = ( "The estimated price of the fine-tuning job is {} which is significantly " "greater than your current credit limit and balance. " "It will likely fail due to insufficient funds. " @@ -252,7 +252,7 @@ def create( if not price_estimation_result.allowed_to_proceed: rprint( "[red]" - + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format(price_estimation_result.estimated_total_price) + + _WARNING_MESSAGE_INSUFFICIENT_FUNDS.format(price_estimation_result.estimated_total_price) + "[/red]", ) parameter_payload = finetune_request.model_dump(exclude_none=True) @@ -740,7 +740,7 @@ async def create( if not price_estimation_result.allowed_to_proceed: rprint( "[red]" - + _CONFIRMATION_MESSAGE_INSUFFICIENT_FUNDS.format(price_estimation_result.estimated_total_price) + + _WARNING_MESSAGE_INSUFFICIENT_FUNDS.format(price_estimation_result.estimated_total_price) + "[/red]", ) parameter_payload = finetune_request.model_dump(exclude_none=True)
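
Usage sketch (illustrative, not part of the diffs above): the snippet below shows how the price-estimation flow added by this series might be exercised from the SDK. The `Together` client entry point, the `TOGETHER_API_KEY` environment variable, and the file/model identifiers are assumptions for illustration; only `estimate_price`, its parameters, and the `FinetunePriceEstimationResponse` fields come from the patches themselves.

    import os

    from together import Together  # assumed top-level client export

    client = Together(api_key=os.environ["TOGETHER_API_KEY"])

    # New endpoint introduced by this series: POST /fine-tunes/estimate-price
    estimate = client.fine_tuning.estimate_price(
        training_file="file-xxxxxxxx",        # placeholder training file ID
        validation_file=None,
        model="example-org/example-model",    # placeholder base model name
        n_epochs=1,
        n_evals=0,
        training_type="lora",    # "lora" or "full"
        training_method="sft",   # "sft" or "dpo"
    )

    print(f"Estimated total price: ${estimate.estimated_total_price:.2f}")
    print(f"Estimated training tokens: {estimate.estimated_train_token_count}")
    print(f"Estimated eval tokens: {estimate.estimated_eval_token_count}")

    # allowed_to_proceed is False when the estimate significantly exceeds the user's
    # credit limit and balance (user_limit); the resource layer prints a red warning
    # in that case but does not block the submission.
    if estimate.allowed_to_proceed:
        job = client.fine_tuning.create(
            training_file="file-xxxxxxxx",    # placeholder training file ID
            model="example-org/example-model",
            verbose=True,
        )
        print(f"Submitted fine-tuning job {job.id}")

In the CLI path (patch 8), the estimate is folded into the existing confirmation prompt rather than printed separately, so passing `-y`/`--confirm` still skips both the price notice and the insufficient-funds warning.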