From d93f9cb5f308f230a6a70eebdd111ee3a5881ef5 Mon Sep 17 00:00:00 2001
From: aggarwal-k <117632441+aggarwal-k@users.noreply.github.com>
Date: Wed, 14 Jun 2023 13:00:24 +0530
Subject: [PATCH] Add compute validations (#2371)

* Add compute validations

* fix formatting
---
 .../question-answering/extractive-qa.ipynb | 16 ++++++++++++++--
 .../finetune/summarization/news-summary.ipynb | 16 ++++++++++++++--
 .../text-classification/emotion-detection.ipynb | 16 ++++++++++++++--
 .../token-classification.ipynb | 14 +++++++++++++-
 .../finetune/translation/translation.ipynb | 14 +++++++++++++-
 5 files changed, 68 insertions(+), 8 deletions(-)

diff --git a/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb b/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
index b3bf513ac5..2b0047aa94 100644
--- a/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
+++ b/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
@@ -122,10 +122,10 @@
 "        gpu_count_found = True\n",
 "# if gpu_count_found not found, then print an error\n",
 "if gpu_count_found:\n",
- "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
+ "    print(f\"Number of GPU's in compute {compute.size}: {gpus_per_node}\")\n",
 "else:\n",
 "    raise ValueError(\n",
- "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+ "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
 "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
 "    )\n",
 "# CPU based finetune works only for single-node single-process\n",
@@ -135,6 +135,18 @@
 "    )\n",
 "    gpus_per_node = 1\n",
 "\n",
+ "# Computes with K80 GPUs are not supported\n",
+ "unsupported_gpu_vm_list = [\n",
+ "    \"standard_nc6\",\n",
+ "    \"standard_nc12\",\n",
+ "    \"standard_nc24\",\n",
+ "    \"standard_nc24r\",\n",
+ "]\n",
+ "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+ "    raise ValueError(\n",
+ "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+ "    )\n",
+ "\n",
 "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
 "timestamp = str(int(time.time()))"
 ]
diff --git a/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb b/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
index 9f74abceff..94941ce897 100644
--- a/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
+++ b/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
@@ -121,10 +121,10 @@
 "        gpu_count_found = True\n",
 "# if gpu_count_found not found, then print an error\n",
 "if gpu_count_found:\n",
- "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
+ "    print(f\"Number of GPU's in compute {compute.size}: {gpus_per_node}\")\n",
 "else:\n",
 "    raise ValueError(\n",
- "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+ "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
 "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
 "    )\n",
 "# CPU based finetune works only for single-node single-process\n",
@@ -134,6 +134,18 @@
 "    )\n",
 "    gpus_per_node = 1\n",
 "\n",
+ "# Computes with K80 GPUs are not supported\n",
+ "unsupported_gpu_vm_list = [\n",
+ "    \"standard_nc6\",\n",
+ "    \"standard_nc12\",\n",
+ "    \"standard_nc24\",\n",
+ "    \"standard_nc24r\",\n",
+ "]\n",
+ "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+ "    raise ValueError(\n",
+ "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+ "    )\n",
+ "\n",
 "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
 "timestamp = str(int(time.time()))"
 ]
diff --git a/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb b/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
index cc54389022..d3a6a4e200 100644
--- a/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
+++ b/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
@@ -122,10 +122,10 @@
 "        gpu_count_found = True\n",
 "# if gpu_count_found not found, then print an error\n",
 "if gpu_count_found:\n",
- "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
+ "    print(f\"Number of GPU's in compute {compute.size}: {gpus_per_node}\")\n",
 "else:\n",
 "    raise ValueError(\n",
- "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+ "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
 "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
 "    )\n",
 "# CPU based finetune works only for single-node single-process\n",
@@ -135,6 +135,18 @@
 "    )\n",
 "    gpus_per_node = 1\n",
 "\n",
+ "# Computes with K80 GPUs are not supported\n",
+ "unsupported_gpu_vm_list = [\n",
+ "    \"standard_nc6\",\n",
+ "    \"standard_nc12\",\n",
+ "    \"standard_nc24\",\n",
+ "    \"standard_nc24r\",\n",
+ "]\n",
+ "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+ "    raise ValueError(\n",
+ "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+ "    )\n",
+ "\n",
 "# generating a unique timestamp that can be used for names and versions that need to be unique\n",
 "timestamp = str(int(time.time()))"
 ]
diff --git a/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb b/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
index d7ab8f8560..3e37ae1a02 100644
--- a/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
+++ b/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
@@ -125,7 +125,7 @@
 "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
 "else:\n",
 "    raise ValueError(\n",
- "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+ "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
 "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
 "    )\n",
 "# CPU based finetune works only for single-node single-process\n",
@@ -135,6 +135,18 @@
 "    )\n",
 "    gpus_per_node = 1\n",
 "\n",
+ "# Computes with K80 GPUs are not supported\n",
+ "unsupported_gpu_vm_list = [\n",
+ "    \"standard_nc6\",\n",
+ "    \"standard_nc12\",\n",
+ "    \"standard_nc24\",\n",
+ "    \"standard_nc24r\",\n",
+ "]\n",
+ "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+ "    raise ValueError(\n",
+ "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+ "    )\n",
+ "\n",
 "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
 "timestamp = str(int(time.time()))"
 ]
diff --git a/sdk/python/foundation-models/system/finetune/translation/translation.ipynb b/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
index 0063a645f1..2f44f09a92 100644
--- a/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
+++ b/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
@@ -124,7 +124,7 @@
 "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
 "else:\n",
 "    raise ValueError(\n",
- "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+ "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
 "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
 "    )\n",
 "# CPU based finetune works only for single-node single-process\n",
@@ -134,6 +134,18 @@
 "    )\n",
 "    gpus_per_node = 1\n",
 "\n",
+ "# Computes with K80 GPUs are not supported\n",
+ "unsupported_gpu_vm_list = [\n",
+ "    \"standard_nc6\",\n",
+ "    \"standard_nc12\",\n",
+ "    \"standard_nc24\",\n",
+ "    \"standard_nc24r\",\n",
+ "]\n",
+ "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+ "    raise ValueError(\n",
+ "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+ "    )\n",
+ "\n",
 "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
 "timestamp = str(int(time.time()))"
 ]
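
Taken together, the hunks above give every finetuning notebook the same compute-validation cell. Assembled as plain Python it looks roughly like the sketch below; the `workspace_ml_client` and `compute_cluster` names and the SKU-lookup calls (`compute.get`, `compute.list_sizes`) are assumed from the notebooks' earlier setup cells rather than shown in this diff, so treat this as an illustration of the checks, not the literal cell contents.

```python
from azure.ai.ml import MLClient


def validate_compute(workspace_ml_client: MLClient, compute_cluster: str) -> int:
    """Sketch of the compute checks the finetune notebooks run after this patch."""
    compute = workspace_ml_client.compute.get(compute_cluster)

    # Find the GPU count of the cluster's VM size among the workspace SKUs.
    gpus_per_node = 0
    gpu_count_found = False
    available_sku_sizes = []
    for compute_sku in workspace_ml_client.compute.list_sizes():
        available_sku_sizes.append(compute_sku.name)
        if compute_sku.name.lower() == compute.size.lower():
            gpus_per_node = compute_sku.gpus
            gpu_count_found = True
    if not gpu_count_found:
        raise ValueError(
            f"Number of GPUs in compute {compute.size} not found. "
            f"Available skus are: {available_sku_sizes}. "
            f"Please check the selected compute cluster: {compute_cluster} and try again."
        )

    # CPU based finetune works only for single-node single-process.
    if gpus_per_node == 0:
        gpus_per_node = 1

    # Validation added by this patch: K80-based NC v1 sizes are rejected.
    unsupported_gpu_vm_list = [
        "standard_nc6",
        "standard_nc12",
        "standard_nc24",
        "standard_nc24r",
    ]
    if compute.size.lower() in unsupported_gpu_vm_list:
        raise ValueError(
            f"VM size {compute.size} is currently not supported for finetuning"
        )

    return gpus_per_node
```

The four `standard_nc*` entries are the K80-backed NC v1 VM sizes; comparing against `compute.size.lower()` keeps the check case-insensitive, matching the existing SKU lookup, and raising before job submission lets the notebook fail fast on unsupported hardware. A notebook would call something like `gpus_per_node = validate_compute(workspace_ml_client, compute_cluster)` before building the finetuning pipeline job.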