From 49c47165551f699dbd0697d34f905e03fdc0a224 Mon Sep 17 00:00:00 2001
From: Kunal Aggarwal
Date: Tue, 13 Jun 2023 21:33:14 +0530
Subject: [PATCH 1/2] Add compute validations

---
 .../finetune/question-answering/extractive-qa.ipynb      | 9 +++++++--
 .../system/finetune/summarization/news-summary.ipynb     | 9 +++++++--
 .../finetune/text-classification/emotion-detection.ipynb | 9 +++++++--
 .../token-classification/token-classification.ipynb      | 7 ++++++-
 .../system/finetune/translation/translation.ipynb        | 7 ++++++-
 5 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb b/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
index b3bf513ac5..58e125b61f 100644
--- a/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
+++ b/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
@@ -122,10 +122,10 @@
     "        gpu_count_found = True\n",
     "# if gpu_count_found not found, then print an error\n",
     "if gpu_count_found:\n",
-    "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
+    "    print(f\"Number of GPU's in compute {compute.size}: {gpus_per_node}\")\n",
     "else:\n",
     "    raise ValueError(\n",
-    "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+    "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
     "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
     "    )\n",
     "# CPU based finetune works only for single-node single-process\n",
@@ -135,6 +135,11 @@
     "    )\n",
     "    gpus_per_node = 1\n",
     "\n",
+    "# Computes with K80 GPUs are not supported\n",
+    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb b/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
index 9f74abceff..2f1cb64aba 100644
--- a/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
+++ b/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
@@ -121,10 +121,10 @@
     "        gpu_count_found = True\n",
     "# if gpu_count_found not found, then print an error\n",
     "if gpu_count_found:\n",
-    "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
+    "    print(f\"Number of GPU's in compute {compute.size}: {gpus_per_node}\")\n",
     "else:\n",
     "    raise ValueError(\n",
-    "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+    "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
     "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
     "    )\n",
     "# CPU based finetune works only for single-node single-process\n",
@@ -134,6 +134,11 @@
     "    )\n",
     "    gpus_per_node = 1\n",
     "\n",
+    "# Computes with K80 GPUs are not supported\n",
+    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb b/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
index cc54389022..1606e75cf8 100644
--- a/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
+++ b/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
@@ -122,10 +122,10 @@
     "        gpu_count_found = True\n",
     "# if gpu_count_found not found, then print an error\n",
     "if gpu_count_found:\n",
-    "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
+    "    print(f\"Number of GPU's in compute {compute.size}: {gpus_per_node}\")\n",
     "else:\n",
     "    raise ValueError(\n",
-    "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+    "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
     "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
     "    )\n",
     "# CPU based finetune works only for single-node single-process\n",
@@ -135,6 +135,11 @@
     "    )\n",
     "    gpus_per_node = 1\n",
     "\n",
+    "# Computes with K80 GPUs are not supported\n",
+    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "\n",
     "# generating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb b/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
index d7ab8f8560..2f571cfe6c 100644
--- a/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
+++ b/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
@@ -125,7 +125,7 @@
     "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
     "else:\n",
     "    raise ValueError(\n",
-    "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+    "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
     "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
     "    )\n",
     "# CPU based finetune works only for single-node single-process\n",
@@ -135,6 +135,11 @@
     "    )\n",
     "    gpus_per_node = 1\n",
     "\n",
+    "# Computes with K80 GPUs are not supported\n",
+    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/translation/translation.ipynb b/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
index 0063a645f1..736b7eaa3d 100644
--- a/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
+++ b/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
@@ -124,7 +124,7 @@
     "    print(f\"Number of GPU's in copute {compute.size}: {gpus_per_node}\")\n",
     "else:\n",
     "    raise ValueError(\n",
-    "        f\"Number of GPU's in copute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
+    "        f\"Number of GPU's in compute {compute.size} not found. Available skus are: {available_sku_sizes}.\"\n",
     "        f\"This should not happen. Please check the selected compute cluster: {compute_cluster} and try again.\"\n",
     "    )\n",
     "# CPU based finetune works only for single-node single-process\n",
@@ -134,6 +134,11 @@
     "    )\n",
     "    gpus_per_node = 1\n",
     "\n",
+    "# Computes with K80 GPUs are not supported\n",
+    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "if compute.size.lower() in unsupported_gpu_vm_list:\n",
+    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]

From d218ee61dddf9da0dfa50f1ae28b124fa5433d90 Mon Sep 17 00:00:00 2001
From: Kunal Aggarwal
Date: Wed, 14 Jun 2023 12:36:57 +0530
Subject: [PATCH 2/2] fix formatting

---
 .../finetune/question-answering/extractive-qa.ipynb  | 11 +++++++++--
 .../system/finetune/summarization/news-summary.ipynb | 11 +++++++++--
 .../text-classification/emotion-detection.ipynb      | 11 +++++++++--
 .../token-classification/token-classification.ipynb  | 11 +++++++++--
 .../system/finetune/translation/translation.ipynb    | 11 +++++++++--
 5 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb b/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
index 58e125b61f..2b0047aa94 100644
--- a/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
+++ b/sdk/python/foundation-models/system/finetune/question-answering/extractive-qa.ipynb
@@ -136,9 +136,16 @@
     "    gpus_per_node = 1\n",
     "\n",
     "# Computes with K80 GPUs are not supported\n",
-    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "unsupported_gpu_vm_list = [\n",
+    "    \"standard_nc6\",\n",
+    "    \"standard_nc12\",\n",
+    "    \"standard_nc24\",\n",
+    "    \"standard_nc24r\",\n",
+    "]\n",
     "if compute.size.lower() in unsupported_gpu_vm_list:\n",
-    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "    raise ValueError(\n",
+    "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+    "    )\n",
     "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb b/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
index 2f1cb64aba..94941ce897 100644
--- a/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
+++ b/sdk/python/foundation-models/system/finetune/summarization/news-summary.ipynb
@@ -135,9 +135,16 @@
     "    gpus_per_node = 1\n",
     "\n",
     "# Computes with K80 GPUs are not supported\n",
-    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "unsupported_gpu_vm_list = [\n",
+    "    \"standard_nc6\",\n",
+    "    \"standard_nc12\",\n",
+    "    \"standard_nc24\",\n",
+    "    \"standard_nc24r\",\n",
+    "]\n",
     "if compute.size.lower() in unsupported_gpu_vm_list:\n",
-    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "    raise ValueError(\n",
+    "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+    "    )\n",
     "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb b/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
index 1606e75cf8..d3a6a4e200 100644
--- a/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
+++ b/sdk/python/foundation-models/system/finetune/text-classification/emotion-detection.ipynb
@@ -136,9 +136,16 @@
     "    gpus_per_node = 1\n",
     "\n",
     "# Computes with K80 GPUs are not supported\n",
-    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "unsupported_gpu_vm_list = [\n",
+    "    \"standard_nc6\",\n",
+    "    \"standard_nc12\",\n",
+    "    \"standard_nc24\",\n",
+    "    \"standard_nc24r\",\n",
+    "]\n",
     "if compute.size.lower() in unsupported_gpu_vm_list:\n",
-    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "    raise ValueError(\n",
+    "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+    "    )\n",
     "\n",
     "# generating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb b/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
index 2f571cfe6c..3e37ae1a02 100644
--- a/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
+++ b/sdk/python/foundation-models/system/finetune/token-classification/token-classification.ipynb
@@ -136,9 +136,16 @@
     "    gpus_per_node = 1\n",
     "\n",
     "# Computes with K80 GPUs are not supported\n",
-    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "unsupported_gpu_vm_list = [\n",
+    "    \"standard_nc6\",\n",
+    "    \"standard_nc12\",\n",
+    "    \"standard_nc24\",\n",
+    "    \"standard_nc24r\",\n",
+    "]\n",
     "if compute.size.lower() in unsupported_gpu_vm_list:\n",
-    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "    raise ValueError(\n",
+    "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+    "    )\n",
     "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"
    ]
diff --git a/sdk/python/foundation-models/system/finetune/translation/translation.ipynb b/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
index 736b7eaa3d..2f44f09a92 100644
--- a/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
+++ b/sdk/python/foundation-models/system/finetune/translation/translation.ipynb
@@ -135,9 +135,16 @@
     "    gpus_per_node = 1\n",
     "\n",
     "# Computes with K80 GPUs are not supported\n",
-    "unsupported_gpu_vm_list = [\"standard_nc6\", \"standard_nc12\", \"standard_nc24\", \"standard_nc24r\"]\n",
+    "unsupported_gpu_vm_list = [\n",
+    "    \"standard_nc6\",\n",
+    "    \"standard_nc12\",\n",
+    "    \"standard_nc24\",\n",
+    "    \"standard_nc24r\",\n",
+    "]\n",
     "if compute.size.lower() in unsupported_gpu_vm_list:\n",
-    "    raise ValueError(f\"VM size {compute.size} is currently not supported for finetuning\")\n",
+    "    raise ValueError(\n",
+    "        f\"VM size {compute.size} is currently not supported for finetuning\"\n",
+    "    )\n",
     "\n",
     "# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
     "timestamp = str(int(time.time()))"