From 24e8e630107615938b86244e7e1fc8cbac0751e1 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 24 Aug 2023 01:09:30 -0700 Subject: [PATCH 01/16] patch with accelerate xpu --- src/transformers/__init__.py | 2 + src/transformers/testing_utils.py | 26 ++++++++++++ src/transformers/trainer_utils.py | 52 +++++++++++++++++------ src/transformers/training_args.py | 39 +++++++++++++---- src/transformers/utils/__init__.py | 1 + src/transformers/utils/import_utils.py | 23 ++++++++++ tests/trainer/test_trainer_distributed.py | 15 +++++++ 7 files changed, 138 insertions(+), 20 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 9b95aadffccc6f..87e73347c65d34 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -740,6 +740,7 @@ "is_torch_available", "is_torch_neuroncore_available", "is_torch_npu_available", + "is_torch_xpu_available", "is_torch_tpu_available", "is_torchvision_available", "is_vision_available", @@ -4764,6 +4765,7 @@ is_torch_available, is_torch_neuroncore_available, is_torch_npu_available, + is_torch_xpu_available, is_torch_tpu_available, is_torchvision_available, is_vision_available, diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index c8c66657792e4d..11ab5aee7c8203 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -97,6 +97,7 @@ is_torch_bf16_gpu_available, is_torch_neuroncore_available, is_torch_npu_available, + is_torch_xpu_available, is_torch_tensorrt_fx_available, is_torch_tf32_available, is_torch_tpu_available, @@ -624,6 +625,29 @@ def require_torch_multi_npu(test_case): return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case) +def require_torch_xpu(test_case): + """ + Decorator marking a test that requires XPU and IPEX. + + These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch + version. + """ + return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case) + + +def require_torch_multi_xpu(test_case): + """ + Decorator marking a test that requires a multi-XPU setup with IPEX and atleast one XPU device. These tests are skipped + on a machine without IPEX or multiple XPUs. 
+ + To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu" + """ + if not is_torch_xpu_available(): + return unittest.skip("test requires IPEX and atleast one XPU device")(test_case) + + return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case) + + if is_torch_available(): # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode import torch @@ -641,6 +665,8 @@ def require_torch_multi_npu(test_case): torch_device = "cuda" elif _run_third_party_device_tests and is_torch_npu_available(): torch_device = "npu" + elif _run_third_party_device_tests and is_torch_xpu_available(): + torch_device = "xpu" else: torch_device = "cpu" diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index 30571597c235d1..41831635b88c02 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -38,6 +38,7 @@ is_torch_mps_available, is_torch_npu_available, is_torch_tpu_available, + is_torch_xpu_available, requires_backends, ) @@ -97,6 +98,8 @@ def set_seed(seed: int): # ^^ safe to call this function even if cuda is not available if is_torch_npu_available(): torch.npu.manual_seed_all(seed) + if is_torch_xpu_available(): + torch.xpu.manual_seed_all(seed) if is_tf_available(): tf.random.set_seed(seed) @@ -420,6 +423,11 @@ def __init__(self, skip_memory_metrics=False): elif is_torch_mps_available(): import torch + self.torch = torch + self.gpu = {} + elif is_torch_xpu_available(): + import torch + self.torch = torch self.gpu = {} else: @@ -472,12 +480,19 @@ def start(self): gc.collect() if self.torch is not None: - self.torch.cuda.reset_peak_memory_stats() - self.torch.cuda.empty_cache() + if torch.cuda.is_available(): + self.torch.cuda.reset_peak_memory_stats() + self.torch.cuda.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.reset_peak_memory_stats() + self.torch.xpu.empty_cache() # gpu if self.torch is not None: - self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() + if torch.cuda.is_available(): + self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() + elif is_torch_xpu_available(): + self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated() # cpu self.cpu_mem_used_at_start = self.cpu_mem_used() @@ -501,7 +516,10 @@ def stop(self, stage): gc.collect() if self.torch is not None: - self.torch.cuda.empty_cache() + if torch.cuda.is_available(): + self.torch.cuda.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.empty_cache() # concepts: # - alloc_delta: the difference of allocated memory between the end and the start @@ -510,14 +528,24 @@ def stop(self, stage): # gpu if self.torch is not None: - self.gpu_mem_used_now = self.torch.cuda.memory_allocated() - self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() - self.gpu[self.cur_stage] = { - "begin": self.gpu_mem_used_at_start, - "end": self.gpu_mem_used_now, - "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), - "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), - } + if torch.cuda.is_available(): + self.gpu_mem_used_now = self.torch.cuda.memory_allocated() + self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() + self.gpu[self.cur_stage] = { + "begin": self.gpu_mem_used_at_start, + "end": self.gpu_mem_used_now, + "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), + "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), + } + elif is_torch_xpu_available(): + self.gpu_mem_used_now = 
self.torch.xpu.memory_allocated() + self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() + self.gpu[self.cur_stage] = { + "begin": self.gpu_mem_used_at_start, + "end": self.gpu_mem_used_now, + "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), + "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), + } # cpu self.cpu_mem_used_now = self.cpu_mem_used() diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 62e3b515bd6a42..cb803548289495 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -48,6 +48,7 @@ is_torch_bf16_gpu_available, is_torch_neuroncore_available, is_torch_npu_available, + is_torch_xpu_available, is_torch_tf32_available, is_torch_tpu_available, logging, @@ -193,9 +194,9 @@ class TrainingArguments: prediction_loss_only (`bool`, *optional*, defaults to `False`): When performing evaluation and generating predictions, only returns the loss. per_device_train_batch_size (`int`, *optional*, defaults to 8): - The batch size per GPU/TPU/MPS/NPU core/CPU for training. + The batch size per GPU/XPU)/TPU/MPS/NPU core/CPU for training. per_device_eval_batch_size (`int`, *optional*, defaults to 8): - The batch size per GPU/TPU/MPS/NPU core/CPU for evaluation. + The batch size per GPU/XPU/TPU/MPS/NPU core/CPU for evaluation. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. @@ -1352,11 +1353,18 @@ def __post_init__(self): if self.use_cpu and not is_torch_bf16_cpu_available() and not is_torch_tpu_available(): # cpu raise ValueError("Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10") - elif not self.use_cpu and torch.cuda.is_available() and not is_torch_bf16_gpu_available(): - # gpu - raise ValueError( - "Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0" - ) + elif not self.use_cpu: + if torch.cuda.is_available() and not is_torch_bf16_gpu_available(): + # gpu + raise ValueError( + "Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0" + ) + elif not is_torch_xpu_available(): + #xpu + raise ValueError( + "Your setup doesn't support bf16/xpu. You need torch>=1.12, using Intel XPU/GPU with IPEX installed" + ) + if self.fp16 and self.bf16: raise ValueError("At most one of fp16 and bf16 can be True, but not both") @@ -1411,6 +1419,7 @@ def __post_init__(self): self.framework == "pt" and is_torch_available() and (self.device.type != "cuda") + and (self.device.type != "xpu") and (get_xla_device_type(self.device) != "GPU") and (get_xla_device_type(self.device) != "TPU") and (self.device.type != "cpu") @@ -1418,7 +1427,7 @@ def __post_init__(self): ): raise ValueError( "BF16 Mixed precision training with AMP (`--bf16`) and BF16 half precision evaluation" - " (`--bf16_full_eval`) can only be used on CUDA or CPU/TPU/NeuronCore devices." + " (`--bf16_full_eval`) can only be used on CUDA, XPU (with IPEX) or CPU/TPU/NeuronCore devices." 
) if self.torchdynamo is not None: @@ -1784,6 +1793,10 @@ def _setup_devices(self) -> "torch.device": device = torch.device("cuda", local_rank) self._n_gpu = 1 torch.cuda.set_device(device) + elif is_torch_xpu_available() and "ACCELERATE_USE_XPU" not in os.environ: + os.environ["ACCELERATE_USE_XPU"] = "true" + device = torch.device("xpu:0") + self._n_gpu = 1 elif is_sagemaker_dp_enabled(): self.distributed_state = PartialState(_use_sagemaker_dp=True) self._n_gpu = 1 @@ -1812,6 +1825,12 @@ def _setup_devices(self) -> "torch.device": elif is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled(): # Already set _n_gpu pass + elif self.distributed_state.distributed_type == DistributedType.MULTI_XPU: + if "ACCELERATE_USE_XPU" not in os.environ: + os.environ["ACCELERATE_USE_XPU"] = "true" + self._n_gpu = torch.xpu.device_count() + device = torch.device("xpu:0") + torch.xpu.set_device(device) elif self.distributed_state.distributed_type == DistributedType.NO: if self.use_mps_device: warnings.warn( @@ -1829,6 +1848,10 @@ def _setup_devices(self) -> "torch.device": elif self.use_cpu: device = torch.device("cpu") self._n_gpu = 0 + elif is_torch_xpu_available(): + device = torch.device("xpu:0") + torch.xpu.set_device(device) + self._n_gpu =1 elif is_torch_npu_available(): device = torch.device("npu:0") torch.npu.set_device(device) diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 050ccae9c03d5f..be4a2ebb01f1fb 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -169,6 +169,7 @@ is_torch_mps_available, is_torch_neuroncore_available, is_torch_npu_available, + is_torch_xpu_available, is_torch_tensorrt_fx_available, is_torch_tf32_available, is_torch_tpu_available, diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 0045d3345b21be..80678b34dfc587 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -528,6 +528,29 @@ def get_major_and_minor_from_version(full_version): return True +@lru_cache +def is_torch_xpu_available(check_device=False): + "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" + if is_ipex_available(): + import torch + + if is_torch_version("<=", "1.12"): + return False + else: + return False + + import intel_extension_for_pytorch # noqa: F401 + + if check_device: + try: + # Will raise a RuntimeError if no XPU is found + _ = torch.xpu.device_count() + return torch.xpu.is_available() + except RuntimeError: + return False + return hasattr(torch, "xpu") and torch.xpu.is_available() + + def is_bitsandbytes_available(): if not is_torch_available(): return False diff --git a/tests/trainer/test_trainer_distributed.py b/tests/trainer/test_trainer_distributed.py index f8b59d967c7244..3ccff915a29fe0 100644 --- a/tests/trainer/test_trainer_distributed.py +++ b/tests/trainer/test_trainer_distributed.py @@ -24,6 +24,7 @@ get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, + require_torch_multi_xpu, require_torch_npu, ) from transformers.training_args import ParallelMode @@ -159,6 +160,20 @@ def test_trainer(self): # successful return here == success - any errors would have caused an error in the sub-call +class TestTrainerDistributed(TestCasePlus): + @require_torch_multi_xpu + def test_trainer(self): + distributed_args = f"""--nproc_per_node={torch.xpu.device_count()} + --master_port={get_torch_dist_unique_port()} + 
{self.test_file_dir}/test_trainer_distributed.py + """.split() + output_dir = self.get_auto_remove_tmp_dir() + args = f"--output_dir {output_dir}".split() + cmd = ["torchrun"] + distributed_args + args + execute_subprocess_async(cmd, env=self.get_env()) + # successful return here == success - any errors would have caused an error in the sub-call + + if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # From 9adc60dbc51211f6a95e4b6e52a8aa9a7d0ef910 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 24 Aug 2023 01:20:30 -0700 Subject: [PATCH 02/16] patch with accelerate xpu --- src/transformers/utils/import_utils.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 80678b34dfc587..14444b15748063 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -533,9 +533,6 @@ def is_torch_xpu_available(check_device=False): "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if is_ipex_available(): import torch - - if is_torch_version("<=", "1.12"): - return False else: return False From eea523d2668992b6c78b0936d9f5a74c869d8281 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 24 Aug 2023 04:16:29 -0700 Subject: [PATCH 03/16] formatting --- src/transformers/testing_utils.py | 6 +++--- src/transformers/trainer_utils.py | 2 +- src/transformers/training_args.py | 7 +++---- src/transformers/utils/import_utils.py | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 11ab5aee7c8203..a0da831588fb98 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -628,7 +628,7 @@ def require_torch_multi_npu(test_case): def require_torch_xpu(test_case): """ Decorator marking a test that requires XPU and IPEX. - + These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch version. """ @@ -637,9 +637,9 @@ def require_torch_xpu(test_case): def require_torch_multi_xpu(test_case): """ - Decorator marking a test that requires a multi-XPU setup with IPEX and atleast one XPU device. These tests are skipped + Decorator marking a test that requires a multi-XPU setup with IPEX and atleast one XPU device. These tests are skipped on a machine without IPEX or multiple XPUs. - + To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu" """ if not is_torch_xpu_available(): diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index 41831635b88c02..26f55ecc0e7877 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -427,7 +427,7 @@ def __init__(self, skip_memory_metrics=False): self.gpu = {} elif is_torch_xpu_available(): import torch - + self.torch = torch self.gpu = {} else: diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index cb803548289495..51ed697c53c553 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1360,11 +1360,10 @@ def __post_init__(self): "Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0" ) elif not is_torch_xpu_available(): - #xpu + # xpu raise ValueError( "Your setup doesn't support bf16/xpu. 
You need torch>=1.12, using Intel XPU/GPU with IPEX installed" - ) - + ) if self.fp16 and self.bf16: raise ValueError("At most one of fp16 and bf16 can be True, but not both") @@ -1851,7 +1850,7 @@ def _setup_devices(self) -> "torch.device": elif is_torch_xpu_available(): device = torch.device("xpu:0") torch.xpu.set_device(device) - self._n_gpu =1 + self._n_gpu = 1 elif is_torch_npu_available(): device = torch.device("npu:0") torch.npu.set_device(device) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 14444b15748063..a5cec5a83f9e4b 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -546,7 +546,7 @@ def is_torch_xpu_available(check_device=False): except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() - + def is_bitsandbytes_available(): if not is_torch_available(): From efe5e559428e5463a2f97ab9b7d1235dcbac1e57 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 24 Aug 2023 04:34:52 -0700 Subject: [PATCH 04/16] fix tests --- src/transformers/__init__.py | 2 +- src/transformers/models/idefics/modeling_idefics.py | 2 +- src/transformers/models/llama/tokenization_llama.py | 4 ++-- src/transformers/models/llama/tokenization_llama_fast.py | 4 ++-- src/transformers/testing_utils.py | 2 +- src/transformers/training_args.py | 2 +- src/transformers/utils/__init__.py | 2 +- tests/models/markuplm/test_processor_markuplm.py | 2 +- tests/trainer/test_trainer_distributed.py | 4 ++-- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 87e73347c65d34..2569208eae6dba 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -4765,8 +4765,8 @@ is_torch_available, is_torch_neuroncore_available, is_torch_npu_available, - is_torch_xpu_available, is_torch_tpu_available, + is_torch_xpu_available, is_torchvision_available, is_vision_available, logging, diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index ed9b255693b156..943276357e56f3 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -259,7 +259,7 @@ def freeze_model(model, module_exceptions=[]): } module_exceptions_mapped = [mapping[m] for m in module_exceptions] for module in model.modules(): - if module_exceptions and any([isinstance(module, t) for t in module_exceptions_mapped]): + if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped): module.requires_grad_(True) # Explicitely setting it to true to avoid any mistakes else: module.requires_grad_(False) diff --git a/src/transformers/models/llama/tokenization_llama.py b/src/transformers/models/llama/tokenization_llama.py index 808bb0ea52e7a8..0bf80a99d0d6a0 100644 --- a/src/transformers/models/llama/tokenization_llama.py +++ b/src/transformers/models/llama/tokenization_llama.py @@ -410,8 +410,8 @@ def _build_conversation_input_ids(self, conversation: "Conversation") -> List[in raise ValueError("Last message must be from user") dialogue = list(conversation.iter_texts()) - if not all([is_user for is_user, msg in dialogue[::2]]) or not all( - [not is_user for is_user, msg in dialogue[1::2]] + if not all(is_user for is_user, msg in dialogue[::2]) or not all( + not is_user for is_user, msg in dialogue[1::2] ): raise ValueError( "The model only supports 'user' and 'assistant' roles, starting with user and 
alternating (u/a/u/a/u...)" diff --git a/src/transformers/models/llama/tokenization_llama_fast.py b/src/transformers/models/llama/tokenization_llama_fast.py index 785869ea66bc29..5feba4d60058a7 100644 --- a/src/transformers/models/llama/tokenization_llama_fast.py +++ b/src/transformers/models/llama/tokenization_llama_fast.py @@ -230,8 +230,8 @@ def _build_conversation_input_ids(self, conversation: "Conversation"): raise ValueError("Last message must be from user") dialogue = list(conversation.iter_texts()) - if not all([is_user for is_user, msg in dialogue[::2]]) or not all( - [not is_user for is_user, msg in dialogue[1::2]] + if not all(is_user for is_user, msg in dialogue[::2]) or not all( + not is_user for is_user, msg in dialogue[1::2] ): raise ValueError( "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)" diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index a0da831588fb98..65337e08fd2231 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -97,10 +97,10 @@ is_torch_bf16_gpu_available, is_torch_neuroncore_available, is_torch_npu_available, - is_torch_xpu_available, is_torch_tensorrt_fx_available, is_torch_tf32_available, is_torch_tpu_available, + is_torch_xpu_available, is_torchaudio_available, is_torchdynamo_available, is_torchvision_available, diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 51ed697c53c553..8de5f5adef8aa6 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -48,9 +48,9 @@ is_torch_bf16_gpu_available, is_torch_neuroncore_available, is_torch_npu_available, - is_torch_xpu_available, is_torch_tf32_available, is_torch_tpu_available, + is_torch_xpu_available, logging, requires_backends, ) diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index be4a2ebb01f1fb..68c39c732e3c35 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -169,10 +169,10 @@ is_torch_mps_available, is_torch_neuroncore_available, is_torch_npu_available, - is_torch_xpu_available, is_torch_tensorrt_fx_available, is_torch_tf32_available, is_torch_tpu_available, + is_torch_xpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, diff --git a/tests/models/markuplm/test_processor_markuplm.py b/tests/models/markuplm/test_processor_markuplm.py index 4bafc32335dcdf..3959b231ff7472 100644 --- a/tests/models/markuplm/test_processor_markuplm.py +++ b/tests/models/markuplm/test_processor_markuplm.py @@ -340,7 +340,7 @@ def test_processor_case_3(self): # verify xpath_tags_seq # fmt: off - expected_xpaths_tags_seq = [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]] # noqa: + expected_xpaths_tags_seq = [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]] # noqa: # fmt: on self.assertSequenceEqual(inputs.xpath_tags_seq[1].tolist(), expected_xpaths_tags_seq) diff --git a/tests/trainer/test_trainer_distributed.py b/tests/trainer/test_trainer_distributed.py index 3ccff915a29fe0..3c4f31f5f5c3b4 100644 --- a/tests/trainer/test_trainer_distributed.py +++ b/tests/trainer/test_trainer_distributed.py @@ -23,8 +23,8 @@ execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, - require_torch_neuroncore, require_torch_multi_xpu, + require_torch_neuroncore, require_torch_npu, ) from transformers.training_args import ParallelMode @@ -160,7 +160,7 @@ def test_trainer(self): # successful return here == success - any errors would have caused an error in the sub-call -class TestTrainerDistributed(TestCasePlus): +class TestTrainerDistributedXPU(TestCasePlus): @require_torch_multi_xpu def test_trainer(self): distributed_args = f"""--nproc_per_node={torch.xpu.device_count()} From 8cacdd4c803c9779c3439aaf2b363a61df069800 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 24 Aug 2023 05:03:55 -0700 Subject: [PATCH 05/16] revert ruff unrelated fixes --- src/transformers/training_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 8de5f5adef8aa6..65f1b167ecbb8d 100644 --- 
a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -194,7 +194,7 @@ class TrainingArguments: prediction_loss_only (`bool`, *optional*, defaults to `False`): When performing evaluation and generating predictions, only returns the loss. per_device_train_batch_size (`int`, *optional*, defaults to 8): - The batch size per GPU/XPU)/TPU/MPS/NPU core/CPU for training. + The batch size per GPU/XPU/TPU/MPS/NPU core/CPU for training. per_device_eval_batch_size (`int`, *optional*, defaults to 8): The batch size per GPU/XPU/TPU/MPS/NPU core/CPU for evaluation. gradient_accumulation_steps (`int`, *optional*, defaults to 1): From 9fe01723a322259a566f30d020549ffe68767bef Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 24 Aug 2023 05:08:20 -0700 Subject: [PATCH 06/16] revert ruff unrelated fixes --- src/transformers/models/idefics/modeling_idefics.py | 2 +- src/transformers/models/llama/tokenization_llama.py | 9 +++------ src/transformers/models/llama/tokenization_llama_fast.py | 4 ++-- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 943276357e56f3..ed9b255693b156 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -259,7 +259,7 @@ def freeze_model(model, module_exceptions=[]): } module_exceptions_mapped = [mapping[m] for m in module_exceptions] for module in model.modules(): - if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped): + if module_exceptions and any([isinstance(module, t) for t in module_exceptions_mapped]): module.requires_grad_(True) # Explicitely setting it to true to avoid any mistakes else: module.requires_grad_(False) diff --git a/src/transformers/models/llama/tokenization_llama.py b/src/transformers/models/llama/tokenization_llama.py index e4263329e8c20b..808bb0ea52e7a8 100644 --- a/src/transformers/models/llama/tokenization_llama.py +++ b/src/transformers/models/llama/tokenization_llama.py @@ -154,10 +154,7 @@ def __init__( self.use_default_system_prompt = use_default_system_prompt self.sp_model = self.get_spm_processor() - - @property - def unk_token_length(self): - return len(self.sp_model.encode(str(self.unk_token))) + self.unk_token_length = len(self.sp_model.encode(str(self.unk_token))) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor def get_spm_processor(self): @@ -413,8 +410,8 @@ def _build_conversation_input_ids(self, conversation: "Conversation") -> List[in raise ValueError("Last message must be from user") dialogue = list(conversation.iter_texts()) - if not all(is_user for is_user, msg in dialogue[::2]) or not all( - not is_user for is_user, msg in dialogue[1::2] + if not all([is_user for is_user, msg in dialogue[::2]]) or not all( + [not is_user for is_user, msg in dialogue[1::2]] ): raise ValueError( "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)" diff --git a/src/transformers/models/llama/tokenization_llama_fast.py b/src/transformers/models/llama/tokenization_llama_fast.py index 5feba4d60058a7..785869ea66bc29 100644 --- a/src/transformers/models/llama/tokenization_llama_fast.py +++ b/src/transformers/models/llama/tokenization_llama_fast.py @@ -230,8 +230,8 @@ def _build_conversation_input_ids(self, conversation: "Conversation"): raise ValueError("Last message must be from user") dialogue = 
list(conversation.iter_texts()) - if not all(is_user for is_user, msg in dialogue[::2]) or not all( - not is_user for is_user, msg in dialogue[1::2] + if not all([is_user for is_user, msg in dialogue[::2]]) or not all( + [not is_user for is_user, msg in dialogue[1::2]] ): raise ValueError( "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)" From 8f71fde996e3a269b75fa11ad69f720868ca2894 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 24 Aug 2023 05:12:08 -0700 Subject: [PATCH 07/16] revert ruff unrelated fixes --- src/transformers/models/llama/tokenization_llama.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/llama/tokenization_llama.py b/src/transformers/models/llama/tokenization_llama.py index 808bb0ea52e7a8..7dfe092e407017 100644 --- a/src/transformers/models/llama/tokenization_llama.py +++ b/src/transformers/models/llama/tokenization_llama.py @@ -154,7 +154,10 @@ def __init__( self.use_default_system_prompt = use_default_system_prompt self.sp_model = self.get_spm_processor() - self.unk_token_length = len(self.sp_model.encode(str(self.unk_token))) + + @property + def unk_token_length(self): + return len(self.sp_model.encode(str(self.unk_token))) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor def get_spm_processor(self): From 604e11c22ad995daeaebfbcfc0d92321568f4a7e Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Tue, 29 Aug 2023 09:42:14 -0700 Subject: [PATCH 08/16] fix test --- tests/trainer/test_trainer_distributed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trainer/test_trainer_distributed.py b/tests/trainer/test_trainer_distributed.py index 3c4f31f5f5c3b4..4749edb69b64b1 100644 --- a/tests/trainer/test_trainer_distributed.py +++ b/tests/trainer/test_trainer_distributed.py @@ -160,8 +160,8 @@ def test_trainer(self): # successful return here == success - any errors would have caused an error in the sub-call +@require_torch_multi_xpu class TestTrainerDistributedXPU(TestCasePlus): - @require_torch_multi_xpu def test_trainer(self): distributed_args = f"""--nproc_per_node={torch.xpu.device_count()} --master_port={get_torch_dist_unique_port()} From 9cf738658438b92d46067a9bebf8f2669bc2fade Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Tue, 29 Aug 2023 20:40:59 -0700 Subject: [PATCH 09/16] review fixes --- src/transformers/trainer_utils.py | 21 +++++++++------------ src/transformers/utils/import_utils.py | 4 +--- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index 26f55ecc0e7877..5524ac68b36590 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -531,21 +531,18 @@ def stop(self, stage): if torch.cuda.is_available(): self.gpu_mem_used_now = self.torch.cuda.memory_allocated() self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() - self.gpu[self.cur_stage] = { - "begin": self.gpu_mem_used_at_start, - "end": self.gpu_mem_used_now, - "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), - "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), - } elif is_torch_xpu_available(): self.gpu_mem_used_now = self.torch.xpu.memory_allocated() self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() - self.gpu[self.cur_stage] = { - "begin": self.gpu_mem_used_at_start, - "end": self.gpu_mem_used_now, - "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), - 
"peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), - } + else: + raise ValueError("No available GPU device found!") + + self.gpu[self.cur_stage] = { + "begin": self.gpu_mem_used_at_start, + "end": self.gpu_mem_used_now, + "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), + "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), + } # cpu self.cpu_mem_used_now = self.cpu_mem_used() diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index a5cec5a83f9e4b..5d5401cc166977 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -531,9 +531,7 @@ def get_major_and_minor_from_version(full_version): @lru_cache def is_torch_xpu_available(check_device=False): "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" - if is_ipex_available(): - import torch - else: + if not is_ipex_available(): return False import intel_extension_for_pytorch # noqa: F401 From d15249408a598dfcad839db946ac1ac10c0da42b Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Tue, 29 Aug 2023 20:56:31 -0700 Subject: [PATCH 10/16] review fixes --- src/transformers/trainer_utils.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index 5524ac68b36590..16843c0a5f65cf 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -536,14 +536,14 @@ def stop(self, stage): self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() else: raise ValueError("No available GPU device found!") - - self.gpu[self.cur_stage] = { - "begin": self.gpu_mem_used_at_start, - "end": self.gpu_mem_used_now, - "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), - "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), - } - + + self.gpu[self.cur_stage] = { + "begin": self.gpu_mem_used_at_start, + "end": self.gpu_mem_used_now, + "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), + "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), + } + # cpu self.cpu_mem_used_now = self.cpu_mem_used() self.cpu[self.cur_stage] = { From 9072924e47918081889ce63bff39de7bbd6dbc1c Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Wed, 30 Aug 2023 09:09:29 -0700 Subject: [PATCH 11/16] black fixed --- src/transformers/testing_utils.py | 4 ++-- src/transformers/trainer_utils.py | 4 ++-- src/transformers/utils/import_utils.py | 1 + 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 1e796dd69abe0c..a7e36322a6b099 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -637,8 +637,8 @@ def require_torch_xpu(test_case): def require_torch_multi_xpu(test_case): """ - Decorator marking a test that requires a multi-XPU setup with IPEX and atleast one XPU device. These tests are skipped - on a machine without IPEX or multiple XPUs. + Decorator marking a test that requires a multi-XPU setup with IPEX and atleast one XPU device. These tests are + skipped on a machine without IPEX or multiple XPUs. 
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu" """ diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index 16843c0a5f65cf..931d0067e99d23 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -536,14 +536,14 @@ def stop(self, stage): self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() else: raise ValueError("No available GPU device found!") - + self.gpu[self.cur_stage] = { "begin": self.gpu_mem_used_at_start, "end": self.gpu_mem_used_now, "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), } - + # cpu self.cpu_mem_used_now = self.cpu_mem_used() self.cpu[self.cur_stage] = { diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 5d5401cc166977..e30eeba8730206 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -29,6 +29,7 @@ from types import ModuleType from typing import Any, Tuple, Union +import torch from packaging import version from . import logging From b6056c3a76e86023b5c3009b5f896c8ec7f889a9 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 31 Aug 2023 22:45:56 -0700 Subject: [PATCH 12/16] review commits --- src/transformers/training_args.py | 10 +++++++--- src/transformers/utils/import_utils.py | 4 ++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 20c019a9e5b0c5..d703a6d67b3582 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1365,9 +1365,13 @@ def __post_init__(self): ) elif not is_torch_xpu_available(): # xpu - raise ValueError( - "Your setup doesn't support bf16/xpu. You need torch>=1.12, using Intel XPU/GPU with IPEX installed" - ) + import torch + parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) + is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") + if not is_torch_greater_or_equal_than_1_12: + raise ValueError( + "Your setup doesn't support bf16/xpu. You need torch>=1.12, using Intel XPU/GPU with IPEX installed" + ) if self.fp16 and self.bf16: raise ValueError("At most one of fp16 and bf16 can be True, but not both") diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index e30eeba8730206..96d66663828a7d 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -29,7 +29,6 @@ from types import ModuleType from typing import Any, Tuple, Union -import torch from packaging import version from . 
import logging @@ -534,7 +533,8 @@ def is_torch_xpu_available(check_device=False): "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if not is_ipex_available(): return False - + + import torch import intel_extension_for_pytorch # noqa: F401 if check_device: From 94adf0a3ad387546e97724961222f620d7b253c1 Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Thu, 31 Aug 2023 23:07:23 -0700 Subject: [PATCH 13/16] review commits --- src/transformers/training_args.py | 1 + src/transformers/utils/import_utils.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index d703a6d67b3582..1492ed3f928aaa 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1366,6 +1366,7 @@ def __post_init__(self): elif not is_torch_xpu_available(): # xpu import torch + parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") if not is_torch_greater_or_equal_than_1_12: diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 96d66663828a7d..ae76a78ce21707 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -533,9 +533,9 @@ def is_torch_xpu_available(check_device=False): "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if not is_ipex_available(): return False - - import torch + import intel_extension_for_pytorch # noqa: F401 + import torch if check_device: try: From c372fd0d9e0d933802ea06865721537234a22d1d Mon Sep 17 00:00:00 2001 From: abhilash1910 Date: Fri, 1 Sep 2023 01:01:47 -0700 Subject: [PATCH 14/16] style fix --- src/transformers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 3ebf5017d7809f..0c1bb2fad65f80 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -741,9 +741,9 @@ "is_torch_available", "is_torch_neuroncore_available", "is_torch_npu_available", - "is_torch_xpu_available", "is_torch_tpu_available", "is_torchvision_available", + "is_torch_xpu_available", "is_vision_available", "logging", ], From eb054cee3af366aa6623ef86d6521e08adfe7451 Mon Sep 17 00:00:00 2001 From: Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com> Date: Tue, 5 Sep 2023 16:45:18 +0530 Subject: [PATCH 15/16] use pytorch_utils --- src/transformers/training_args.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 3223c0da1ad87f..a17966928a4fa8 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1366,10 +1366,8 @@ def __post_init__(self): ) elif not is_torch_xpu_available(): # xpu - import torch + from .pytorch_utils import is_torch_greater_or_equal_than_1_12 - parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) - is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") if not is_torch_greater_or_equal_than_1_12: raise ValueError( "Your setup doesn't support bf16/xpu. 
You need torch>=1.12, using Intel XPU/GPU with IPEX installed" From a3899994b1d44e455afc9e7383dfc5a1e752cb99 Mon Sep 17 00:00:00 2001 From: Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com> Date: Tue, 5 Sep 2023 16:47:05 +0530 Subject: [PATCH 16/16] revert markuplm test --- tests/models/markuplm/test_processor_markuplm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/markuplm/test_processor_markuplm.py b/tests/models/markuplm/test_processor_markuplm.py index 3959b231ff7472..4bafc32335dcdf 100644 --- a/tests/models/markuplm/test_processor_markuplm.py +++ b/tests/models/markuplm/test_processor_markuplm.py @@ -340,7 +340,7 @@ def test_processor_case_3(self): # verify xpath_tags_seq # fmt: off - expected_xpaths_tags_seq = [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]] # noqa: + expected_xpaths_tags_seq = [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]] # noqa: # fmt: on self.assertSequenceEqual(inputs.xpath_tags_seq[1].tolist(), expected_xpaths_tags_seq)
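
Taken together, the sixteen patches above add XPU (Intel GPU) support: an `is_torch_xpu_available()` helper in `import_utils.py`, XPU-aware seeding and memory tracking in `trainer_utils.py`, XPU device selection in `TrainingArguments._setup_devices`, and `require_torch_xpu` / `require_torch_multi_xpu` test decorators. A minimal sketch of how the new helper and the automatic device resolution might be exercised from user code — assuming a Transformers build that already contains these patches, with accelerate and `intel_extension_for_pytorch` (IPEX) installed so that `torch.xpu` is registered; the output directory name is an arbitrary placeholder:

    import torch

    from transformers import TrainingArguments, is_torch_xpu_available

    if is_torch_xpu_available():
        # IPEX registers the torch.xpu namespace; the patched _setup_devices
        # selects "xpu:0" and counts it as a single accelerator (n_gpu == 1).
        print("XPU devices visible:", torch.xpu.device_count())

    # With the patches applied, device resolution happens automatically when
    # TrainingArguments is instantiated: on a machine with IPEX and an XPU,
    # args.device should report an xpu device.
    args = TrainingArguments(output_dir="tmp_xpu_smoke_test")  # placeholder path
    print(args.device, args.n_gpu)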
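On the test side, the new `require_torch_multi_xpu` decorator (and the `TestTrainerDistributedXPU` case it guards) only runs on machines with IPEX and more than one XPU device; as the added docstring notes, those tests can be selected with `pytest -sv ./tests -k "multi_xpu"`, and the distributed case launches itself through `torchrun --nproc_per_node=<number of XPUs>`.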