You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
These are the files in my model directory: (rzr_llmshearing) root@dsw70428-7485c78d87-pp4rr:/mnt/workspace/workgroup/qianqin.rzr/LLM-Shearing# ls ../model/llama-2-7b-hf/
config.json LICENSE.txt model-00002-of-00002.safetensors pytorch_model-00001-of-00002.bin pytorch_model.bin.index.json Responsible-Use-Guide.pdf tokenizer_config.json tokenizer.model
generation_config.json model-00001-of-00002.safetensors model.safetensors.index.json pytorch_model-00002-of-00002.bin README.md special_tokens_map.json tokenizer.json USE_POLICY.md
What can I do to fix this error, please?
The text was updated successfully, but these errors were encountered:
(rzr_llmshearing) root@dsw70428-7485c78d87-pp4rr:/mnt/workspace/workgroup/qianqin.rzr/LLM-Shearing# sh modelprepare.sh
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /mnt/workspace/workgroup/qianli.myf/anaconda3/envs/rzr_llmshearing/lib/python3.9/runpy.py:197 in │
│ _run_module_as_main │
│ │
│ 194 │ main_globals = sys.modules["__main__"].__dict__ │
│ 195 │ if alter_argv: │
│ 196 │ │ sys.argv[0] = mod_spec.origin │
│ ❱ 197 │ return _run_code(code, main_globals, None, │
│ 198 │ │ │ │ │ "__main__", mod_spec) │
│ 199 │
│ 200 def run_module(mod_name, init_globals=None, │
│ │
│ /mnt/workspace/workgroup/qianli.myf/anaconda3/envs/rzr_llmshearing/lib/python3.9/runpy.py:87 in │
│ _run_code │
│ │
│ 84 │ │ │ │ │ loader = loader, │
│ 85 │ │ │ │ │ package = pkg_name, │
│ 86 │ │ │ │ │ spec = mod_spec) │
│ ❱ 87 │ exec(code, run_globals) │
│ 88 │ return run_globals │
│ 89 │
│ 90 def _run_module_code(code, init_globals=None, │
│ │
│ /mnt/workspace/workgroup/qianqin.rzr/LLM-Shearing/llmshearing/utils/composer_to_hf.py:107 in │
│ │
│ │
│ 104 if __name__ == "__main__": │
│ 105 │ composer_model_path, output_path, other_args = sys.argv[1], sys.argv[2], sys.argv[3: │
│ 106 │ cli_cfg = om.from_cli(other_args) │
│ ❱ 107 │ save_composer_to_hf(composer_model_path, output_path, cli_cfg) │
│ 108 │
│ │
│ /mnt/workspace/workgroup/qianqin.rzr/LLM-Shearing/llmshearing/utils/composer_to_hf.py:89 in │
│ save_composer_to_hf │
│ │
│ 86 def save_composer_to_hf(composer_model_path, output_path=None, model_config:om = None): │
│ 87 │ """ convert composer ckpt's weights to huggingface """ │
│ 88 │ │
│ ❱ 89 │ weights = torch.load(composer_model_path)["state"]["model"] │
│ 90 │ num_layers = get_layer_num_from_weights(weights) │
│ 91 │ keymap = get_key_map_from_composer_to_hf(num_layers) │
│ 92 │ hf_weights = {keymap[key]: weights[key] for key in weights if "rotary" not in key} │
│ │
│ /mnt/workspace/workgroup/qianli.myf/anaconda3/envs/rzr_llmshearing/lib/python3.9/site-packages/t │
│ orch/serialization.py:791 in load │
│ │
│ 788 │ if 'encoding' not in pickle_load_args.keys(): │
│ 789 │ │ pickle_load_args['encoding'] = 'utf-8' │
│ 790 │ │
│ ❱ 791 │ with _open_file_like(f, 'rb') as opened_file: │
│ 792 │ │ if _is_zipfile(opened_file): │
│ 793 │ │ │ # The zipfile reader is going to advance the current file position. │
│ 794 │ │ │ # If we want to actually tail call to torch.jit.load, we need to │
│ │
│ /mnt/workspace/workgroup/qianli.myf/anaconda3/envs/rzr_llmshearing/lib/python3.9/site-packages/t │
│ orch/serialization.py:271 in _open_file_like │
│ │
│ 268 │
│ 269 def _open_file_like(name_or_buffer, mode): │
│ 270 │ if _is_path(name_or_buffer): │
│ ❱ 271 │ │ return _open_file(name_or_buffer, mode) │
│ 272 │ else: │
│ 273 │ │ if 'w' in mode: │
│ 274 │ │ │ return _open_buffer_writer(name_or_buffer) │
│ │
│ /mnt/workspace/workgroup/qianli.myf/anaconda3/envs/rzr_llmshearing/lib/python3.9/site-packages/t │
│ orch/serialization.py:252 in init │
│ │
│ 249 │
│ 250 class _open_file(_opener): │
│ 251 │ def __init__(self, name, mode): │
│ ❱ 252 │ │ super().__init__(open(name, mode)) │
│ 253 │ │
│ 254 │ def __exit__(self, *args): │
│ 255 │ │ self.file_like.close() │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
IsADirectoryError: [Errno 21] Is a directory: '/mnt/workspace/workgroup/qianqin.rzr/model/llama-2-7b-hf'
These are the files in my model directory: (rzr_llmshearing) root@dsw70428-7485c78d87-pp4rr:/mnt/workspace/workgroup/qianqin.rzr/LLM-Shearing# ls ../model/llama-2-7b-hf/
config.json LICENSE.txt model-00002-of-00002.safetensors pytorch_model-00001-of-00002.bin pytorch_model.bin.index.json Responsible-Use-Guide.pdf tokenizer_config.json tokenizer.model
generation_config.json model-00001-of-00002.safetensors model.safetensors.index.json pytorch_model-00002-of-00002.bin README.md special_tokens_map.json tokenizer.json USE_POLICY.md
What can I do to fix this error, please?
The text was updated successfully, but these errors were encountered: