RecursionError: maximum recursion depth exceeded
logged as "PMC_LLaMA_13B" model & while loading the registered model got into RecursionError: maximum recursion depth exceeded
transformers==4.30.2
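For context, the traceback below enters AutoTokenizer.from_pretrained from a custom pyfunc's load_context, so the model was logged as an mlflow.pyfunc.PythonModel along these lines. This is a minimal sketch of that pattern, not the exact notebook code; the class name, the "snapshot" artifact key, and the predict body are assumptions:

import mlflow
from transformers import AutoModelForCausalLM, AutoTokenizer

# Minimal sketch of the logged pyfunc; the class name, the "snapshot"
# artifact key, and the predict body are assumptions, not the original code.
class PMCLlamaWrapper(mlflow.pyfunc.PythonModel):
    def load_context(self, context):
        # The traceback below enters AutoTokenizer.from_pretrained from here.
        model_dir = context.artifacts["snapshot"]
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
        self.model = AutoModelForCausalLM.from_pretrained(model_dir)

    def predict(self, context, model_input):
        inputs = self.tokenizer(list(model_input["prompt"]), return_tensors="pt")
        outputs = self.model.generate(**inputs, max_new_tokens=64)
        return self.tokenizer.batch_decode(outputs, skip_special_tokens=True)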
1 # loaded_model = mlflow.pyfunc.load_model(f"models:/{registered_name}/2",dst_path='/local_disk0')
----> 3 loaded_model = mlflow.pyfunc.load_model(f"models:/")
File /databricks/python/lib/python3.10/site-packages/mlflow/pyfunc/__init__.py:597, in load_model(model_uri, suppress_warnings, dst_path)
595 _add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
596 data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
--> 597 model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
598 predict_fn = conf.get("predict_fn", "predict")
599 return PyFuncModel(model_meta=model_meta, model_impl=model_impl, predict_fn=predict_fn)
File /databricks/python/lib/python3.10/site-packages/mlflow/pyfunc/model.py:302, in _load_pyfunc(model_path)
297 artifacts[saved_artifact_name] = os.path.join(
298 model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH]
299 )
301 context = PythonModelContext(artifacts=artifacts)
--> 302 python_model.load_context(context=context)
303 signature = mlflow.models.Model.load(model_path).signature
304 return _PythonModelPyfuncWrapper(
305 python_model=python_model, context=context, signature=signature
306 )
File ~/.ipykernel/3009/command-3731584185085058-309509596:58, in load_context(self, context)
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py:727, in AutoTokenizer.from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs)
723 if tokenizer_class is None:
724 raise ValueError(
725 f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
726 )
--> 727 return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
729 # Otherwise we have to be creative.
730 # if model is an encoder decoder, the encoder tokenizer class is used by default
731 if isinstance(config, EncoderDecoderConfig):
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1854, in PreTrainedTokenizerBase.from_pretrained(cls, pretrained_model_name_or_path, cache_dir, force_download, local_files_only, token, revision, *init_inputs, **kwargs)
1851 else:
1852 logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
-> 1854 return cls._from_pretrained(
1855 resolved_vocab_files,
1856 pretrained_model_name_or_path,
1857 init_configuration,
1858 *init_inputs,
1859 token=token,
1860 cache_dir=cache_dir,
1861 local_files_only=local_files_only,
1862 _commit_hash=commit_hash,
1863 _is_local=is_local,
1864 **kwargs,
1865 )
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:2017, in PreTrainedTokenizerBase._from_pretrained(cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, token, cache_dir, local_files_only, _commit_hash, _is_local, *init_inputs, **kwargs)
2015 # Instantiate tokenizer.
2016 try:
-> 2017 tokenizer = cls(*init_inputs, **init_kwargs)
2018 except OSError:
2019 raise OSError(
2020 "Unable to load vocabulary from file. "
2021 "Please check that the provided vocabulary is accessible and not corrupted."
2022 )
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py:128, in LlamaTokenizerFast.__init__(self, vocab_file, tokenizer_file, clean_up_tokenization_spaces, unk_token, bos_token, eos_token, add_bos_token, add_eos_token, use_default_system_prompt, **kwargs)
126 self._add_bos_token = add_bos_token
127 self._add_eos_token = add_eos_token
--> 128 self.update_post_processor()
129 self.use_default_system_prompt = use_default_system_prompt
130 self.vocab_file = vocab_file
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py:138, in LlamaTokenizerFast.update_post_processor(self)
134 """
135 Updates the underlying post processor with the current bos_token
and eos_token
.
136 """
137 bos = self.bos_token
--> 138 bos_token_id = self.bos_token_id
140 eos = self.eos_token
141 eos_token_id = self.eos_token_id
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1141, in SpecialTokensMixin.bos_token_id(self)
1139 if self._bos_token is None:
1140 return None
-> 1141 return self.convert_tokens_to_ids(self.bos_token)
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:270, in PreTrainedTokenizerFast.convert_tokens_to_ids(self, tokens)
267 return None
269 if isinstance(tokens, str):
--> 270 return self._convert_token_to_id_with_added_voc(tokens)
272 return [self._convert_token_to_id_with_added_voc(token) for token in tokens]
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:277, in PreTrainedTokenizerFast._convert_token_to_id_with_added_voc(self, token)
275 index = self._tokenizer.token_to_id(token)
276 if index is None:
--> 277 return self.unk_token_id
278 return index
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1160, in SpecialTokensMixin.unk_token_id(self)
1158 if self._unk_token is None:
1159 return None
-> 1160 return self.convert_tokens_to_ids(self.unk_token)
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:270, in PreTrainedTokenizerFast.convert_tokens_to_ids(self, tokens)
267 return None
269 if isinstance(tokens, str):
--> 270 return self._convert_token_to_id_with_added_voc(tokens)
272 return [self._convert_token_to_id_with_added_voc(token) for token in tokens]
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:277, in PreTrainedTokenizerFast._convert_token_to_id_with_added_voc(self, token)
275 index = self._tokenizer.token_to_id(token)
276 if index is None:
--> 277 return self.unk_token_id
278 return index
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1160, in SpecialTokensMixin.unk_token_id(self)
1158 if self._unk_token is None:
1159 return None
-> 1160 return self.convert_tokens_to_ids(self.unk_token)
[... skipping similar frames: PreTrainedTokenizerFast._convert_token_to_id_with_added_voc at line 277 (986 times), PreTrainedTokenizerFast.convert_tokens_to_ids at line 270 (986 times), SpecialTokensMixin.unk_token_id at line 1160 (986 times)]
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:270, in PreTrainedTokenizerFast.convert_tokens_to_ids(self, tokens)
267 return None
269 if isinstance(tokens, str):
--> 270 return self._convert_token_to_id_with_added_voc(tokens)
272 return [self._convert_token_to_id_with_added_voc(token) for token in tokens]
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:277, in PreTrainedTokenizerFast._convert_token_to_id_with_added_voc(self, token)
275 index = self._tokenizer.token_to_id(token)
276 if index is None:
--> 277 return self.unk_token_id
278 return index
File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1160, in SpecialTokensMixin.unk_token_id(self)
1158 if self._unk_token is None:
1159 return None
-> 1160 return self.convert_tokens_to_ids(self.unk_token)
RecursionError: maximum recursion depth exceeded
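The repeated frames at the bottom show the loop: bos_token_id calls convert_tokens_to_ids(self.bos_token); when the underlying tokenizer's token_to_id returns None, _convert_token_to_id_with_added_voc falls back to self.unk_token_id (line 277), which calls convert_tokens_to_ids(self.unk_token) again, and if the unk token is also missing from the loaded vocabulary the three methods cycle until Python's recursion limit. A standalone sketch of that shape (illustrative code only, not the transformers implementation):

# Standalone illustration of the cycle in the traceback above; this is not
# transformers code, just the same three-method loop in miniature.
class BrokenTokenizer:
    def __init__(self, vocab, unk_token):
        self.vocab = vocab          # token -> id
        self.unk_token = unk_token  # assumed missing from vocab

    @property
    def unk_token_id(self):
        # Mirrors SpecialTokensMixin.unk_token_id (line 1160 above).
        return self.convert_tokens_to_ids(self.unk_token)

    def convert_tokens_to_ids(self, token):
        # Mirrors PreTrainedTokenizerFast.convert_tokens_to_ids (line 270).
        return self._convert_token_to_id_with_added_voc(token)

    def _convert_token_to_id_with_added_voc(self, token):
        index = self.vocab.get(token)
        if index is None:
            # Mirrors line 277: falling back to unk_token_id re-enters the
            # cycle whenever unk_token itself is missing from the vocab.
            return self.unk_token_id
        return index

# With "<unk>" absent from the vocabulary, this recurses until
# RecursionError, like the frames repeated 986 times above.
tok = BrokenTokenizer(vocab={"hello": 0}, unk_token="<unk>")
tok.unk_token_id  # raises RecursionError: maximum recursion depth exceeded

If that reading is right, it would suggest the tokenizer files saved with the registered model resolve special tokens that are not actually present in the serialized vocabulary.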