Error loading model for koboldai4bit version: "Exception in thread Thread-29:"

#1
by Eilian - opened

Hi, it shows the following.
"Exception in thread Thread-29:
Traceback (most recent call last):
File "B:\python\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "B:\python\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "B:\python\lib\site-packages\socketio\server.py", line 731, in _handle_event_internal
r = server._trigger_event(data[0], namespace, sid, *data[1:])
File "B:\python\lib\site-packages\socketio\server.py", line 756, in _trigger_event
return self.handlers[namespace][event](*args)
File "B:\python\lib\site-packages\flask_socketio\__init__.py", line 282, in _handler
return self.handle_event(handler, message, namespace, sid,
File "B:\python\lib\site-packages\flask_socketio\__init__.py", line 828, in _handle_event
ret = handler(*args)
File "aiserver.py", line 615, in g
return f(*a, **k)
File "aiserver.py", line 3191, in get_message
load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model'])
File "aiserver.py", line 1980, in load_model
model.load(
File "G:\KoboldAI\modeling\inference_model.py", line 177, in load
self._load(save_model=save_model, initial_load=initial_load)
File "G:\KoboldAI\modeling\inference_models\hf_torch_4bit.py", line 199, in _load
self.tokenizer = self._get_tokenizer(self.get_local_model_path())
File "G:\KoboldAI\modeling\inference_models\hf_torch_4bit.py", line 391, in _get_tokenizer
tokenizer = LlamaTokenizer.from_pretrained(utils.koboldai_vars.custmodpth)
File "aiserver.py", line 112, in new_pretrainedtokenizerbase_from_pretrained
tokenizer = old_pretrainedtokenizerbase_from_pretrained(cls, *args, **kwargs)
File "B:\python\lib\site-packages\transformers\tokenization_utils_base.py", line 1811, in from_pretrained
return cls.from_pretrained(
File "B:\python\lib\site-packages\transformers\tokenization_utils_base.py", line 1965, in from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "B:\python\lib\site-packages\transformers\models\llama\tokenization_llama.py", line 96, in __init__
self.sp_model.Load(vocab_file)
File "B:\python\lib\site-packages\sentencepiece\__init__.py", line 905, in Load
return self.LoadFromFile(model_file)
File "B:\python\lib\site-packages\sentencepiece\__init__.py", line 310, in LoadFromFile
return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
TypeError: not a string
Connection Attempt: 127.0.0.1"

Sign up or log in to comment