Commit da863dd
Parent(s): f221b0a

Fix Loading Model with Modified Config File (#52)
- Fix Loading Model with Modified Config File (0f046a80a6135dd47a4663c94b290a8e2ffa2af6)
Co-authored-by: Samuele Marino <SamMaggioli@users.noreply.huggingface.co>
modeling_lora.py CHANGED (+11 -1)
@@ -337,7 +337,17 @@ class XLMRobertaLoRA(XLMRobertaPreTrainedModel):
     ):
         if config.load_trained_adapters:  # checkpoint already contains LoRA adapters
             return super().from_pretrained(
-                pretrained_model_name_or_path,
+                pretrained_model_name_or_path,
+                *model_args,
+                config=config,
+                cache_dir=cache_dir,
+                ignore_mismatched_sizes=ignore_mismatched_sizes,
+                force_download=force_download,
+                local_files_only=local_files_only,
+                token=token,
+                revision=revision,
+                use_safetensors=use_safetensors,
+                **kwargs
             )
         else:  # initializing new adapters
             roberta = XLMRobertaModel.from_pretrained(
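In short: before this commit, the `load_trained_adapters` branch forwarded only `pretrained_model_name_or_path` to `super().from_pretrained()`, so a config the caller had modified (along with `cache_dir`, `revision`, `token`, and the other loading options) never reached the underlying loader. A minimal sketch of the scenario the fix addresses, assuming a hypothetical repo id and config override:

from transformers import AutoConfig, AutoModel

# Hypothetical repo id for a checkpoint that uses the custom XLMRobertaLoRA
# class and already contains trained LoRA adapters
# (i.e. config.load_trained_adapters is True).
repo_id = "org/xlm-roberta-lora-checkpoint"

# Load the config and modify it before loading the model.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
config.output_hidden_states = True  # example user override

# Before the fix, config=... was dropped on the way to super().from_pretrained()
# and the override above was silently ignored; after the fix it takes effect.
model = AutoModel.from_pretrained(repo_id, config=config, trust_remote_code=True)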