latest checkpoint push
- __pycache__/model.cpython-311.pyc +0 -0
- __pycache__/trainer.cpython-311.pyc +0 -0
- app.py +1 -1
- checkpoints/pretrained_checkpoint.ckpt +1 -1
- trainer.py +2 -2
__pycache__/model.cpython-311.pyc
ADDED
Binary file (2.41 kB)

__pycache__/trainer.cpython-311.pyc
ADDED
Binary file (5.28 kB)
app.py
CHANGED
@@ -34,7 +34,7 @@ def plot_mel_spectrogram(mel_spec):
 def get_or_load_model():
     if 'model' not in st.session_state or 'tokenizer' not in st.session_state or 'processor' not in st.session_state:
         ckpt_path = "checkpoints/pretrained_checkpoint.ckpt"
-        model = SpeechLLMLightning.load_from_checkpoint(ckpt_path, quantize=
+        model = SpeechLLMLightning.load_from_checkpoint(ckpt_path, quantize=False)
         tokenizer = model.llm_tokenizer
         model.eval()
         model.freeze()
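Note: the explicit quantize=False passed to load_from_checkpoint takes effect because PyTorch Lightning forwards extra keyword arguments to the module's __init__, overriding whatever value save_hyperparameters() stored in the checkpoint. A minimal sketch of that override pattern (ToyModule and toy.ckpt are illustrative placeholders, not part of this repo):

import pytorch_lightning as pl
import torch.nn as nn

class ToyModule(pl.LightningModule):
    # Hypothetical module mirroring the pattern used in trainer.py.
    def __init__(self, hidden_dim=32, quantize=False):
        super().__init__()
        self.save_hyperparameters()  # hidden_dim and quantize are stored in the checkpoint
        self.layer = nn.Linear(hidden_dim, hidden_dim)

# Hyperparameters are restored from the checkpoint, but kwargs given here win,
# so quantization can be forced off at load time regardless of what was saved.
model = ToyModule.load_from_checkpoint("toy.ckpt", quantize=False)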
checkpoints/pretrained_checkpoint.ckpt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:eaaec0efc7f2531bc81a4e11f875d270c000ed0bd4ee1b94e7c70ee887429f4b
 size 8644540057
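Note: the checkpoint itself is stored via Git LFS, so only this small pointer file (hash plus size) changes in the Git history. If the local file needs checking against the new pointer, a streaming SHA-256 comparison avoids loading the ~8.6 GB file into memory (the path and helper name below are illustrative):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Read in 1 MiB chunks so the large checkpoint is never held in memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "eaaec0efc7f2531bc81a4e11f875d270c000ed0bd4ee1b94e7c70ee887429f4b"
assert sha256_of("checkpoints/pretrained_checkpoint.ckpt") == expected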
trainer.py
CHANGED
@@ -18,7 +18,7 @@ else:
 device = "cpu"
 
 class SpeechLLMLightning(pl.LightningModule):
-    def __init__(self, audio_enc_dim=512, llm_dim=2048, llm_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0", quantize=
+    def __init__(self, audio_enc_dim=512, llm_dim=2048, llm_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0", quantize=False):
         super().__init__()
         self.save_hyperparameters()
 
@@ -52,7 +52,7 @@ class SpeechLLMLightning(pl.LightningModule):
         self.llm_model.eval()
 
         if quantize:
-            self.llm_model = jit.script(self.llm_model)
+            # self.llm_model = jit.script(self.llm_model)
             self.llm_model = quantize_dynamic(
                 self.llm_model, {nn.Linear}, dtype=torch.qint8
             )
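Note: dynamic quantization does not require TorchScript, which is presumably why the jit.script call is commented out; quantize_dynamic simply swaps nn.Linear modules for versions with qint8 weights and runs on CPU. A standalone sketch of the same call on a toy model (the small Sequential stands in for the actual LLM):

import torch
import torch.nn as nn
from torch.quantization import quantize_dynamic

# Illustrative stand-in for the LLM: a couple of linear layers, eval mode, CPU.
model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8)).eval()

# Replace every nn.Linear with a dynamically quantized version: weights stored
# as qint8, activations quantized on the fly at inference time.
qmodel = quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)

with torch.no_grad():
    out = qmodel(torch.randn(1, 64))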