shangeth committed
Commit 3594d09
1 Parent(s): 0ffa9cf

Upload model

config.json CHANGED
@@ -1,13 +1,18 @@
 {
+  "architectures": [
+    "SpeechLLMModel"
+  ],
   "audio_enc_dim": 1280,
   "audio_encoder_name": "facebook/hubert-xlarge-ll60k",
   "audio_processor_name": "facebook/hubert-large-ls960-ft",
   "auto_map": {
-    "AutoConfig": "config.SpeechLLMModelConfig"
+    "AutoConfig": "config.SpeechLLMModelConfig",
+    "AutoModel": "model.SpeechLLMModel"
   },
   "llm_dim": 2048,
   "llm_model_checkpoint": "hf_repo/llm_model_checkpoint",
   "llm_model_name": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
   "model_type": "custom_model",
+  "torch_dtype": "float32",
   "transformers_version": "4.38.2"
 }
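The new architectures and auto_map entries register the remote classes for auto-loading: AutoConfig now resolves to SpeechLLMModelConfig in config.py and AutoModel to SpeechLLMModel in model.py, so the checkpoint can be instantiated without importing the repo code by hand. A minimal loading sketch, assuming trust_remote_code is enabled; the repo id below is a placeholder, not something stated in this commit:

from transformers import AutoConfig, AutoModel

# Placeholder repo id; substitute the actual Hub repository that hosts this commit.
repo_id = "<user>/<repo>"

# trust_remote_code=True lets transformers execute config.py / model.py from the repo,
# as wired up by the auto_map above.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)  # -> SpeechLLMModelConfig
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)    # -> SpeechLLMModel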
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:778179f27c443457a4ac527afe4c58d25902f410bdc492e7f4e09ffd23dfc6c7
+size 4975727392
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ad7d5e98460221a543bba83cb187111a948f22a34be26ab16fda691f2d83bc2
+size 3405770712
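Both weight shards are stored through Git LFS, so the committed files are small pointer blobs: the version line names the LFS spec, oid records the SHA-256 of the real shard, and size gives its length in bytes (roughly 5.0 GB and 3.4 GB here). A small sketch, not part of the commit, of how a downloaded shard could be checked against its pointer; the local file path is an assumption:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so multi-gigabyte shards do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest taken from the pointer file above.
expected = "9ad7d5e98460221a543bba83cb187111a948f22a34be26ab16fda691f2d83bc2"
assert sha256_of("model-00002-of-00002.safetensors") == expected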
model.py ADDED
@@ -0,0 +1,105 @@
+import torch
+from torch import nn
+import torchaudio
+from transformers import PreTrainedModel, AutoModelForCausalLM, AutoTokenizer, HubertModel, AutoProcessor, AutoConfig, AutoModel
+from .config import SpeechLLMModelConfig
+from peft import LoraConfig, get_peft_model
+
+class HubertXCNNEnoder(nn.Module):
+    def __init__(self, audio_enc_dim, llm_dim, encoder_name):
+        super().__init__()
+        config = AutoConfig.from_pretrained(encoder_name)
+        self.encoder = AutoModel.from_config(config)
+
+        self.cnn = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv1d(audio_enc_dim, llm_dim // 2, kernel_size=5, stride=1, padding=0),
+            nn.ReLU(),
+            nn.Conv1d(llm_dim // 2, llm_dim, kernel_size=5, stride=2, padding=0),
+            nn.ReLU(),
+            nn.Conv1d(llm_dim, llm_dim, kernel_size=3, stride=1, padding=0),
+        )
+
+    def forward(self, x):
+        x = self.encoder(x).last_hidden_state
+        x = self.cnn(x.transpose(1, 2)).transpose(1, 2)
+        return x
+
+    def return_device(self):
+        return next(self.parameters()).device
+
+class SpeechLLMModel(PreTrainedModel):
+    config_class = SpeechLLMModelConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.audio_processor = AutoProcessor.from_pretrained(config.audio_processor_name)
+        self.audio_encoder = HubertXCNNEnoder(config.audio_enc_dim, config.llm_dim, config.audio_encoder_name)
+
+        llm_config = AutoConfig.from_pretrained(config.llm_model_name)
+        self.llm_model = AutoModelForCausalLM.from_config(llm_config)
+        self.llm_tokenizer = AutoTokenizer.from_pretrained(config.llm_model_name)
+
+        peft_config = LoraConfig(
+            r=4,
+            lora_alpha=8,
+            target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj', 'up_proj', 'down_proj', 'gate_proj'],
+            lora_dropout=0.05,
+            task_type="CAUSAL_LM",
+        )
+        self.llm_model = get_peft_model(self.llm_model, peft_config)
+
+    def encode(self, mel, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids):
+        batch_size = mel.shape[0]
+
+        with torch.no_grad():
+            speech_embeds = self.audio_encoder(mel)
+            embedder = self.llm_model.model.model.embed_tokens
+            pre_prompt_embeds = embedder(pre_tokenized_ids)
+            post_prompt_embeds = embedder(post_tokenized_ids)
+            output_prompt_embeds = embedder(output_tokenized_ids)
+
+        combined_embeds = torch.cat([pre_prompt_embeds, speech_embeds, post_prompt_embeds, output_prompt_embeds], dim=1)
+        atts = torch.ones(combined_embeds.size()[:-1], dtype=torch.long).to(combined_embeds.device)
+
+        input_token_length = pre_tokenized_ids.shape[1] + speech_embeds.shape[1] + post_tokenized_ids.shape[1]
+        label_ids = torch.cat([
+            torch.ones([batch_size, input_token_length], device=combined_embeds.device) * -100,
+            output_tokenized_ids
+        ], 1).to(combined_embeds.device).to(torch.int64)
+        return combined_embeds, atts, label_ids
+
+    def forward(self, wav_tensor, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids, attention_mask=None):
+        combined_embeds, atts, label_ids = self.encode(wav_tensor, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids)
+        outputs = self.llm_model(inputs_embeds=combined_embeds, attention_mask=attention_mask)
+        return outputs
+
+    def generate_meta(self, audio_path, instruction="Give me the following information about the audio [Transcript]", max_new_tokens=2000):
+        device = self.audio_encoder.return_device()
+        pre_speech_prompt = f'''Instruction:
+{instruction}
+
+Input:
+<speech>'''
+        post_speech_prompt = f'''</speech>
+
+Output:'''
+        output_prompt = '\n<s>'
+
+        with torch.no_grad():
+            wav_tensor, sr = torchaudio.load(audio_path)
+            wav_tensor = self.audio_processor(wav_tensor.squeeze(), return_tensors="pt", sampling_rate=16000).input_values
+
+            pre_tokenized_ids = self.llm_tokenizer(pre_speech_prompt, padding="do_not_pad", return_tensors='pt', truncation=False, add_special_tokens=False)["input_ids"]
+            post_tokenized_ids = self.llm_tokenizer(post_speech_prompt, padding="do_not_pad", return_tensors='pt', truncation=False, add_special_tokens=False)["input_ids"]
+            output_tokenized_ids = self.llm_tokenizer(output_prompt, padding="do_not_pad", return_tensors='pt', truncation=False, add_special_tokens=False)["input_ids"]
+
+            combined_embeds, atts, label_ids = self.encode(wav_tensor.to(device), pre_tokenized_ids.to(device), post_tokenized_ids.to(device), output_tokenized_ids.to(device))
+
+            out = self.llm_model.generate(
+                inputs_embeds=combined_embeds,
+                max_new_tokens=max_new_tokens,
+            ).cpu().tolist()[0]
+
+        output_text = self.llm_tokenizer.decode(out, skip_special_tokens=True)
+        return output_text
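model.py wires the pieces together: the HuBERT encoder plus CNN down-sampler projects audio frames into the LLM embedding space, the TinyLlama backbone is wrapped with a LoRA adapter, and generate_meta builds the Instruction / Input <speech>...</speech> / Output prompt around the audio before decoding. A hedged usage sketch; the repo id and audio file are placeholders, and the processor call above assumes 16 kHz mono audio:

from transformers import AutoModel

# Placeholders: use the actual Hub repo id and a local 16 kHz mono wav file.
model = AutoModel.from_pretrained("<user>/<repo>", trust_remote_code=True)
model.eval()

# generate_meta loads and preprocesses the audio itself, so only a path is needed.
text = model.generate_meta(
    "example.wav",
    instruction="Give me the following information about the audio [Transcript]",
    max_new_tokens=256,
)
print(text)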
model.safetensors.index.json ADDED
The diff for this file is too large to render.