guptavishal79 committed · Commit e54d2b2 · verified · 1 Parent(s): 3fe99f4

load model

Files changed (1): app.py +17 -0
app.py CHANGED
@@ -7,6 +7,23 @@ hub_path = 'guptavishal79/aimlops'
 loaded_model = GPT2LMHeadModel.from_pretrained(hub_path)
 loaded_tokenizer = GPT2Tokenizer.from_pretrained(hub_path)
 
+
+def generate_response(model, tokenizer, prompt, max_length=200):
+    input_ids = tokenizer.encode(prompt, return_tensors="pt")
+    # Create the attention mask and pad token id
+    attention_mask = torch.ones_like(input_ids)
+    pad_token_id = tokenizer.eos_token_id
+
+    output = model.generate(
+        input_ids,
+        max_length=max_length,
+        num_return_sequences=1,
+        attention_mask=attention_mask,
+        pad_token_id=pad_token_id
+    )
+
+    return tokenizer.decode(output[0], skip_special_tokens=True)
+
 # Function for response generation
 def generate_query_response(prompt, max_length=200):
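For context, here is a minimal, self-contained sketch of what the added generate_response helper does end to end, assuming the model and tokenizer are loaded from the guptavishal79/aimlops hub path as in the hunk above (the diff itself relies on torch and the transformers classes being imported earlier in app.py, outside this hunk); the prompt string is purely illustrative:

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

hub_path = "guptavishal79/aimlops"  # same hub path as in the diff
model = GPT2LMHeadModel.from_pretrained(hub_path)
tokenizer = GPT2Tokenizer.from_pretrained(hub_path)

prompt = "Explain MLOps in one sentence."  # hypothetical prompt, for illustration only
input_ids = tokenizer.encode(prompt, return_tensors="pt")

# Mirrors the body of generate_response: explicit attention mask and
# eos_token_id as pad_token_id, since GPT-2 has no dedicated pad token.
output = model.generate(
    input_ids,
    max_length=100,
    num_return_sequences=1,
    attention_mask=torch.ones_like(input_ids),
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))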