Update app.py
app.py CHANGED
@@ -23,7 +23,7 @@ rm_model = AutoModelForSequenceClassification.from_pretrained('OpenAssistant/rew
 def generate_text(usertitle, content, temperature, max_length, N=3):
     input_text = f"title: {usertitle}\ncontent: {content}"
     inputs = tokenizer(input_text, return_tensors='pt').to('cuda')
-    attention_mask = torch.ones(inputs.shape, dtype=torch.long)
+    attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
     generated_sequences = model.generate(inputs['input_ids'], attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
     decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]
 
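Why the removed line caused the runtime error: tokenizer(...) returns a BatchEncoding (a dict-like object), so inputs.shape fails, and the mask was also being created on the CPU while input_ids lived on the GPU. A minimal sketch of the corrected pattern, assuming model and tokenizer are the CUDA-resident causal LM and its tokenizer loaded earlier in app.py:

import torch

# Illustration only: `tokenizer` and `model` stand in for the objects
# created near the top of app.py.
inputs = tokenizer("title: demo\ncontent: demo", return_tensors='pt').to('cuda')
# Read the shape from the input_ids tensor (the BatchEncoding itself has no
# .shape) and build the mask on the same device as the inputs.
attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
outputs = model.generate(inputs['input_ids'],
                         attention_mask=attention_mask,
                         max_length=64,
                         do_sample=True,
                         pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))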