Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Minimal inference script for the AdaptLLM finance base model.

Loads AdaptLLM/finance-LLM-13B (a base, non-chat model), feeds it a raw
prompt, and prints only the newly generated completion.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer

# NOTE(review): no dtype/device override is given, so the 13B model loads in
# full precision on the default device — confirm a GPU/low-precision setup is
# configured elsewhere if this runs in a constrained Space.
model = AutoModelForCausalLM.from_pretrained("AdaptLLM/finance-LLM-13B")
# use_fast=False: pair the checkpoint with the slow (SentencePiece) tokenizer,
# as in the AdaptLLM model card.
tokenizer = AutoTokenizer.from_pretrained("AdaptLLM/finance-LLM-13B", use_fast=False)

# Put your input here:
user_input = '''Use this fact to answer the question: Title of each class Trading Symbol(s) Name of each exchange on which registered
Common Stock, Par Value $.01 Per Share MMM New York Stock Exchange
MMM Chicago Stock Exchange, Inc.
1.500% Notes due 2026 MMM26 New York Stock Exchange
1.750% Notes due 2030 MMM30 New York Stock Exchange
1.500% Notes due 2031 MMM31 New York Stock Exchange

Which debt securities are registered to trade on a national securities exchange under 3M's name as of Q2 of 2023?'''

# Simply use your input as the prompt for base models (no chat template).
prompt = user_input

# add_special_tokens=False follows the AdaptLLM usage: the raw prompt is fed
# to the base model without a BOS token.
inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).input_ids.to(model.device)

# Fix: use max_new_tokens instead of max_length. max_length counts prompt and
# completion together, so the answer budget shrinks as the prompt grows and
# drops to zero once the prompt itself reaches 2048 tokens; max_new_tokens
# bounds only the generated answer.
outputs = model.generate(input_ids=inputs, max_new_tokens=2048)[0]

# Decode only the tokens produced after the prompt.
answer_start = int(inputs.shape[-1])
pred = tokenizer.decode(outputs[answer_start:], skip_special_tokens=True)

print(f'### User Input:\n{user_input}\n\n### Assistant Output:\n{pred}')