from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the domain-adapted finance model and its tokenizer
model = AutoModelForCausalLM.from_pretrained("AdaptLLM/finance-LLM-13B")
tokenizer = AutoTokenizer.from_pretrained("AdaptLLM/finance-LLM-13B", use_fast=False)

# Put your input here; this example asks about 3M's registered securities
user_input = '''Use this fact to answer the question: Title of each class Trading Symbol(s) Name of each exchange on which registered
Common Stock, Par Value $.01 Per Share MMM New York Stock Exchange
MMM Chicago Stock Exchange, Inc.
1.500% Notes due 2026 MMM26 New York Stock Exchange
1.750% Notes due 2030 MMM30 New York Stock Exchange
1.500% Notes due 2031 MMM31 New York Stock Exchange

Which debt securities are registered to trade on a national securities exchange under 3M's name as of Q2 of 2023?'''

# Use the raw input directly as the prompt (no chat template is applied)
prompt = user_input

# Tokenize without adding special tokens, move the ids to the model's device,
# and generate up to a total of 2048 tokens (prompt included)
inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).input_ids.to(model.device)
outputs = model.generate(input_ids=inputs, max_length=2048)[0]

# Decode only the tokens generated after the prompt
answer_start = int(inputs.shape[-1])
pred = tokenizer.decode(outputs[answer_start:], skip_special_tokens=True)

print(f'### User Input:\n{user_input}\n\n### Assistant Output:\n{pred}')
|
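# Optionally, since a 13B checkpoint in full precision needs on the order of 50 GB
# of memory, the weights can be loaded in half precision and placed automatically
# with the `accelerate` package. A minimal sketch, assuming torch and accelerate
# are installed:
#
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       "AdaptLLM/finance-LLM-13B",
#       torch_dtype=torch.float16,
#       device_map="auto",
#   )
#
# The tokenizer and generation code above stay the same.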