mjmanashti committed
Commit 545f8f3
1 Parent(s): 36be831

Update README.md

Files changed (1):
  1. README.md +19 -17
README.md CHANGED
@@ -14,27 +14,29 @@ This model was trained using AutoTrain. For more information, please visit [Auto
 # Usage
 
 ```python
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-model_path = "PATH_TO_THIS_REPO"
-
-tokenizer = AutoTokenizer.from_pretrained(model_path)
-model = AutoModelForCausalLM.from_pretrained(
-    model_path,
-    device_map="auto",
-    torch_dtype='auto'
-).eval()
-
-# Prompt content: "hi"
-messages = [
-    {"role": "user", "content": "hi"}
-]
-
-input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
-output_ids = model.generate(input_ids.to('cuda'))
-response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
-
-# Model response: "Hello! How can I assist you today?"
-print(response)
+!pip install transformers
+!pip install accelerate
+
+from huggingface_hub import notebook_login
+notebook_login()
+
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+tokenizer = AutoTokenizer.from_pretrained("mjmanashti/autotrain-cff1t-gk81o")
+torch.set_default_dtype(torch.float16)
+model = AutoModelForCausalLM.from_pretrained("mjmanashti/autotrain-cff1t-gk81o", device_map="auto")
+
+chat = [
+    {"role": "user", "content": "Based on the following input data: [Time: 2024-01-29 23:00:00, Open: 1.0834, High: 1.0837, Low: 1.08334, Close: 1.08338, Volume: 722] what trading signal (BUY, SELL, or HOLD) should be executed to maximize profit? If the signal is BUY, what would be the entry price, and if the signal is SELL, what would be the exit price for profit maximization?"},
+]
+prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
+outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150)
+print(tokenizer.decode(outputs[0]))
 ```
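
Note on the updated snippet: `tokenizer.decode(outputs[0])` decodes the full sequence, so the printed text echoes the prompt and any special tokens ahead of the model's reply. To print only the generated answer, as the previous version of this snippet did, here is a minimal sketch (assuming the `tokenizer`, `inputs`, and `outputs` variables from the example above):

```python
# Slice off the prompt tokens so only the newly generated tokens remain,
# then decode without special tokens (assumes `inputs`/`outputs` from above).
response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
print(response)
```

On the dtype handling: `torch.set_default_dtype(torch.float16)` changes the process-wide default; `from_pretrained` also accepts an explicit `torch_dtype=torch.float16` argument if you prefer to request half-precision weights for this one load.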