nayem-ng committed
Commit 67aa02d · verified · 1 Parent(s): 473f025

Update README.md

Files changed (1)
  1. README.md +37 -8
README.md CHANGED
@@ -54,17 +54,46 @@ Users should consider implementing bias mitigation strategies and ensure thoroug
  Use the following code snippet to get started with loading and using the model:

  ```python
+ # Import necessary libraries
  from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+ import intel_extension_for_pytorch as ipex # Optional for Intel optimization

- model_name = "nayem-ng/mdjannatulnayem_llama2_7b_finetuned_casuallm_lora"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name)
+ # Specify your Hugging Face model repository
+ hf_model = "nayem-ng/mdjannatulnayem_llama2_7b_finetuned_casuallm_lora"

- # Example of generating text
- input_text = "Your prompt here"
- inputs = tokenizer(input_text, return_tensors="pt")
- outputs = model.generate(**inputs)
- print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ # Load the fine-tuned model and tokenizer
+ model = AutoModelForCausalLM.from_pretrained(hf_model)
+ tokenizer = AutoTokenizer.from_pretrained(hf_model)
+
+ # Move the model to the desired device
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model.to(device)
+
+ # Set the model to evaluation mode
+ model.eval()
+
+ # Optional: Optimize with Intel extensions for PyTorch
+ # Uncomment the next line if you want to use Intel optimizations
+ # model = ipex.optimize(model)
+
+ # Function to generate text
+ def generate_text(prompt, max_length=50):
+     # Tokenize the input prompt
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
+     # Generate output
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_length=max_length)
+
+     # Decode and return the generated text
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # Example usage
+ if __name__ == "__main__":
+     prompt = "Once upon a time"
+     generated_text = generate_text(prompt)
+     print("Generated Text:", generated_text)
  ```
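Note that the committed snippet loads the repository directly with `AutoModelForCausalLM`. Because the repository name ends in `lora`, the checkpoint may instead be a PEFT/LoRA adapter that has to be attached to a base Llama-2 model first. Below is a minimal sketch of that alternative, assuming the repo holds a PEFT adapter and that `meta-llama/Llama-2-7b-hf` is the base checkpoint; neither assumption is confirmed by this diff.

```python
# Minimal sketch, NOT part of the committed README: assumes the repository is a
# PEFT/LoRA adapter and that "meta-llama/Llama-2-7b-hf" is the base model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "meta-llama/Llama-2-7b-hf"  # assumed base checkpoint (gated; requires access)
adapter_id = "nayem-ng/mdjannatulnayem_llama2_7b_finetuned_casuallm_lora"

# Load the base model first, then attach the LoRA adapter on top of it
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base_model, adapter_id)
tokenizer = AutoTokenizer.from_pretrained(adapter_id)

# Optionally fold the adapter into the base weights for plain-transformers inference
model = model.merge_and_unload()
```

If `AutoModelForCausalLM.from_pretrained(hf_model)` in the committed snippet already loads the repository on its own, the weights are presumably merged and this extra step is unnecessary.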

## Training Details