bakrianoo committed
Commit 3931fec
1 Parent(s): 85419c4

Update README.md

Files changed (1)
  1. README.md +45 -31
README.md CHANGED
@@ -65,16 +65,18 @@ messages = [
 outputs = pipe(messages, max_new_tokens=256)
 assistant_response = outputs[0]["generated_text"][-1]["content"].strip()
 print(assistant_response)
-
-#السلام عليكم ورحمة الله وبركاته،
-#
-# أودّ أن أعتذر عن عدم الحضور إلى العمل اليوم بسبب مرضي. أشعر بالسوء الشديد وأحتاج إلى الراحة. سأعود إلى العمل فور تعافيي.
-#
-# شكراً لتفهمكم.
-#
-#مع تحياتي،
-#[اسمك]
-
+```
+
+- Response:
+
+```text
+السلام عليكم ورحمة الله وبركاته
+
+أودّ أن أعتذر عن عدم الحضور إلى العمل اليوم بسبب مرضي. أشعر بالسوء الشديد وأحتاج إلى الراحة. سأعود إلى العمل فور تعافيي.
+شكراً لتفهمكم.
+
+مع تحياتي،
+[اسمك]
 ```

 #### Running the model on a single / multi GPU
@@ -105,9 +107,11 @@ input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt", return_
 outputs = model.generate(**input_ids, max_new_tokens=256)

 print(tokenizer.decode(outputs[0]))
-
-# الشمس
-
+```
+
+- Response:
+```text
+الشمس
 ```

 You can ensure the correct chat template is applied by using `tokenizer.apply_chat_template` as follows:
@@ -132,24 +136,25 @@ input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt", return_

 outputs = model.generate(**input_ids, max_new_tokens=256)
 print(tokenizer.decode(outputs[0]).split("<start_of_turn>model")[-1])
-
-# def generate_even_numbers(n):
-# """
-# This function generates a list of even numbers from 1 to n.
-#
-# Args:
-# n: The upper limit of the range.
-#
-# Returns:
-# A list of even numbers.
-# """
-# return [i for i in range(1, n + 1) if i % 2 == 0]
-#
-# Example usage
-# n = 10
-# even_numbers = generate_even_numbers(n)
-# print(f"The first {n} even numbers are: {even_numbers}")
-
+```
+
+- Response:
+```python
+def generate_even_numbers(n):
+    """
+    This function generates a list of even numbers from 1 to n.
+    Args:
+      n: The upper limit of the range.
+
+    Returns:
+      A list of even numbers.
+    """
+    return [i for i in range(1, n + 1) if i % 2 == 0]
+
+# Example usage
+n = 10
+even_numbers = generate_even_numbers(n)
+print(f"The first {n} even numbers are: {even_numbers}")
 ```

 #### Quantized Versions through `bitsandbytes`
@@ -184,10 +189,13 @@ input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt", return_

 outputs = model.generate(**input_ids, max_new_tokens=256)
 print(tokenizer.decode(outputs[0]).split("<start_of_turn>model")[-1])
-
-# الليمون، البرتقال، الموز، الكيوي، الفراولة
-
+```
+
+- Response:
+```text
+الليمون، البرتقال، الموز، الكيوي، الفراولة
 ```
+
 </details>

 <details>
@@ -216,9 +224,13 @@ input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt", return_

 outputs = model.generate(**input_ids, max_new_tokens=256)
 print(tokenizer.decode(outputs[0]).split("<start_of_turn>model")[-1])
-
-# 1193
+```
+
+- Response:
+```text
+1193
 ```
+
 </details>

 #### Advanced Usage
@@ -285,9 +297,11 @@ for idx in range(2):
 # fast run
 outputs = model.generate(**model_inputs, past_key_values=past_key_values, do_sample=True, temperature=1.0, max_new_tokens=128)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
-
-# جو بايدن
-
+```
+
+- Response:
+```text
+جو بايدن
 ```

 For more details, refer to the [Transformers documentation](https://huggingface.co/docs/transformers/main/en/llm_optims?static-kv=basic+usage%3A+generation_config).
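
For reference, a minimal sketch of how the quantized `bitsandbytes` path and the chat-template call shown in the hunks above can be exercised end to end. This is not part of the commit; `google/gemma-2-9b-it` is only a placeholder, since the diff does not show this repository's actual model id.

```python
# Minimal sketch (not from this commit): 4-bit loading via bitsandbytes plus the chat template.
# "google/gemma-2-9b-it" is a placeholder model id; substitute this repository's own model id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "google/gemma-2-9b-it"  # placeholder assumption

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",
)

# Same chat-template pattern the README snippets above rely on.
messages = [{"role": "user", "content": "اكتب جملة ترحيب قصيرة."}]  # "Write a short greeting."
input_ids = tokenizer.apply_chat_template(
    messages,
    return_tensors="pt",
    return_dict=True,
    add_generation_prompt=True,
).to(model.device)

outputs = model.generate(**input_ids, max_new_tokens=256)
print(tokenizer.decode(outputs[0]).split("<start_of_turn>model")[-1])
```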