Update README.md
README.md CHANGED
@@ -85,9 +85,9 @@ terminators = [

 outputs = generation_pipeline(
     input_messages,
-    max_new_tokens=
+    max_new_tokens=13,
     num_beams=45, # Set this as high as your memory will allow!
-    num_return_sequences=
+    num_return_sequences=3,
     early_stopping=True,
 )

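The three values filled in here control beam search: `max_new_tokens` caps the length of the answer (place names are short), `num_beams` sets how many candidate sequences are explored, and `num_return_sequences`, which must not exceed `num_beams`, sets how many of those beams come back. A minimal sketch of the same knobs on a small stand-in model (`gpt2` here, purely for illustration, not the Papy model):

```python
from transformers import pipeline

# Stand-in model used only to illustrate the beam-search parameters above.
demo = pipeline("text-generation", model="gpt2")

results = demo(
    "The papyrus fragment was found in",
    max_new_tokens=13,
    num_beams=5,              # must be >= num_return_sequences
    num_return_sequences=3,   # one returned sequence per surviving beam
    early_stopping=True,
)
for r in results:
    print(r["generated_text"])
```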
@@ -98,16 +98,18 @@ for output in outputs:
         if item.get('role') == 'assistant':
             beam_contents.append(item.get('content'))

-real_response = "
+real_response = "Oxyrynchos"

-print(f"
+print(f"Place of origin: {real_response}")
 for i, content in enumerate(beam_contents, start=1):
     print(f"Suggestion {i}: {content}")
 ```
 ### Expected Output:
 ```
-
-Suggestion 1:
+Place of origin: Oxyrynchos
+Suggestion 1: Oxyrhynchos
+Suggestion 2: Antinoopolis
+Suggestion 3: Alexandria
 ```
 ## Usage on free tier in Google Colab

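Note that the ground-truth label printed above is spelled `Oxyrynchos` while the top suggestion comes back as `Oxyrhynchos`, so an exact string comparison between `real_response` and the beam contents would report a miss. If you want to score the suggestions automatically, a fuzzy match is one option; the sketch below uses `difflib` from the standard library with an arbitrary 0.8 threshold (an illustration, not part of the original README):

```python
import difflib

def matches(suggestion: str, truth: str, threshold: float = 0.8) -> bool:
    """Treat a suggestion as correct if it is close enough to the known label."""
    ratio = difflib.SequenceMatcher(None, suggestion.lower(), truth.lower()).ratio()
    return ratio >= threshold

real_response = "Oxyrynchos"
for content in ["Oxyrhynchos", "Antinoopolis", "Alexandria"]:
    print(content, matches(content, real_response))
```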
@@ -141,9 +143,9 @@ quant_config = BitsAndBytesConfig(
     bnb_4bit_compute_dtype=torch.bfloat16
 )

-model = AutoModelForCausalLM.from_pretrained("Ericu950/Papy_1_Llama-3.1-8B-
+model = AutoModelForCausalLM.from_pretrained("Ericu950/Papy_1_Llama-3.1-8B-Instruct_place",
     device_map = "auto", quantization_config = quant_config)
-tokenizer = AutoTokenizer.from_pretrained("Ericu950/Papy_1_Llama-3.1-8B-
+tokenizer = AutoTokenizer.from_pretrained("Ericu950/Papy_1_Llama-3.1-8B-Instruct_place")

 generation_pipeline = pipeline(
     "text-generation",
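This hunk only shows the tail of the quantization setup. For readers assembling the Colab snippet from scratch, here is a hedged sketch of the complete 4-bit load; only `bnb_4bit_compute_dtype=torch.bfloat16` and the two `from_pretrained` calls are confirmed by the diff, and the remaining `BitsAndBytesConfig` fields are typical NF4 settings that may differ from the README's actual values:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

# Assumed 4-bit NF4 configuration; only the compute dtype is visible in the diff.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "Ericu950/Papy_1_Llama-3.1-8B-Instruct_place",
    device_map="auto",
    quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained("Ericu950/Papy_1_Llama-3.1-8B-Instruct_place")

generation_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
```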
@@ -175,7 +177,7 @@ papyrus_edition = """
 εφοδον το τε βλαβοσ και επιτιμον αργυριου δραχμασ 0 και εισ το δημοσιον τασ ισασ και μηθεν
 ησσον· δ -----ιων ομολογιαν συνεχωρησεν·"""

-system_prompt = "
+system_prompt = "Assign this papyrus fragment to an exact place!"

 input_messages = [
     {"role": "system", "content": system_prompt},
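Only the system entry of the message list is visible in this hunk. A minimal sketch of how the prompt and the transcribed fragment plausibly fit together as chat messages (pairing `papyrus_edition` with the user role is an assumption, not shown in the diff):

```python
system_prompt = "Assign this papyrus fragment to an exact place!"

# Assumption: the papyrus edition defined above is passed as the user turn.
input_messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": papyrus_edition},
]
```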
@@ -184,9 +186,9 @@ input_messages = [

 outputs = generation_pipeline(
     input_messages,
-    max_new_tokens=
+    max_new_tokens=13,
     num_beams=10,
-    num_return_sequences=
+    num_return_sequences=3,
     early_stopping=True,
 )

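`num_beams=10` is chosen with the free-tier Colab GPU in mind; the earlier section uses 45 on larger hardware. If memory is still tight, one option (purely illustrative, not from the README) is to halve the beam width on an out-of-memory error and retry:

```python
import torch

def generate_with_fallback(pipe, messages, beams=10, **kwargs):
    """Retry generation with progressively fewer beams if the GPU runs out of memory."""
    while beams >= 1:
        try:
            return pipe(
                messages,
                num_beams=beams,
                num_return_sequences=min(3, beams),  # never ask for more sequences than beams
                **kwargs,
            )
        except torch.cuda.OutOfMemoryError:
            torch.cuda.empty_cache()  # release cached blocks before retrying
            beams //= 2
    raise RuntimeError("Generation failed even with a single beam")

# Hypothetical usage with the pipeline defined above:
# outputs = generate_with_fallback(generation_pipeline, input_messages,
#                                  max_new_tokens=13, early_stopping=True)
```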
@@ -197,16 +199,18 @@ for output in outputs:
         if item.get('role') == 'assistant':
             beam_contents.append(item.get('content'))

-real_response = "
+real_response = "Oxyrynchos"

-print(f"
+print(f"Place of origin: {real_response}")
 for i, content in enumerate(beam_contents, start=1):
     print(f"Suggestion {i}: {content}")
 ```
 ### Expected Output:
 ```
-
-Suggestion 1:
+Place of origin: Oxyrynchos
+Suggestion 1: Oxyrhynchos
+Suggestion 2: Antinoopolis
+Suggestion 3: Alexandria
 ```
