Update llm_inference.py
Browse files- llm_inference.py +3 -3
llm_inference.py
CHANGED
@@ -184,7 +184,7 @@ Your output is only the caption itself, no comments or extra formatting. The cap
 184         response = self.huggingface_client.chat.completions.create(
 185             model=model or "meta-llama/Meta-Llama-3.1-70B-Instruct",
 186             max_tokens=1024,
-187             temperature=0
+187             temperature=1.0,
 188             top_p=0.95,
 189             messages=[
 190                 {"role": "system", "content": system_message},
@@ -198,7 +198,7 @@ Your output is only the caption itself, no comments or extra formatting. The cap
 198         response = self.groq_client.chat.completions.create(
 199             model=model or "llama-3.1-70b-versatile",
 200             max_tokens=1024,
-201             temperature=0
+201             temperature=1.0,
 202             messages=[
 203                 {"role": "system", "content": system_message},
 204                 {"role": "user", "content": user_message},
@@ -211,7 +211,7 @@ Your output is only the caption itself, no comments or extra formatting. The cap
 211         response = self.sambanova_client.chat.completions.create(
 212             model=model or "Meta-Llama-3.1-70B-Instruct",
 213             max_tokens=1024,
-214             temperature=0
+214             temperature=1.0,
 215             messages=[
 216                 {"role": "system", "content": system_message},
 217                 {"role": "user", "content": user_message},
Resulting code after the change (note: the removed `temperature=0` lines had no trailing comma, which was a syntax error the commit also fixes by writing `temperature=1.0,`):

 184         response = self.huggingface_client.chat.completions.create(
 185             model=model or "meta-llama/Meta-Llama-3.1-70B-Instruct",
 186             max_tokens=1024,
 187             temperature=1.0,
 188             top_p=0.95,
 189             messages=[
 190                 {"role": "system", "content": system_message},

 198         response = self.groq_client.chat.completions.create(
 199             model=model or "llama-3.1-70b-versatile",
 200             max_tokens=1024,
 201             temperature=1.0,
 202             messages=[
 203                 {"role": "system", "content": system_message},
 204                 {"role": "user", "content": user_message},

 211         response = self.sambanova_client.chat.completions.create(
 212             model=model or "Meta-Llama-3.1-70B-Instruct",
 213             max_tokens=1024,
 214             temperature=1.0,
 215             messages=[
 216                 {"role": "system", "content": system_message},
 217                 {"role": "user", "content": user_message},