Update actions/actions_llm.py
actions/actions_llm.py  +6 -2
actions/actions_llm.py
CHANGED
@@ -1,9 +1,13 @@
 # run_search.py
 import openai
 from search_content import main_search
+import os
 
+# Import api key from secrets
+secret_value_0 = os.environ.get("openai")
+
+openai.api_key = secret_value_0
 # Provide your OpenAI API key
-openai.api_key = ''
 
 def generate_openai_response(query, model_engine="text-davinci-002", max_tokens=124, temperature=0.8):
     """Generate a response using the OpenAI API."""
@@ -11,7 +15,7 @@ def generate_openai_response(query, model_engine="text-davinci-002", max_tokens=
     results = main_search(query)
 
     # Create context from the results
-    context = "".join([f"#{str(i)}" for i in results])[:2014]
+    context = "".join([f"#{str(i)}" for i in results])[:2014]  # Trim the context to 2014 characters - Modify as necessary
     prompt_template = f"Relevant context: {context}\n\n Answer the question in detail: {query}"
 
     # Generate a response using the OpenAI API
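The diff stops at the comment that precedes the actual completion call, so the request itself is not shown. Below is a minimal, self-contained sketch of how the rest of generate_openai_response presumably continues, assuming the legacy openai-python (pre-1.0) Completion endpoint implied by the "text-davinci-002" default; the results parameter stands in for the main_search(query) call so the snippet runs without the search_content module, and none of this code is part of the commit.

# sketch_completion.py - illustrative only; not part of the committed diff.
# Assumes the legacy openai-python (<1.0) Completion endpoint implied by
# the "text-davinci-002" default engine.
import os

import openai

# Read the key from the same environment/secret name the Space uses.
openai.api_key = os.environ.get("openai")


def generate_openai_response(query, results, model_engine="text-davinci-002",
                             max_tokens=124, temperature=0.8):
    """Generate a response using the OpenAI API (assumed continuation)."""
    # Build and trim the retrieval context exactly as the committed code does.
    context = "".join([f"#{str(i)}" for i in results])[:2014]
    prompt_template = f"Relevant context: {context}\n\n Answer the question in detail: {query}"

    # The diff ends at the comment before this call; the request below is an
    # assumed completion, not code taken from the repository.
    response = openai.Completion.create(
        engine=model_engine,
        prompt=prompt_template,
        max_tokens=max_tokens,
        temperature=temperature,
    )
    return response.choices[0].text.strip()

Running the sketch requires the openai package pinned below 1.0 and the key exported under the same name the Space secret uses, e.g. an environment variable called "openai".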