Upload actions_llm.py
actions/actions_llm.py  +35 -0
ADDED
@@ -0,0 +1,35 @@
+# run_search.py
+import openai
+from search_content import main_search
+
+# Provide your OpenAI API key
+openai.api_key = ''
+
+def generate_openai_response(query, model_engine="text-davinci-002", max_tokens=124, temperature=0.8):
+    """Generate a response using the OpenAI API."""
+    # Run the main function from search_content.py and store the results in a variable
+    results = main_search(query)
+
+    # Create context from the results
+    context = "".join([f"#{str(i)}" for i in results])[:2014]
+    prompt_template = f"Relevant context: {context}\n\n Answer the question in detail: {query}"
+
+    # Generate a response using the OpenAI API
+    response = openai.Completion.create(
+        engine=model_engine,
+        prompt=prompt_template,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        n=1,
+        stop=None,
+    )
+
+    return response.choices[0].text.strip()
+
+def main():
+    query = "What are Omdena local chapters and how can a developer benefit from them?"
+    response = generate_openai_response(query)
+    print(response)
+
+if __name__ == "__main__":
+    main()
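Note that `main_search` is imported from `search_content`, which is not part of this commit, and `openai.api_key` is left blank and must be supplied before running. The script only assumes that `main_search(query)` returns an iterable of text snippets to splice into the prompt. A minimal, hypothetical sketch of such a module (placeholder snippets and naive keyword scoring, not the project's real retrieval code) could look like this:

# search_content.py -- hypothetical sketch; the real module is not included in this diff.
# run_search.py only relies on main_search(query) returning an iterable of strings.
from typing import List

SAMPLE_SNIPPETS: List[str] = [
    "Placeholder snippet 1 about the indexed content.",
    "Placeholder snippet 2 about the indexed content.",
]

def main_search(query: str) -> List[str]:
    """Return snippets ordered by naive keyword overlap with the query."""
    terms = set(query.lower().split())
    return sorted(SAMPLE_SNIPPETS, key=lambda s: -len(terms & set(s.lower().split())))

With a module like this on the path and a valid API key set, running `python run_search.py` prints the model's completion for the hard-coded query in `main()`.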