leandrocarneiro committed
Commit 6f82717 (verified) · 1 Parent(s): 9f9fa14

Upload 3 files

Files changed (3)
  1. llm.py +1 -1
  2. main.py +2 -0
  3. rag.py +6 -2
llm.py CHANGED
@@ -14,7 +14,7 @@ def invoke_llm(context, task):
     The context is: $$${context}$$$
     """
 
-    llm=ChatOpenAI(model_name="gpt-3.5-turbo",
+    llm=ChatOpenAI(model_name="gpt-3.5-turbo-0125",
                    temperature=0,
                    openai_api_key=os.environ['OPENAI_KEY'],
                    max_tokens=1000)
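Both llm.py (above) and rag.py (below) now pin the same model snapshot, "gpt-3.5-turbo-0125". A minimal sketch, not part of this commit, of how the two call sites could share a single pinned model name so they cannot drift apart; the OPENAI_MODEL variable, MODEL_NAME constant, and build_chat_llm helper are assumptions for illustration, and the import path should match whichever ChatOpenAI import the project already uses.

import os

from langchain_openai import ChatOpenAI  # assumed import path; match llm.py's existing import

# Assumed helper constant: default to the pinned snapshot, allow an env override.
MODEL_NAME = os.environ.get('OPENAI_MODEL', 'gpt-3.5-turbo-0125')

def build_chat_llm(temperature=0, max_tokens=1000):
    # Sketch of one factory function both llm.py and rag.py could call.
    return ChatOpenAI(model_name=MODEL_NAME,
                      temperature=temperature,
                      openai_api_key=os.environ['OPENAI_KEY'],
                      max_tokens=max_tokens)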
main.py CHANGED
@@ -39,6 +39,8 @@ def generate_news(subject, min_words, max_words, sites):
     print(' Assunto: ' + subject)
     obj_rag = rag.Rag(vectorstore, min_words, max_words)
     result_news = obj_rag.generate_text(subject)
+    if type(result_news) == str:
+        return 'Erro: ' + result_news
 
     print('\n\n' + '*' * 50 + '\n\n')
     print(result_news[0])
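The new guard in main.py relies on a return-type contract: rag.Rag.generate_text hands back a plain string when generation fails and a sequence whose first element is the news text when it succeeds. A minimal caller-side sketch of that contract, assuming the same return shape; generate_news_safe is a hypothetical wrapper, and isinstance is an equivalent, slightly more idiomatic spelling of type(result_news) == str.

def generate_news_safe(obj_rag, subject):
    # Hypothetical wrapper (not in the commit) illustrating the contract main.py assumes:
    # generate_text returns a str on error, otherwise a sequence with the news text first.
    result_news = obj_rag.generate_text(subject)

    if isinstance(result_news, str):   # same intent as: type(result_news) == str
        return 'Erro: ' + result_news  # propagate the error message to the caller

    print('\n\n' + '*' * 50 + '\n\n')
    print(result_news[0])              # generated news article
    return result_news[0]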
rag.py CHANGED
@@ -59,6 +59,7 @@ class Rag:
 
         prompt_template = """Your task is to create news to a newspaper based on pieces of texts delimited by <> and a question delimited by <>.
         Do not make up any information, create the news just based on the given information on the pieces of texts delimited by <>.
+        If you need to make up any information, please do not answer the question. Inform that you need more information to answer the question.
         The news should have a tittle.
         The news should be written in a formal language.
         The news should have between {min_words} and {max_words} words and it should be in portuguese language.
@@ -70,12 +71,14 @@ class Rag:
             partial_variables={"min_words": min_words, "max_words": max_words})
 
         self.qa = ConversationalRetrievalChain.from_llm(
-            llm=ChatOpenAI(model_name="gpt-3.5-turbo",
+            llm=ChatOpenAI(model_name="gpt-3.5-turbo-0125",
                            temperature=0.1,
                            openai_api_key=os.environ['OPENAI_KEY'],
                            max_tokens=int(int(max_words) + (int(max_words) / 2))), #número máximo de tokens para a resposta
             memory=self.memory,
-            retriever=vectorstore.as_retriever(), #search_kwargs={'k': 3}
+            #retriever=vectorstore.as_retriever(search_type='similarity_score_threshold',
+            #                                   search_kwargs={'k':4, 'score_threshold':0.5}), #search_kwargs={'k': 3}
+            retriever=vectorstore.as_retriever(),
             combine_docs_chain_kwargs={"prompt": self.prompt},
             chain_type="stuff",#map_reduce, refine, map_rerank
             return_source_documents=True,
@@ -84,6 +87,7 @@ class Rag:
         try:
             query = f"Elabore uma nova notícia sobre {subject}."
             result_text = self.qa.invoke({"question": query})
+            print('##### result', result_text)
 
             list_result_sources = []
             str_result_sources = ''
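The retriever lines kept commented out in rag.py point at the variant being experimented with: a similarity-score-threshold search instead of the default retriever. A minimal sketch, assuming a LangChain vector store whose relevance scores are normalized to [0, 1]; build_retriever is a hypothetical helper, and k=4 / score_threshold=0.5 are taken from the commented lines.

def build_retriever(vectorstore):
    # Sketch (not enabled in this commit): drop weakly related chunks before they
    # reach the prompt by requiring a minimum relevance score from the vector store.
    return vectorstore.as_retriever(
        search_type='similarity_score_threshold',
        search_kwargs={'k': 4, 'score_threshold': 0.5},
    )

Filtering at retrieval time pairs naturally with the new prompt instruction to ask for more information rather than invent it, since fewer marginal chunks reach the model.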