datawithsuman committed
Commit 4c8dd3b · verified · 1 parent: 480a31f

Update app.py

Files changed (1): app.py (+29, -16)
app.py CHANGED
@@ -4,6 +4,8 @@ import ast
 import json
 import openai
 from llama_index.llms.openai import OpenAI
+from llama_index.core.llms import ChatMessage
+from llama_index.llms.anthropic import Anthropic
 import nest_asyncio
 
 nest_asyncio.apply()
@@ -14,9 +16,10 @@ nest_asyncio.apply()
 
 
 # OpenAI credentials
-key = os.getenv('OPENAI_API_KEY')
-openai.api_key = key
-os.environ["OPENAI_API_KEY"] = key
+# key = os.getenv('OPENAI_API_KEY')
+key = os.getenv('CLAUDE_API_KEY')
+# openai.api_key = key
+# os.environ["OPENAI_API_KEY"] = key
 
 # Streamlit UI
 st.title("Auto Test Case Generation using LLM")
@@ -50,18 +53,27 @@ if uploaded_files:
     list_of_functions = list(functions.keys())
     st.write(list_of_functions)
 
-    def res(prompt):
-
-        response = openai.chat.completions.create(
-            model=model,
-            messages=[
-                {"role": "user",
-                 "content": prompt,
-                }
-            ]
-        )
-
-        return response.choices[0].message.content
+    def res(prompt, model):
+
+        # response = openai.chat.completions.create(
+        #     model=model,
+        #     messages=[
+        #         {"role": "user",
+        #          "content": prompt,
+        #         }
+        #     ]
+        # )
+
+        # return response.choices[0].message.content
+
+        messages = [
+            ChatMessage(
+                role="system", content="You are a helpful assistant"
+            ),
+            ChatMessage(role="user", content=prompt),
+        ]
+        resp = Anthropic(model=model).chat(messages)
+        return resp.message.content  # text of the ChatResponse
 
     # Initialize session state for chat messages
     if "messages" not in st.session_state:
@@ -86,7 +98,8 @@ if uploaded_files:
             snippet = functions[func]
 
             # Generation
-            model = "gpt-3.5-turbo"
+            # model = "gpt-3.5-turbo"
+            model = "claude-3-haiku-20240307"
 
             # Generation
             # resp = ollama.generate(model='codellama',
@@ -102,9 +115,9 @@ if uploaded_files:
             \n\nThere should be no duplicate test cases. Avoid generating repeated statements.
             """
 
-            print(prompt)
+            # print(prompt)
 
-            resp = res(prompt)
+            resp = res(prompt, model)
             st.session_state.messages.append({"role": "assistant", "content": f"{resp}"})
             st.markdown(resp)
             # st.session_state.messages.append({"role": "assistant", "content": f"{resp['response']}"})
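
Worth noting for anyone reproducing this change: key is read from CLAUDE_API_KEY but never passed to the client, so llama_index's Anthropic wrapper falls back to the underlying SDK's default ANTHROPIC_API_KEY lookup. Below is a minimal, self-contained sketch of the new call path with the key wired through explicitly; the env-var name, model, and two-message shape are taken from this diff, while the helper name generate_reply and the example prompt are made up for illustration.

import os

from llama_index.core.llms import ChatMessage
from llama_index.llms.anthropic import Anthropic

def generate_reply(prompt: str, model: str) -> str:
    # Same two-message shape the commit builds inside res()
    messages = [
        ChatMessage(role="system", content="You are a helpful assistant"),
        ChatMessage(role="user", content=prompt),
    ]
    # Pass the key explicitly instead of relying on the env-var default
    llm = Anthropic(model=model, api_key=os.getenv("CLAUDE_API_KEY"))
    resp = llm.chat(messages)
    # chat() returns a ChatResponse; .message.content holds the text
    return resp.message.content

print(generate_reply("Generate pytest unit tests for a factorial function.",
                     "claude-3-haiku-20240307"))

If the Space already exposes the secret as ANTHROPIC_API_KEY, the api_key argument can be dropped, since the anthropic SDK reads that variable by default.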