import os
import streamlit as st
import ast
import json
import openai
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.llms.anthropic import Anthropic
import nest_asyncio
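# Allow nested event loops so llama-index's async calls work inside Streamlit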
nest_asyncio.apply()
# import ollama
# from llama_index.llms.ollama import Ollama
# from llama_index.core.llms import ChatMessage
# OpenAI credentials
# key = os.getenv('OPENAI_API_KEY')
# openai.api_key = key
# os.environ["OPENAI_API_KEY"] = key
# Anthropic credentials
key = os.getenv('CLAUDE_API_KEY')
if not key:
    st.error("CLAUDE_API_KEY environment variable is not set.")
    st.stop()
os.environ["ANTHROPIC_API_KEY"] = key
# Streamlit UI
st.title("Auto Test Case Generation using LLM")
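# File upload: each uploaded .py file is saved under ./data/ and scanned for function definitions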
uploaded_files = st.file_uploader("Upload a Python (.py) file", type=".py", accept_multiple_files=True)
if uploaded_files:
    # Make sure the target directory exists before saving uploads
    os.makedirs("./data", exist_ok=True)
    for uploaded_file in uploaded_files:
        # Persist each upload so it can be re-read and parsed below
        with open(f"./data/{uploaded_file.name}", 'wb') as f:
            f.write(uploaded_file.getbuffer())
        st.success("File uploaded...")
        st.success("Fetching list of functions...")
        file_path = f"./data/{uploaded_file.name}"
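    # Walk the AST of the uploaded file and collect the source of every function definition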
    def extract_functions_from_file(file_path):
        with open(file_path, "r") as file:
            file_content = file.read()
        parsed_content = ast.parse(file_content)
        functions = {}
        for node in ast.walk(parsed_content):
            if isinstance(node, ast.FunctionDef):
                func_name = node.name
                # Recover the exact source segment of this function definition
                func_body = ast.get_source_segment(file_content, node)
                functions[func_name] = func_body
        return functions

    functions = extract_functions_from_file(file_path)
    list_of_functions = list(functions.keys())
    st.write(list_of_functions)
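    # Helper: send the prompt to the chosen Claude model via llama-index and return the reply text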
    def res(prompt, model):
        # OpenAI alternative, kept for reference:
        # response = openai.chat.completions.create(
        #     model=model,
        #     messages=[
        #         {"role": "user", "content": prompt}
        #     ]
        # )
        # return response.choices[0].message.content
        messages = [
            ChatMessage(role="system", content="You are a sincere and helpful coding assistant"),
            ChatMessage(role="user", content=prompt),
        ]
        resp = Anthropic(model=model).chat(messages)
        # Return only the reply text so it renders cleanly in st.markdown
        return resp.message.content
    # Initialize session state for chat messages
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # Accept user input
    if func := st.chat_input("Enter the function name for generating test cases:"):
        st.session_state.messages.append({"role": "assistant", "content": f"Generating test cases for {func}"})
        st.success(f"Generating test cases for {func}")
        # Strip all whitespace from the entered name before matching
        func = ''.join(func.split())
        if func not in list_of_functions:
            st.write("Incorrect function name")
        else:
            snippet = functions[func]
            # Model selection
            # model = "gpt-3.5-turbo"
            # model = "claude-3-haiku-20240307"
            # model = "claude-3-sonnet-20240229"
            model = "claude-3-opus-20240229"
            # Generation
            # Ollama/CodeLlama alternative, kept for reference:
            # resp = ollama.generate(model='codellama',
            #     prompt=f""" Your task is to generate unit test cases for this function : {snippet}\
            #     \n\n Politely refuse if the function is not suitable for generating test cases.
            #     \n\n Generate at least 5 unit test cases. Include a couple of edge cases as well.
            #     \n\n There should be no duplicate test cases.
            #     \n\n Avoid generating repeated statements.
            #     """)
            prompt = f""" Your task is to generate unit test cases for this function : \n\n{snippet}\
            \n\n Generate between 3 and 8 unit test cases. Include a couple of edge cases as well.
            \n\n All the test cases should have the mandatory assert statement.
            \n\n Do not generate incomplete test cases without the assert statement.
            \n\n Politely refuse if the function is not suitable for generating test cases.
            \n\n There should be no duplicate or incomplete test cases.
            \n\n Avoid generating repeated statements.
            \n\n Recheck your response before generating.
            """
            # print(prompt)
            resp = res(prompt, model)
            st.session_state.messages.append({"role": "assistant", "content": f"{resp}"})
            st.markdown(resp)
            # The commented Ollama path above returns its text under resp['response']:
            # st.session_state.messages.append({"role": "assistant", "content": f"{resp['response']}"})
            # st.markdown(resp['response'])