import os
import streamlit as st
import ast
import json
import openai
from llama_index.llms.openai import OpenAI
import nest_asyncio
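# nest_asyncio patches the already-running event loop so async clients
# (e.g. the LlamaIndex OpenAI wrapper) can be used inside Streamlit's script loop.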
nest_asyncio.apply()
# import ollama
# from llama_index.llms.ollama import Ollama
# from llama_index.core.llms import ChatMessage
# OpenAI credentials
key = os.getenv('OPENAI_API_KEY')
if not key:
    st.error("Please set the OPENAI_API_KEY environment variable.")
    st.stop()
openai.api_key = key
os.environ["OPENAI_API_KEY"] = key
# Streamlit UI
st.title("Auto Test Case Generation using LLM")
uploaded_files = st.file_uploader("Upload a Python (.py) file", type=".py", accept_multiple_files=True)
if uploaded_files:
    os.makedirs("./data", exist_ok=True)  # ensure the upload directory exists
    for uploaded_file in uploaded_files:
        with open(f"./data/{uploaded_file.name}", 'wb') as f:
            f.write(uploaded_file.getbuffer())
        st.success("File uploaded...")
    st.success("Fetching list of functions...")
    # NOTE: with multiple uploads, only the last saved file is analyzed below
    file_path = f"./data/{uploaded_file.name}"
    def extract_functions_from_file(file_path):
        """Parse the file with ast and map each function name to its source code."""
        with open(file_path, "r") as file:
            file_content = file.read()
        parsed_content = ast.parse(file_content)
        functions = {}
        for node in ast.walk(parsed_content):
            if isinstance(node, ast.FunctionDef):
                func_name = node.name
                # Recover the exact source segment for this function definition
                func_body = ast.get_source_segment(file_content, node)
                functions[func_name] = func_body
        return functions
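    # For a hypothetical upload containing `def add(a, b): return a + b`, this
    # returns {"add": "def add(a, b): return a + b"} (illustrative example only).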
    functions = extract_functions_from_file(file_path)
    list_of_functions = list(functions.keys())
    st.write(list_of_functions)
    def res(prompt, model="gpt-3.5-turbo"):
        response = openai.chat.completions.create(
            model=model,
            messages=[
                {"role": "system",
                 "content": "You are a helpful coding assistant. Your task is to generate test cases. If the function can't be found, politely refuse."
                 },
                {"role": "user",
                 "content": prompt,
                 }
            ]
        )
        return response.choices[0].message.content
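    # Example (hypothetical call): res("Write tests for: def add(a, b): return a + b")
    # returns the model's reply as a plain string via the OpenAI v1 chat API.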
    # Initialize session state for chat messages
    if "messages" not in st.session_state:
        st.session_state.messages = []
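    # Streamlit reruns this script from the top on every interaction, so the
    # conversation is replayed from session_state on each run.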
    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # Accept user input
    if func := st.chat_input("Enter the function name for generating test cases:"):
        st.session_state.messages.append({"role": "assistant", "content": f"Generating test cases for {func}"})
        st.success(f"Generating test cases for {func}")
        func = ''.join(func.split())  # strip all whitespace from the entered name
        if func not in list_of_functions:
            st.write("Incorrect function name")
        else:
            snippet = functions[func]
            # Generation
            model = "gpt-3.5-turbo"
            # resp = ollama.generate(model='codellama',
            #                        prompt=f"""You are a helpful coding assistant. Your task is to generate unit test cases for this function: {snippet}\
            #                        \n\nPolitely refuse if the function is not suitable for generating test cases.
            #                        \n\nGenerate at least 5 unit test cases. Include a couple of edge cases as well.
            #                        \n\nThere should be no duplicate test cases. Avoid generating repeated statements.
            #                        """)
            prompt = f"""You are a helpful coding assistant. Your task is to generate unit test cases for this function: {snippet}\
            \n\nPolitely refuse if the function is not suitable for generating test cases.
            \n\nGenerate at least 5 unit test cases. Include a couple of edge cases as well.
            \n\nThere should be no duplicate test cases. Avoid generating repeated statements.
            """
            print(prompt)  # log the full prompt for debugging
            resp = res(prompt, model)
            st.session_state.messages.append({"role": "assistant", "content": f"{resp}"})
            st.markdown(resp)
            # With the ollama path above, the reply lives under resp['response'] instead:
            # st.session_state.messages.append({"role": "assistant", "content": f"{resp['response']}"})
            # st.markdown(resp['response'])