Commit ed2acac
1 Parent(s): 69f7465
APP
app.py
ADDED
@@ -0,0 +1,123 @@
import streamlit as st
import pandas as pd
import simplejson as json
import re
from autogen import UserProxyAgent, AssistantAgent

# Streamlit UI Setup
st.set_page_config(page_title="QA Test Case Generator", layout="wide")
st.title("🧪 AI-Powered QA Test Case Generator")
st.write("Enter a user story, and the AI will generate detailed test cases for you!")

# User Input
user_story = st.text_area("📝 Enter User Story:", "As a user, I want to log in so that I can access my account.")

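# LLM configuration for AutoGen's Ollama integration. This assumes a local
# Ollama server is running with the llama3.2 model already pulled; the exact
# extras required for the "ollama" api_type depend on the installed AutoGen version.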
config_list = [
    {
        "model": "llama3.2",
        "api_type": "ollama",
        "stream": False,
    }
]

# AutoGen Agents Setup
qa_agent = AssistantAgent(
    name="QA_Agent",
    max_consecutive_auto_reply=1,
    human_input_mode="NEVER",
    llm_config={
        "timeout": 600,
        "cache_seed": 42,
        "config_list": config_list,
    }
)

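# The proxy agent only initiates the chat and collects the reply: human input
# is disabled and code execution is turned off.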
user_proxy = UserProxyAgent(
    name="User",
    max_consecutive_auto_reply=1,
    human_input_mode="NEVER",
    code_execution_config=False
)

# Function to Generate Test Cases
def generate_test_cases(story):
    prompt = f"""
    Generate **detailed** test cases for the following user story:
    {story}

    Include:
    - Functional test cases
    - Edge cases
    - Negative test cases

    Each test case should include all of the following fields and cover the Functional, Regression, Integration, Performance, and Security aspects, with both positive and negative test cases:
    - ID: Unique test case ID
    - Summary: A brief description of the test objective.
    - Test Type: Functional, Regression, Integration, Performance, Security, etc.
    - Priority: High, Medium, or Low based on impact.
    - Component: The feature or module under test.
    - Step Description: Actions to be performed.
    - Expected Result: The expected system behavior after executing the step.
    - Actual Result: (To be recorded during test execution).
    - Pass/Fail Status: (To be marked after execution).

    Return the test cases in valid JSON format ONLY, inside triple backticks (```) and no additional text:

    [
        {{"ID": "", "Summary": "", "Test Type": "", "Priority": "", "Component": "", "Step Description": "", "Expected Result": "", "Actual Result": "", "Pass/Fail Status": ""}}
    ]
    """

    # Initiating chat with the QA agent
    response = user_proxy.initiate_chat(qa_agent, message=prompt)

    # Extract JSON from AI response
    return extract_json_from_response(response)

# Function to Extract JSON from AI Response
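# The chat result's chat_history (a list of message dicts) is stringified,
# escaped newlines are restored, and each fenced code block is tried in turn
# until one parses as JSON.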
def extract_json_from_response(response):
    raw_response = str(response.chat_history)
    raw_response = raw_response.replace("\\n", "\n")

    # Find all JSON code blocks inside triple backticks
    matches = re.findall(r'```(?:json)?\n(.*?)\n```', raw_response, re.DOTALL)

    for match in matches:
        match = match.strip()  # Normalize the JSON format

        # Clean up any unnecessary whitespace or newlines within the JSON string
        cleaned_json = match.replace("\n", "").replace("\t", "")

        try:
            parsed_json = json.loads(cleaned_json)
            return parsed_json  # Return the first valid JSON found
        except json.JSONDecodeError:
            print("❌ Failed to parse JSON:", cleaned_json)  # Print the failed block for debugging
            continue  # Skip and try the next match

    print("❌ Failed to parse AI response as JSON.")
    return None

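# Note: pd.DataFrame in the button handler below assumes the parsed JSON is a
# list of flat dicts whose keys match the field names requested in the prompt.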
# Generate Button
if st.button("🚀 Generate Test Cases"):
    with st.spinner("Generating test cases..."):
        test_cases = generate_test_cases(user_story)

    if test_cases:
        # Convert JSON data into a DataFrame
        df = pd.DataFrame(test_cases)
        st.dataframe(df)

        # Download as CSV
        csv = df.to_csv(index=False).encode('utf-8')
        st.download_button("📥 Download Test Cases as CSV", data=csv, file_name="test_cases.csv", mime="text/csv")
    else:
        st.error("❌ Failed to generate test cases. Check AI response.")