Space status: Build error
Canstralian committed
Commit: 58022ea
Parent(s): 659ce46
Update app.py
app.py CHANGED
@@ -1,7 +1,6 @@
-import streamlit as st
+import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
-import json
 import logging
 import re
 
@@ -13,7 +12,6 @@ logging.basicConfig(
 )
 
 # Model and tokenizer loading function with caching
-@st.cache_resource
 def load_model():
     """
     Loads and caches the pre-trained language model and tokenizer.
@@ -36,7 +34,6 @@ def load_model():
         return model, tokenizer
     except Exception as e:
         logging.error(f"Error loading model: {e}")
-        st.error("Failed to load model. Please check the logs.")
         return None, None
 
 def sanitize_input(text):
@@ -83,57 +80,35 @@ def generate_text(model, tokenizer, instruction):
         logging.error(f"Error generating text: {e}")
         return "Error in text generation."
 
-
-def load_json_data():
-    """
-    Returns:
-        list: A list of dictionaries with sample user data.
-    """
-    try:
-        json_data = [
-            {"name": "Raja Clarke", "email": "consectetuer@yahoo.edu", "country": "Chile", "company": "Urna Nunc Consulting"},
-            {"name": "Melissa Hobbs", "email": "massa.non@hotmail.couk", "country": "France", "company": "Gravida Mauris Limited"},
-            {"name": "John Doe", "email": "john.doe@example.com", "country": "USA", "company": "Example Corp"},
-            {"name": "Jane Smith", "email": "jane.smith@example.org", "country": "Canada", "company": "Innovative Solutions Inc"}
-        ]
-        logging.info("User JSON data loaded successfully.")
-        return json_data
-    except Exception as e:
-        logging.error(f"Error loading JSON data: {e}")
-        return []
-
-# Streamlit App
-st.title("Penetration Testing AI Assistant")
-
-model, tokenizer = load_model()
-
-if not model or not tokenizer:
-    st.error("Failed to load model or tokenizer. Please check your configuration.")
-
-# User instruction input
-instruction = st.text_input("Enter an instruction for the model:")
-
-# Generate text button
-if instruction:
-    try:
-        generated_text = generate_text(model, tokenizer, instruction)
-        st.write(generated_text)
-    except ValueError as ve:
-        st.error(f"Invalid input: {ve}")
-    except Exception as e:
-        logging.error(f"Error during text generation: {e}")
-        st.error("An error occurred. Please try again.")
-
-# Display sample user data
-json_data = load_json_data()
-for user in json_data:
-    st.write(f"**Name:** {user['name']}")
-    st.write(f"**Email:** {user['email']}")
-    st.write(f"**Country:** {user['country']}")
-    st.write(f"**Company:** {user['company']}")
-    st.write("---")
+# Gradio Interface Function
+def gradio_interface(instruction):
+    """
+    Interface function for Gradio to interact with the model and generate text.
+    """
+    # Load the model and tokenizer
+    model, tokenizer = load_model()
+
+    if not model or not tokenizer:
+        return "Failed to load model or tokenizer. Please check your configuration."
+
+    # Generate the text
+    try:
+        generated_text = generate_text(model, tokenizer, instruction)
+        return generated_text
+    except ValueError as ve:
+        return f"Invalid input: {ve}"
+    except Exception as e:
+        logging.error(f"Error during text generation: {e}")
+        return "An error occurred. Please try again."
+
+# Create Gradio Interface
+iface = gr.Interface(
+    fn=gradio_interface,
+    inputs=gr.Textbox(label="Enter an instruction for the model:", placeholder="Type your instruction here..."),
+    outputs=gr.Textbox(label="Generated Text:"),
+    title="Penetration Testing AI Assistant",
+    description="This tool allows you to interact with a pre-trained AI model for penetration testing assistance. Enter an instruction to generate a response.",
+)
+
+# Launch the Gradio interface
+iface.launch()
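
Note on caching: the diff removes Streamlit's @st.cache_resource decorator but keeps the "Model and tokenizer loading function with caching" comment, and gradio_interface() now calls load_model() on every request. A minimal sketch of one way to keep the load cached without Streamlit, assuming the load_model() structure shown above; the checkpoint name below is a placeholder, since the real model id is outside the changed hunks:

import functools
import logging

from transformers import AutoModelForCausalLM, AutoTokenizer

# Sketch only, not part of this commit: functools.lru_cache plays the role that
# @st.cache_resource played in the Streamlit version, so repeated Gradio requests
# reuse the same model/tokenizer pair instead of reloading them each time.
@functools.lru_cache(maxsize=1)
def load_model():
    try:
        model_name = "your-org/your-model"  # placeholder; the actual checkpoint is not shown in this diff
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        return model, tokenizer
    except Exception as e:
        logging.error(f"Error loading model: {e}")
        return None, None  # caveat: lru_cache will also cache this failure result

If the failure case should not be cached, an alternative is to raise instead of returning None, None and catch the exception inside gradio_interface().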
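
Note on the "Build error" status shown above: switching app.py from Streamlit to Gradio may also require updating the Space configuration, which this commit does not touch. As an assumption (the Space's actual README and requirements.txt are not part of this diff), the README front matter would need to select the Gradio SDK, roughly:

---
title: Penetration Testing AI Assistant
sdk: gradio
sdk_version: 4.44.0   # example version only
app_file: app.py
---

and requirements.txt would still need transformers and torch, while gradio itself is installed from the sdk_version setting rather than listed there.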