Canstralian committed on
Commit
58022ea
1 Parent(s): 659ce46

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -47
app.py CHANGED
@@ -1,7 +1,6 @@
1
- import streamlit as st
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
- import json
5
  import logging
6
  import re
7
 
@@ -13,7 +12,6 @@ logging.basicConfig(
13
  )
14
 
15
  # Model and tokenizer loading function with caching
16
- @st.cache_resource
17
  def load_model():
18
  """
19
  Loads and caches the pre-trained language model and tokenizer.
@@ -36,7 +34,6 @@ def load_model():
36
  return model, tokenizer
37
  except Exception as e:
38
  logging.error(f"Error loading model: {e}")
39
- st.error("Failed to load model. Please check the logs.")
40
  return None, None
41
 
42
  def sanitize_input(text):
@@ -83,57 +80,35 @@ def generate_text(model, tokenizer, instruction):
83
  logging.error(f"Error generating text: {e}")
84
  return "Error in text generation."
85
 
86
- @st.cache_data
87
- def load_json_data():
88
  """
89
- Loads JSON data, simulating the loading process with a sample list.
90
- Returns:
91
- list: A list of dictionaries with sample user data.
92
  """
93
- try:
94
- json_data = [
95
- {"name": "Raja Clarke", "email": "consectetuer@yahoo.edu", "country": "Chile", "company": "Urna Nunc Consulting"},
96
- {"name": "Melissa Hobbs", "email": "massa.non@hotmail.couk", "country": "France", "company": "Gravida Mauris Limited"},
97
- {"name": "John Doe", "email": "john.doe@example.com", "country": "USA", "company": "Example Corp"},
98
- {"name": "Jane Smith", "email": "jane.smith@example.org", "country": "Canada", "company": "Innovative Solutions Inc"}
99
- ]
100
- logging.info("User JSON data loaded successfully.")
101
- return json_data
102
- except Exception as e:
103
- logging.error(f"Error loading JSON data: {e}")
104
- return []
105
-
106
- # Streamlit App
107
- st.title("Penetration Testing AI Assistant")
108
 
109
- # Load the model and tokenizer
110
- model, tokenizer = load_model()
111
 
112
- if not model or not tokenizer:
113
- st.error("Failed to load model or tokenizer. Please check your configuration.")
114
-
115
- # User instruction input
116
- instruction = st.text_input("Enter an instruction for the model:")
117
-
118
- # Generate text button
119
- if instruction:
120
  try:
121
  generated_text = generate_text(model, tokenizer, instruction)
122
- st.subheader("Generated Text:")
123
- st.write(generated_text)
124
  except ValueError as ve:
125
- st.error(f"Invalid input: {ve}")
126
  except Exception as e:
127
  logging.error(f"Error during text generation: {e}")
128
- st.error("An error occurred. Please try again.")
129
 
130
- # Display JSON user data
131
- st.subheader("User Data (from JSON)")
132
- user_data = load_json_data()
 
 
 
 
 
133
 
134
- for user in user_data:
135
- st.write(f"**Name:** {user['name']}")
136
- st.write(f"**Email:** {user['email']}")
137
- st.write(f"**Country:** {user['country']}")
138
- st.write(f"**Company:** {user['company']}")
139
- st.write("---")
 
1
+ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
 
4
  import logging
5
  import re
6
 
 
12
  )
13
 
14
  # Model and tokenizer loading function with caching
 
15
  def load_model():
16
  """
17
  Loads and caches the pre-trained language model and tokenizer.
 
34
  return model, tokenizer
35
  except Exception as e:
36
  logging.error(f"Error loading model: {e}")
 
37
  return None, None
38
 
39
  def sanitize_input(text):
 
80
  logging.error(f"Error generating text: {e}")
81
  return "Error in text generation."
82
 
83
# Gradio Interface Function
def gradio_interface(instruction):
    """
    Interface function for Gradio to interact with the model and generate text.

    Args:
        instruction (str): The user-supplied instruction forwarded to the
            language model.

    Returns:
        str: The generated text on success, or a human-readable error
        message on failure (Gradio renders whatever string is returned).
    """
    # Memoize the (model, tokenizer) pair on the function object so the
    # expensive load happens once per process, not on every request.
    # This restores the caching lost when @st.cache_resource was removed
    # in the Streamlit -> Gradio migration. Only a *successful* load is
    # cached, so a transient failure is retried on the next call, matching
    # the original per-call behavior.
    cache = getattr(gradio_interface, "_model_cache", None)
    if cache is None:
        cache = load_model()
        if cache[0] is not None and cache[1] is not None:
            gradio_interface._model_cache = cache
    model, tokenizer = cache

    if not model or not tokenizer:
        return "Failed to load model or tokenizer. Please check your configuration."

    # Generate the text
    try:
        return generate_text(model, tokenizer, instruction)
    except ValueError as ve:
        # Raised for invalid/rejected input (e.g. by sanitization) —
        # surface the reason to the user rather than a generic error.
        return f"Invalid input: {ve}"
    except Exception as e:
        logging.error(f"Error during text generation: {e}")
        return "An error occurred. Please try again."
103
 
104
# Create Gradio Interface
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Textbox(label="Enter an instruction for the model:", placeholder="Type your instruction here..."),
    outputs=gr.Textbox(label="Generated Text:"),
    title="Penetration Testing AI Assistant",
    description="This tool allows you to interact with a pre-trained AI model for penetration testing assistance. Enter an instruction to generate a response.",
)

# Launch the Gradio interface only when run as a script, so the module
# stays importable (for tests, embedding, or `gradio app.py` reload mode)
# without side-effectfully starting a web server at import time.
if __name__ == "__main__":
    iface.launch()