Canstralian committed on
Commit 3d25e27 · verified · 1 Parent(s): 55d61a2

Update app.py

Files changed (1)
  1. app.py +8 -5
app.py CHANGED
@@ -21,9 +21,9 @@ def load_model():
         model: Pre-trained language model.
         tokenizer: Tokenizer for the model.
     """
-    model_path = "Canstralian/pentest_ai"
     try:
         device = "cuda" if torch.cuda.is_available() else "cpu"
+        model_path = "Canstralian/pentest_ai"  # Replace with the actual path if different
         model = AutoModelForCausalLM.from_pretrained(
             model_path,
             torch_dtype=torch.float16 if device == "cuda" else torch.float32,
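For orientation, below is a minimal sketch of what the full load_model() plausibly looks like after this change. Only the docstring fragment, the try/device/model_path lines, and the from_pretrained() call come from the hunk above; the imports, the .to(device) call, the tokenizer loading, and the except branch that returns None, None are assumptions (the None return is suggested only by the guard added later in this commit).

# Hypothetical reconstruction: only the hunk lines above come from the diff;
# imports, .to(device), tokenizer loading, and the except branch are assumptions.
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model():
    """
    Returns:
        model: Pre-trained language model.
        tokenizer: Tokenizer for the model.
    """
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model_path = "Canstralian/pentest_ai"  # Replace with the actual path if different
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        ).to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        return model, tokenizer
    except Exception as e:
        st.error(f"Error loading model: {e}")  # assumption: surface the failure in the UI
        return None, None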
@@ -70,10 +70,10 @@ def generate_text(model, tokenizer, instruction):
     device = "cuda" if torch.cuda.is_available() else "cpu"
     tokens = tokenizer.encode(instruction, return_tensors='pt').to(device)
     generated_tokens = model.generate(
-        tokens,
-        max_length=1024,
-        top_p=1.0,
-        temperature=0.5,
+        tokens,
+        max_length=1024,
+        top_p=1.0,
+        temperature=0.5,
         top_k=50
     )
     generated_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
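Read on their own, the generation settings in this hunk feed a call like the sketch below. Everything beyond the lines shown in the hunk is an assumption; in particular, do_sample=True is added here only because transformers' default greedy decoding would otherwise ignore temperature, top_p, and top_k.

# Hypothetical standalone sketch of generate_text(); do_sample=True and the
# trailing return are assumptions not shown in the diff.
import torch

def generate_text(model, tokenizer, instruction):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokens = tokenizer.encode(instruction, return_tensors='pt').to(device)
    generated_tokens = model.generate(
        tokens,
        max_length=1024,
        do_sample=True,   # assumption: enables the sampling parameters below
        top_p=1.0,
        temperature=0.5,
        top_k=50
    )
    generated_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
    return generated_text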
@@ -109,6 +109,9 @@ st.title("Penetration Testing AI Assistant")
 # Load the model and tokenizer
 model, tokenizer = load_model()
 
+if not model or not tokenizer:
+    st.error("Failed to load model or tokenizer. Please check your configuration.")
+
 # User instruction input
 instruction = st.text_input("Enter an instruction for the model:")
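Putting the new guard in context, a minimal sketch of the surrounding Streamlit flow might look like the following, assuming load_model() and generate_text() from app.py are in scope. st.title, load_model(), the st.error message, and st.text_input come from this diff; the st.stop() call and the block that runs generate_text() and displays the result are assumptions.

import streamlit as st

st.title("Penetration Testing AI Assistant")

# Load the model and tokenizer
model, tokenizer = load_model()

if not model or not tokenizer:
    st.error("Failed to load model or tokenizer. Please check your configuration.")
    st.stop()  # assumption: halt rendering rather than continue with a broken model

# User instruction input
instruction = st.text_input("Enter an instruction for the model:")

if instruction:  # assumption: generate only once the user has typed something
    st.write(generate_text(model, tokenizer, instruction))

Note that the guard as committed only calls st.error and then continues, so the text input still renders after a failed load; the st.stop() above is one possible way to short-circuit instead, not part of this change.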