Canstralian committed
Commit 4daff63 · verified · 1 parent: 83c1666

Update app.py

Files changed (1): app.py (+31 −12)
app.py CHANGED
@@ -1,29 +1,48 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import requests
+ import pandas as pd
+ import numpy as np
+ from datasets import load_dataset

- # Load the model and tokenizer
- model_path = "Canstralian/pentest_ai"
+ # Load the model and tokenizer from Hugging Face Hub
+ model_path = "Canstralian/pentest_ai"  # Replace with your model path if needed
  model = AutoModelForCausalLM.from_pretrained(model_path)
  tokenizer = AutoTokenizer.from_pretrained(model_path)

  # Function to handle user inputs and generate responses
  def generate_text(instruction):
-     # Encode the input and ensure it fits within model limits
+     # Encode the input text to token IDs
      inputs = tokenizer.encode(instruction, return_tensors='pt', truncation=True, max_length=512)

-     # Generate the output with additional parameters for better control
+     # Generate the output text
      outputs = model.generate(inputs, max_length=150, num_beams=5, temperature=0.7, top_p=0.95, do_sample=True)

-     # Decode and return the result
+     # Decode the output and return the response
      output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
      return output_text

- # Create a Gradio interface
- iface = gr.Interface(fn=generate_text,
-                      inputs=gr.Textbox(lines=2, placeholder="Enter your question here..."),
-                      outputs="text",
-                      live=True)
+ # Function to load a sample dataset (this can be replaced with any dataset)
+ def load_sample_data():
+     # Load a sample dataset from Hugging Face Datasets
+     dataset = load_dataset("imdb", split="train[:5]")
+     df = pd.DataFrame(dataset)
+     return df.head()  # Show a preview of the first 5 entries
+
+ # Gradio interface to interact with the text generation function
+ iface = gr.Interface(
+     fn=generate_text,
+     inputs=gr.Textbox(lines=2, placeholder="Enter your question or prompt here..."),
+     outputs="text",
+     live=True,
+     title="Pentest AI Text Generator",
+     description="Generate text using a fine-tuned model for pentesting-related queries."
+ )
+
+ # Gradio interface for viewing the sample dataset (optional)
+ data_viewer = gr.Interface(fn=load_sample_data, inputs=[], outputs="dataframe", title="Sample Dataset Viewer")

- # Launch the interface
+ # Launch the interfaces
  iface.launch()
+ data_viewer.launch()
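
A note on the generation call above: do_sample=True combined with num_beams=5 makes generate() perform beam-search multinomial sampling, which is rarely what a simple demo wants, and calling generate() without an attention_mask or pad_token_id commonly produces warnings for causal LMs. A minimal sketch of a plain-sampling variant, assuming the model and tokenizer loaded above (generate_text_v2 is a hypothetical name, not part of this commit):

def generate_text_v2(instruction):
    # Tokenize and keep the attention mask alongside the input IDs
    enc = tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(
        enc["input_ids"],
        attention_mask=enc["attention_mask"],
        max_new_tokens=150,  # bounds only the generated continuation, not prompt + output
        do_sample=True,      # plain sampling; num_beams dropped (assumption, not from the commit)
        temperature=0.7,
        top_p=0.95,
        pad_token_id=tokenizer.eos_token_id,  # assumes the model defines an EOS token
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)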
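
Two further points on the interface code: live=True re-runs generate_text on every keystroke, which is expensive for an LLM (dropping it gives a Submit button instead), and launch() typically blocks the main thread when run as a script, so data_viewer.launch() would not start until the first server shuts down; a Hugging Face Space also serves a single app. One way to serve both UIs from one server, sketched under the assumption that iface and data_viewer are the objects defined above, is gr.TabbedInterface:

# Combine both interfaces into one tabbed app; a single launch() then serves both.
app = gr.TabbedInterface(
    [iface, data_viewer],
    tab_names=["Pentest AI Text Generator", "Sample Dataset Viewer"],
)
app.launch()

With this, the two separate launch() calls at the end of the diff would collapse into the single app.launch().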