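"""Gradio app: an AI-driven content generation and moderation bot.

Tabs: a chat assistant (Zephyr-7B via the HF Inference API), post generation,
post moderation (Detoxify), image generation (SDXL-Turbo), and NSFW image
classification (FocalNet).
"""
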
import random

import gradio as gr
import numpy as np
import torch
from detoxify import Detoxify
from diffusers import DiffusionPipeline
from huggingface_hub import InferenceClient
from transformers import AutoProcessor, FocalNetForImageClassification
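
# Assumed runtime dependencies (not pinned here): torch, numpy, gradio,
# transformers, diffusers, detoxify, and huggingface_hub; xformers is only
# needed on the GPU path below.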

# Paths and model setup
model_path = "MichalMlodawski/nsfw-image-detection-large"

# Load the model and feature extractor
feature_extractor = AutoProcessor.from_pretrained(model_path)
model = FocalNetForImageClassification.from_pretrained(model_path)
model.eval()

# Mapping from model labels to NSFW categories
label_to_category = {
    "LABEL_0": "Safe",
    "LABEL_1": "Questionable",
    "LABEL_2": "Unsafe"
}

# Device configuration
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the diffusion pipeline
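# (fp16 with xformers memory-efficient attention on GPU, assuming the
# xformers package is installed; full precision on CPU).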
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, use_safetensors=True)
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max

# Initialize the InferenceClient
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
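# NOTE: depending on gating and rate limits, Inference API calls may need an
# HF access token (e.g. the HF_TOKEN environment variable or `huggingface-cli login`).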

# Load Detoxify once at startup; re-instantiating it on every request would
# reload the model weights each time.
detoxify = Detoxify('original')

# Function to analyze text (utility; not wired to a UI tab)
def analyze_text(input_text):
    return detoxify.predict(input_text)

# Inference function for generating images
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt, 
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale, 
        num_inference_steps=num_inference_steps, 
        width=width, 
        height=height,
        generator=generator
    ).images[0] 
    return image
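
# NOTE: infer() exposes the full set of SDXL-Turbo controls (negative prompt,
# seed, size, guidance) but is not currently wired to a UI tab; the simpler
# generate_image() below drives the "Generate Image" tab.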

# Respond function for the chatbot
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # history arrives as a list of [user, assistant] pairs (Gradio tuple format)
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message.content

# Function to generate posts
def generate_post(prompt, max_tokens, temperature, top_p):
    response = client.chat_completion(
        [{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message['content']

# Function to moderate posts
def moderate_post(post):
    results = detoxify.predict(post)
    for category, score in results.items():
        if score > 0.5:
            return f"Post does not adhere to community guidelines (flagged: {category}, score {score:.2f})."
    return "Post adheres to community guidelines."

# Function to generate images using the diffusion pipeline
def generate_image(prompt):
    # Use a local Generator rather than torch.manual_seed, which would also
    # reseed the global RNG as a side effect.
    generator = torch.Generator().manual_seed(random.randint(0, MAX_SEED))
    image = pipe(prompt=prompt, generator=generator).images[0]
    return image

# Function to moderate images
def moderate_image(image):
    # AutoProcessor handles resizing and normalization for the classifier,
    # so no separate torchvision transform is needed.
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
        confidence, predicted = torch.max(probabilities, 1)
    label = model.config.id2label[predicted.item()]
    category = label_to_category.get(label, "Unknown")
    return f"Label: {label}, Category: {category}, Confidence: {confidence.item() * 100:.2f}%"

# Create the Gradio interface
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"

with gr.Blocks(css=css) as demo:
    gr.Markdown("# AI-driven Content Generation and Moderation Bot")
    gr.Markdown(f"Currently running on {power_device}.")

    with gr.Tabs():
        with gr.TabItem("Chat"):
            with gr.Column():
                chat_interface = gr.ChatInterface(
                    respond,
                    additional_inputs=[
                        gr.Textbox(value="You are a friendly Chatbot meant to assist users in managing social media posts ensuring they meet community guidelines", label="System message", visible=False),
                        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens", visible=False),
                        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature", visible=False),
                        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)", visible=False),
                    ],
                )
                advanced_button = gr.Button("Show Advanced Settings")
                advanced_visible = gr.State(False)

                # Setting .visible on a rendered component has no effect in
                # Gradio; visibility changes must be returned as updates from
                # an event handler.
                def toggle_advanced_settings(visible):
                    visible = not visible
                    label = "Hide Advanced Settings" if visible else "Show Advanced Settings"
                    return [gr.update(visible=visible)] * 4 + [gr.update(value=label), visible]

                advanced_button.click(
                    toggle_advanced_settings,
                    advanced_visible,
                    chat_interface.additional_inputs + [advanced_button, advanced_visible],
                )
        
        with gr.TabItem("Generate Post"):
            post_prompt = gr.Textbox(label="Post Prompt")
            max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
            temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
            top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
            generate_button = gr.Button("Generate Post")
            generated_post = gr.Textbox(label="Generated Post")
            generate_button.click(generate_post, [post_prompt, max_tokens, temperature, top_p], generated_post)
        
        with gr.TabItem("Moderate Post"):
            post_content = gr.Textbox(label="Post Content")
            moderate_button = gr.Button("Moderate Post")
            moderation_result = gr.Textbox(label="Moderation Result")
            moderate_button.click(moderate_post, post_content, moderation_result)
        
        with gr.TabItem("Generate Image"):
            image_prompt = gr.Textbox(label="Image Prompt")
            generate_image_button = gr.Button("Generate Image")
            generated_image = gr.Image(label="Generated Image")
            generate_image_button.click(generate_image, image_prompt, generated_image)
        
        with gr.TabItem("Moderate Image"):
            selected_image = gr.Image(type="pil", label="Upload Image for Moderation")
            classify_button = gr.Button("Classify Image")
            classification_result = gr.Textbox(label="Classification Result")
            classify_button.click(moderate_image, selected_image, classification_result)

demo.launch()