import gradio as gr
from huggingface_hub import InferenceClient
import requests
from PIL import Image
from io import BytesIO
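
# This Gradio app bundles chat, post generation, post/image moderation, and image
# generation behind the HuggingFaceH4/zephyr-7b-beta Inference API. The runtime
# dependencies assumed from the imports above are gradio, huggingface_hub,
# requests, and Pillow:
#   pip install gradio huggingface_hub requests pillow
#   python app.py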
# Initialize the client
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
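
# Depending on deployment, the serverless Inference API may require an access
# token; InferenceClient accepts one via its `token` argument, and huggingface_hub
# also picks up the HF_TOKEN environment variable or cached login credentials.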

# Define the function to respond to user inputs
def respond(message, history):
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages)
    return response.choices[0].message.content

# Define the function to generate posts
def generate_post(prompt):
    response = client.chat_completion([{"role": "user", "content": prompt}])
    return response.choices[0].message.content

# Define the function to moderate posts
def moderate_post(post):
    # Implement your post moderation logic here; this is a simple keyword check
    if "inappropriate" in post:
        return "Post does not adhere to community guidelines."
    return "Post adheres to community guidelines."

# Define the function to generate images
def generate_image(prompt):
    # client.text_to_image already returns a PIL.Image.Image
    image = client.text_to_image(prompt)
    return image
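
# Note: zephyr-7b-beta is a chat model, so text-to-image requests routed through
# this client may be rejected by the Inference API. A minimal sketch, assuming a
# hosted diffusion model is available (the model id is illustrative, not from the
# original code):
#
#   def generate_image(prompt):
#       return client.text_to_image(prompt, model="stabilityai/stable-diffusion-xl-base-1.0")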

# Define the function to moderate images
def moderate_image(image):
    # Convert the PIL image to bytes so it can be sent for moderation
    # (JPEG has no alpha channel, so convert to RGB first)
    buffered = BytesIO()
    image.convert("RGB").save(buffered, format="JPEG")
    image_bytes = buffered.getvalue()
    # Replace with your actual image moderation API endpoint
    moderation_api_url = "https://example.com/moderation/api"
    # Send the image to the moderation API
    response = requests.post(moderation_api_url, files={"file": image_bytes})
    result = response.json()
    # Check the result from the moderation API
    if result.get("moderation_status") == "approved":
        return "Image adheres to community guidelines."
    return "Image does not adhere to community guidelines."

# Create the Gradio interface
demo = gr.Blocks()

with demo:
    gr.Markdown("# AI-driven Content Generation and Moderation Bot")

    with gr.Tabs():
with gr.TabItem("Chat"):
with gr.Column():
chat_interface = gr.ChatInterface(
respond,
)
advanced_button = gr.Button("Show Advanced Settings")
advanced_settings = gr.Column(visible=False)
with advanced_settings:
chat_interface.additional_inputs[0].visible = True
chat_interface.additional_inputs[1].visible = True
chat_interface.additional_inputs[2].visible = True
chat_interface.additional_inputs[3].visible = True
def toggle_advanced_settings():
advanced_settings.visible = not advanced_settings.visible
advanced_button.click(toggle_advanced_settings, [], advanced_settings)
with gr.TabItem("Generate Post"):
post_prompt = gr.Textbox(label="Post Prompt")
generate_button = gr.Button("Generate Post")
generated_post = gr.Textbox(label="Generated Post")
generate_button.click(generate_post, post_prompt, generated_post)
with gr.TabItem("Moderate Post"):
post_content = gr.Textbox(label="Post Content")
moderate_button = gr.Button("Moderate Post")
moderation_result = gr.Textbox(label="Moderation Result")
moderate_button.click(moderate_post, post_content, moderation_result)
with gr.TabItem("Generate Image"):
image_prompt = gr.Textbox(label="Image Prompt")
generate_image_button = gr.Button("Generate Image")
generated_image = gr.Image(label="Generated Image")
generate_image_button.click(generate_image, image_prompt, generated_image)
with gr.TabItem("Moderate Image"):
uploaded_image = gr.Image(label="Upload Image")
moderate_image_button = gr.Button("Moderate Image")
image_moderation_result = gr.Textbox(label="Image Moderation Result")
moderate_image_button.click(moderate_image, uploaded_image, image_moderation_result)

if __name__ == "__main__":
    demo.launch(debug=True)