import time
import typing
import http.client
import urllib.request
from io import StringIO

import pandas as pd
import streamlit as st
import vertexai
from vertexai.preview.generative_models import GenerativeModel  # , Part, Image
from streamlit_extras.let_it_rain import rain

st.set_page_config(page_icon="icon.jpg", page_title="Content Moderation", layout="wide")
st.markdown(
    "<h1 style='text-align: center;'>Content Moderation</h1>",
    unsafe_allow_html=True,
)

project_id = "agileai-poc"
loc = "us-central1"

# Initialize Vertex AI before creating the model so project_id/loc are actually used.
vertexai.init(project=project_id, location=loc)
model = GenerativeModel("gemini-pro")
# vision_model = GenerativeModel("gemini-pro-vision")
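# A possible refinement (sketch only, not wired in): Streamlit re-runs this whole script
# on every interaction, so the GenerativeModel above is re-created on each rerun. Caching
# the client with st.cache_resource avoids that. The helper name load_model is illustrative
# and simply reuses the project_id/loc values defined in this file.
# @st.cache_resource
# def load_model() -> GenerativeModel:
#     vertexai.init(project=project_id, location=loc)
#     return GenerativeModel("gemini-pro")
#
# model = load_model()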

prompt = """Understand the content provided; ignore any empty spaces found in the content and generate the output only in the given format.
Format:
1. Tone: identify the tone of the content.
2. Negative sentences: "Only find the negative sentences based on semantic analysis" to ensure accuracy in detecting negative words, point-wise, and "must highlight the negative words in bold" in the same sentence.
If the content is in a positive tone, give the output as "No changes required".
"""

prompt2 = """Understand the content provided and generate the output only in the given format.
Format:
1. Tone: provide the generated content's tone.
2. Content: "Don't explain the content"; just modify the same content by only "replacing the negative words and converting the tone into formal". If the content doesn't have any negative words, give the output as "No changes required".
"""

# prompt = "updated_prompt.txt"
# Analysis: analyse the input text and highlight points about the tone of the content, list the sentences that come under negative/positive, mention the negative, positive and neutral words used in each sentence, and highlight them in bold with different font colors so they are easy to differentiate.
# Analysis: analyse the input text and highlight points about the tone of the content, list out the negative sentences and highlight the negative words found in those sentences in bold.

# c1, c2, c3 = st.columns((5, 0.5, 7))
# with c1:
#     input = st.text_area("Post your Content", height=500)
#     tab1, tab2 = st.tabs(["Feedback", "Reference"])
#     if st.button("Submit"):
#         response = model.generate_content([prompt, input])
#         # with c3:
#         #     st.write(response.text)
#         #     st.divider()
#         # with c3:
#         #     if st.button("Reference "):
#         #         reference = model.generate_content([prompt2, response.text])
#         #         st.write(reference.text)
#         with c3:
#             tab1.write(response.text)
#             # with tab2:
#             reference = model.generate_content([prompt2, response.text])
#             tab2.write(reference.text)

## Define layout and containers
HEIGHT = 1000
cols = st.columns(2)
with cols[0]:
    left_panel = st.container(height=HEIGHT + 15, border=True)
with cols[1]:
    upper_right_panel = st.container(height=HEIGHT // 2, border=True)
    lower_right_panel = st.container(height=HEIGHT // 2, border=True)

## Add contents
with left_panel:
    st.markdown("<h3>Post your Content</h3>", unsafe_allow_html=True)
    input = st.text_area(
        label="v",
        label_visibility="collapsed",
        placeholder="your content",
        height=900,
    )
    st.toast("Check the input text; avoid empty spaces if any.", icon="🚨")
    time.sleep(10)
    submit = st.button(":blue[Submit]")

if submit:
    response = model.generate_content([prompt, input])
    # return response
    # with upper_right_panel:
    #     st.write(response.text)
    # with lower_right_panel:
    #     st.markdown("<h3>Rephrased content:</h3>", unsafe_allow_html=True)
    #     reference = model.generate_content([prompt2, response.text])
    #     st.write(reference.text)
    try:
        with upper_right_panel:
            st.markdown("<h3>Analyzed Content:</h3>", unsafe_allow_html=True)
            st.write(response.text)
        with lower_right_panel:
            st.markdown("<h3>Rephrased content:</h3>", unsafe_allow_html=True)
            reference = model.generate_content([prompt2, response.text])
            st.write(reference.text)
    except Exception:
        st.error("Check the input text; avoid empty spaces if any.")
        st.stop()

# rain(emoji="🎈", animation_length="10", falling_speed=10)

# for chunk in response:
#     print(chunk.text)

# content_type = st.selectbox("Content Type", ["Text", "Image", "Video"])
# # input = st.text_area(":blue[My text here :]", height=500)
# if "Video" in content_type:
#     def generate_text(project_id: str, location: str) -> str:
#         # Initialize Vertex AI
#         # vertexai.init(project=project_id, location=loc)
#         # Load the model
#         # vision_model = GenerativeModel("gemini-pro-vision")
#         # Generate text
#         response = vision_model.generate_content(
#             [
#                 Part.from_uri(
#                     "gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4"
#                 ),
#                 "What is in the video?",
#             ]
#         )
#         print(response.text)
#         return response.text
#
#     img_file = st.file_uploader("choose an Image")
# elif "Image" in content_type:
#     # create helper function
#     def load_image_from_url(image_url: str) -> Image:
#         with urllib.request.urlopen(image_url) as response:
#             response = typing.cast(http.client.HTTPResponse, response)
#             image_bytes = response.read()
#             return Image.from_bytes(image_bytes)
#
#     # Load images from Cloud Storage URI
#     # landmark1 = load_image_from_url(
#     #     "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
#     # )
#     # Pass multimodal prompt
#     # model = GenerativeModel("gemini-pro-vision")
#     response = model.generate_content(
#         [
#             landmark1,
#             "city: Rome, Landmark: the Colosseum",
#             landmark2,
#             "city: Beijing, Landmark: Forbidden City",
#             landmark3,
#         ]
#     )
#     print(response)
#     landmark1 = load_image_from_url(
#         "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
#     )

# if st.button("Reference "):
#     reference = model.generate_content([prompt2, response])
#     st.write(reference.text)