# AI Text Humanizer — Gradio demo.
#
# Rewrites AI-generated text with a fine-tuned GPT-3.5 model, then checks the
# result with a DistilBERT AI-detector and retries (up to 3 passes) while the
# output is still confidently classified as AI-written.

# import dependencies
import os

import gradio as gr
from openai import OpenAI
from transformers import pipeline

# SECURITY: the API key must come from the environment, never from source.
# NOTE(review): the original file hard-coded a literal "sk-proj-..." key here;
# that key is now leaked in version control and must be rotated.
api_key = os.environ.get("OPENAI_API_KEY")

# make an instance of the openai client
client = OpenAI(api_key=api_key)

# finetuned model instance
finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"

# Load the AI detection model (downloads weights on first run).
pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")


# Define the function to get predictions
def get_prediction(text):
    """Return the detector's top prediction (dict with 'label' and 'score') for *text*.

    truncation=True guards against inputs longer than the model's maximum
    sequence length (512 tokens for DistilBERT), which would otherwise raise.
    """
    return pipe(text, truncation=True)[0]


# function to humanize the text
def humanize_text(AI_text):
    """Humanizes the provided AI text using the fine-tuned model.

    Runs up to 3 rewrite passes, feeding each pass's output back in, and
    stops early once the detector no longer labels the text 'AI' with
    score >= 0.9. Returns the last rewrite.
    """
    humanized_text = AI_text
    max_attempts = 3

    for _ in range(max_attempts):
        response = client.chat.completions.create(
            model=finetuned_model,
            temperature=0.85,
            messages=[
                {"role": "system", "content": """
                You are a text humanizer.
                You humanize AI generated text.
                The text must appear like humanly written.
                THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
                THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
                {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
                {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"},
            ],
        )
        humanized_text = response.choices[0].message.content.strip()

        # Check if the humanized text is still detected as AI; stop when it
        # passes (not labelled 'AI', or labelled with low confidence).
        prediction = get_prediction(humanized_text)
        if prediction['label'] != 'AI' or prediction['score'] < 0.9:
            break

    return humanized_text


# Gradio interface definition
interface = gr.Interface(
    fn=humanize_text,
    inputs="textbox",
    outputs="textbox",
    title="AI Text Humanizer: NoaiGPT.com Demo",
    description="Enter AI-generated text and get a human-written version.",
)

# Launch the Gradio app only when run as a script, so the module can be
# imported (e.g. for testing) without starting a server.
if __name__ == "__main__":
    interface.launch(debug=True)