import gradio as gr
from transformers import pipeline

# Initialize the sentiment-analysis pipeline once at module load.
# (Downloads the default English sentiment model on first run.)
sentiment = pipeline("sentiment-analysis")


def get_sentiment(text):
    """Run sentiment analysis on *text*.

    Returns the raw pipeline output: a list of dicts like
    [{"label": "POSITIVE", "score": 0.99}].
    """
    return sentiment(text)


# BUG FIX: the original passed fn=ask_question, which is undefined in this
# file (it only existed in removed, commented-out code) and raised NameError.
# It also used invalid component shortcuts inputs="입력" / outputs="output";
# Gradio expects registered component names such as "text" and "json".
if __name__ == "__main__":
    gr.Interface(
        fn=get_sentiment,
        inputs="text",
        # "json" renders the pipeline's list-of-dicts result readably.
        outputs="json",
        title="Sentiment Analysis",
        description="",
    ).launch()