import glob
import io
import os
import re
import shutil
import sys
from contextlib import closing
from pathlib import Path

import boto3
import gradio as gr
import requests
from langchain.agents import Tool, initialize_agent, AgentType
from langchain.chains import LLMChain, LLMMathChain, StuffDocumentsChain, ConversationalRetrievalChain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain_community.chat_models import AzureChatOpenAI, ChatOllama
from langchain_community.document_loaders import DirectoryLoader, UnstructuredFileLoader, YoutubeLoader
from langchain_community.llms import AzureOpenAI, Ollama
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import PromptTemplate
from sqlitedict import SqliteDict

from azure_utils import AzureVoiceData
from polly_utils import PollyVoiceData, NEURAL_ENGINE

# OS environment: you must set OPENAI_API_TYPE, OPENAI_API_VERSION,
# OPENAI_API_BASE and OPENAI_API_KEY in the environment.
# For confidentiality they are not provided in the code.
global_deployment_id = "gpt-4-32k"
global_model_name = "gpt-4-32k"
ollama_url = "http://10.51.50.39:3000"
ollama_models = ["qwen:72b", "yi:34b-chat", "deepseek-coder:33b"]

# chroma settings
chroma_api_impl = "HH_Azure_Openai"
# root_file_path = "C:\\Users\\catsk\\SourceCode\\azure_openai_poc\\data\\"
root_file_path = "./data/"  # where the data is actually stored
hr_source_path = "hr_source"
ks_source_path = "ks_source"
believe_source_path = 'be_source'
sqlite_name = "cache.sqlite3"
sqlite_key = "stored_files"
persist_db = "persist_db"
hr_collection_name = "hr_db"
chroma_db_impl = "localdb+langchain"
tmp_collection = "tmp_collection"

# global text settings
inputText = "問題(按q 或Ctrl + c跳出): "
refuse_string = "服務被拒. 內容可能涉及敏感字詞,政治,煽動他人或是其他不當言詞, 請改以其他內容嘗試"

# video
LOOPING_TALKING_HEAD = "./data/videos/Masahiro.mp4"
TALKING_HEAD_WIDTH = "192"
AZURE_VOICE_DATA = AzureVoiceData()
POLLY_VOICE_DATA = PollyVoiceData()

prompt_string = ""


def save_sqlite(key, value):
    try:
        with SqliteDict(sqlite_name) as mydict:
            old_value = mydict.get(key, [])  # default to [] so the first save does not raise KeyError
            mydict[key] = value + old_value  # newest entries go to the front
            mydict.commit()  # need commit() to actually flush the data
    except Exception as ex:
        print("Error during storing data (Possibly unsupported):", ex)


def load_sqlite(key):
    try:
        with SqliteDict(sqlite_name) as mydict:
            value = mydict[key]  # read-only access, no commit() needed
            return value
    except Exception as ex:
        print("Error during loading data:", ex)
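# Usage sketch (the _demo_* helpers in this file are illustrative only and are
# not wired into the app): the SqliteDict helpers implement a tiny
# prepend-only cache of uploaded file names under sqlite_key.
def _demo_sqlite_cache():
    save_sqlite(sqlite_key, ["example.pdf"])  # prepends to whatever is stored
    print(load_sqlite(sqlite_key))            # -> ["example.pdf", ...]
    delete_sql(sqlite_key)                    # resets the key to an empty list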
def delete_sql(key):
    try:
        with SqliteDict(sqlite_name) as mydict:
            mydict[key] = []  # reset the key to an empty list
            mydict.commit()  # need commit() to actually flush the data
    except Exception as ex:
        print("Error during storing data (Possibly unsupported):", ex)


def ai_answer(answer):
    # print the AI answer in green (ANSI escape codes)
    print('AI 回答: \033[32m' + answer + '\033[0m')


def get_llm_model(model_type, model_name):
    match model_type:
        case "azure":
            return AzureOpenAI(deployment_name=global_deployment_id,
                               model_name=global_model_name)
        case "ollama":
            ollama_model = model_name
            match model_name:
                case "qwen:72b":
                    # Qwen 1.5 72B
                    pass
                case "yi:34b-chat":
                    # Yi (01.AI) 34B chat
                    pass
                case "deepseek-coder:33b":
                    # for code-generation tasks
                    pass
                case _:
                    raise gr.Error("the current model is not supported in your Ollama server!")
            return Ollama(model=ollama_model, base_url=ollama_url)


def get_chat_model(model_type, model_name):
    match model_type:
        case "azure":
            return AzureChatOpenAI(deployment_name=global_deployment_id,
                                   model_name=global_model_name)
            # return AzureChatOpenAI(deployment_name=global_deployment_id,
            #                        model_name=global_model_name,
            #                        http_client=httpx.Client(proxies="http://your_proxy:your_port"))
        case "ollama":
            ollama_model = model_name
            match model_name:
                case "qwen:72b":
                    # Qwen 1.5 72B
                    pass
                case "yi:34b-chat":
                    # Yi (01.AI) 34B chat
                    pass
                case "deepseek-coder:33b":
                    # for code-generation tasks
                    pass
                case _:
                    raise gr.Error("the current model is not supported in your Ollama server!")
            return ChatOllama(model=ollama_model, base_url=ollama_url)


def get_openaiembeddings():
    return OpenAIEmbeddings(
        deployment="CivetGPT_embedding",
        model="text-embedding-ada-002",
        openai_api_base="https://civet-project-001.openai.azure.com/",
        openai_api_type="azure",
        openai_api_key="0e3e5b666818488fa1b5cb4e4238ffa7",
        chunk_size=1
    )


def multidocs_loader(files_path, file_ext):
    full_files_pattern = "*." + file_ext
    loader = DirectoryLoader(files_path, glob=full_files_pattern, show_progress=True)
    data = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
    documents = text_splitter.split_documents(data)
    return documents
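# Minimal sketch (hypothetical helper) of the model factories above: both back
# ends share one call shape. The "azure" branch ignores model_name (the
# deployment is pinned by the globals); an Ollama name must be one of
# ollama_models and already pulled on the server at ollama_url.
def _demo_model_selection():
    azure_chat = get_chat_model(model_type="azure", model_name="")
    local_chat = get_chat_model(model_type="ollama", model_name="qwen:72b")
    return azure_chat, local_chat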
def unstructure_file_loader(filename_path):
    loader = UnstructuredFileLoader(filename_path)
    data = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
    documents = text_splitter.split_documents(data)
    return documents


def add_documents_into_chromadb(db_name, file_path, collection_name):
    _db_name = db_name
    documents = multidocs_loader(file_path, "*")
    embeddings = get_openaiembeddings()
    chroma_db = Chroma.from_documents(
        documents,
        embeddings,
        collection_name=collection_name,
        persist_directory=root_file_path + persist_db,
        # chroma_db_impl=chroma_db_impl
    )
    chroma_db.persist()
    print('adding documents done!')


def initial_chroma_db(db_name, files_path, file_ext, collection_name):
    _db_name = db_name
    documents = multidocs_loader(files_path, file_ext)
    embeddings = get_openaiembeddings()
    chroma_db = Chroma.from_documents(
        documents,
        embeddings,
        collection_name=collection_name,
        persist_directory=root_file_path + persist_db,
        chroma_db_impl=chroma_db_impl
    )
    chroma_db.persist()
    print('vectorstore done!')


def add_files_to_collection(input_file_path, collection_name):
    file_path = root_file_path + input_file_path
    add_documents_into_chromadb(persist_db, file_path, collection_name)


def get_prompt_summary_string():
    _local_prompt_string = """使用中文替下面內容做個精簡摘要:

{text}

精簡摘要:"""
    if prompt_string == "":
        return _local_prompt_string
    else:
        return prompt_string


template_string = """
我是鴻海(等同Foxconn)的員工, 你是一個鴻海的人資專家. 請根據歷史對話,針對這次的問題, 形成獨立問題.
請優先從提供的文件中尋找答案, 你被允許回答不知道, 但回答不知道時需要給中央人資的客服聯絡窗口資訊.
不論什麼問題, 都以中文回答

歷史對話: {chat_history}
這次的問題: {question}
人資專家:
"""

default_legal_contract_prompt = """
你是一位超級助理, 十分擅長從大量文字中擷取摘要.
以下用 ''' 包含的是保密合約的內容,幫我生成一份2,000個中文字以內保密合約摘要,摘要需要包含以下項目:
1. 背景: 介紹對方公司的背景、為什麼要跟該公司簽訂保密合約
2. 目的: 要與對方交換什麼資料, 資料內容與範圍
3. 合約期間: 保密合約的時間範圍
4. 提前解約條款: 發生什麼樣的條件就會要提前解約
5. 保密期間: 保密的時間範圍
6. 管轄法院: 如有爭端, 雙方同意的管轄法院是哪個法院

AI 風險評估: 希望AI 可以評估該資料交換是否有高風險的疑慮;
評估準則:
高風險: 涉及到營業秘密的內容
中風險: 沒有營業秘密, 但有涉及敏感資料(足以辨識個人的訊息)
低風險: 僅涉及作業面向的訊息

保密合約:
'''
{text}
'''
"""

default_legal_quotation_prompt = """
你是一位超級助理, 十分擅長從大量文字中擷取摘要.
以下用 ''' 包含的是報價單的內容,幫我生成一份2,000個中文字以內報價單摘要,摘要需要包含以下項目:
1. 標的名稱: 報價單中所列出的產品或服務的名稱。
2. 價格: 報價單中所列出的每個產品或服務的價格, 一定要有正確的幣別與金額數字.
3. 付款內容: 報價單中所列出的付款方式和相關內容, 包括訂金, 交貨款和保留款的金額和支付方式; 除了各款項的交付百分比, 也需要有正確的金額與幣別.
4. 交貨時間: 報價單中所列出的產品或服務的交付的日期或時間範圍。
5. 保固(英文為Warranty): 請摘要報價單中所有關於保固內容.
6. 維修費用: 報價單中所列出的產品或服務的維修費用或相關條款, 有任何維修的金額請一定要列出.
7. 貿易條件(Trade Term)
8. 其他注意事項: 報價單中所列出的其他重要事項或注意事項。

請根據報價單的內容, 生成一份清晰明確的摘要, 條列式地把摘要列出, 確保所有項目都被包含在內. 如果內容超過三句話, 請以子項目的方式逐一列舉出來.
請注意, 生成的摘要應該是簡潔且易於理解的, 要詳細條列出內容, 不可產生 "依其他文件說明" 等說明方式. 在報價單裡沒有找到符合的資訊, 你被允許回答 "無相關資料".

報價單內容:
'''
{text}
'''
"""
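# Sketch (hypothetical helper): the legal prompts above are plain {text}
# templates, so they can drive an LLMChain directly; contract_text stands in
# for the real document body.
def _demo_legal_summary(contract_text):
    prompt = PromptTemplate.from_template(default_legal_contract_prompt)
    chain = LLMChain(llm=get_chat_model(model_type="azure", model_name=""), prompt=prompt)
    return chain.invoke({"text": contract_text})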
def get_prompt_template_string():
    print("template:" + template_string)
    return template_string


def get_default_template_prompt():
    template = "你是個知識廣泛的超級助手, 以下所有問題請用中文回答, 並請在500個中文字以內來解釋 {concept} 概念"
    prompt = PromptTemplate(
        input_variables=["concept"],
        template=template
    )
    return prompt


def chat_conversation():
    print("resource: " + global_deployment_id + " / " + global_model_name)
    chat = AzureChatOpenAI(
        deployment_name=global_deployment_id,
        model_name=global_model_name,
    )
    history = ChatMessageHistory()
    history.add_ai_message("你是一個超級助理, 以下問題都用中文回答")
    while True:
        text = input(inputText)
        if text == 'q':
            break
        history.add_user_message(text)
        ai_response = chat(history.messages)
        ai_answer(ai_response.content)


def local_vector_search(question_str, chat_history, collection_name=hr_collection_name):
    embedding = get_openaiembeddings()
    vectorstore = Chroma(
        embedding_function=embedding,
        collection_name=collection_name,
        persist_directory=root_file_path + persist_db,
    )
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True, ai_prefix="AI超級助理")
    llm = get_llm_model(model_type="azure", model_name="")        # model_name is ignored for azure
    chat_llm = get_chat_model(model_type="azure", model_name="")  # model_name is ignored for azure
    prompt = PromptTemplate(
        template=get_prompt_template_string(),
        input_variables=["question", "chat_history"]
    )
    prompt.format(question=question_str, chat_history=chat_history)  # note: return value unused; the chain formats the prompt itself
    km_chain = ConversationalRetrievalChain.from_llm(
        llm=chat_llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
        condense_question_prompt=prompt,
    )
    km_tool = Tool(
        name='Knowledge Base',
        func=km_chain.run,
        description='一個非常有用的工具, 當要查詢任何公司政策以及鴻海相關資料都使用這個工具'
    )
    math_math = LLMMathChain(llm=llm, verbose=True)
    math_tool = Tool(
        name='Calculator',
        func=math_math.run,
        description='Useful for when you need to answer questions about math.'
    )
    tools = [math_tool, km_tool]
    agent = initialize_agent(
        agent=AgentType.OPENAI_FUNCTIONS,
        tools=tools,
        llm=chat_llm,
        verbose=True,
        memory=memory,
        max_iterations=30,
    )
    result = km_chain(question_str)
    # result = agent.run(question_str)
    print(result)
    return result["answer"]
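# Usage sketch (hypothetical question): ask the HR knowledge base a single
# question. Assumes the hr_db collection was already populated (e.g. via
# update_hr_km below) and the Azure credentials are set in the environment.
def _demo_hr_question():
    answer = local_vector_search("特休假的規定是什麼?", chat_history="")
    print(answer)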
def make_markdown_table(array):
    nl = "\n"
    markdown = ""
    for entry in array:
        markdown += f"{entry} {nl}"
    return markdown


def get_hr_files():
    files = load_sqlite(sqlite_key)
    if files is None:
        return
    else:
        return make_markdown_table(files)


def update_hr_km(files):
    file_paths = [file.name for file in files]
    dest_file_path = root_file_path + hr_source_path
    if not os.path.exists(dest_file_path):
        os.makedirs(dest_file_path)
    for file in file_paths:
        shutil.copy(file, dest_file_path)
    add_files_to_collection(hr_source_path, hr_collection_name)
    save_sqlite(sqlite_key, [Path(file_path).name for file_path in file_paths])
    return get_hr_files()


def clear_all_collection(collection_name):
    pass


def all_files_under_directory(path):
    files = glob.glob(os.path.join(path, '*'))
    for f in files:
        os.remove(f)


def clear_hr_datas():
    # remove hr collection
    client = get_chroma_client(hr_collection_name)
    client.delete_collection(name=hr_collection_name)
    print("Collection removed completely!")
    # remove files
    all_files_under_directory(root_file_path + hr_source_path)
    delete_sql(sqlite_key)
    return get_hr_files()


def num_of_collection(collection_name):
    client = get_chroma_client(collection_name)
    number = client.get_collection(collection_name).count()
    return f"目前知識卷裡有{number}卷項目"


def clear_tmp_collection():
    client = get_chroma_client(tmp_collection)
    client.delete_collection(name=tmp_collection)
    all_files_under_directory(root_file_path + ks_source_path)
    return num_of_collection(tmp_collection)


def content_summary(split_documents):
    global current_chatllm
    llm = current_chatllm
    _local_prompt_string = get_prompt_summary_string()
    print("prompt_string: " + _local_prompt_string)
    _local_prompt = PromptTemplate.from_template(_local_prompt_string)
    llm_chain = LLMChain(llm=llm, prompt=_local_prompt)

    # Define StuffDocumentsChain
    stuff_chain = StuffDocumentsChain(
        llm_chain=llm_chain,
        document_variable_name="text"
    )
    output = stuff_chain.invoke(split_documents)
    return output


def pdf_summary(file_name):
    print("file_name: " + file_name)
    loader = UnstructuredFileLoader(file_name, mode="elements", strategy="fast",)
    document = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=20
    )
    split_documents = text_splitter.split_documents(document)
    return content_summary(split_documents)


def youtube_summary(youtube_url):
    loader = YoutubeLoader.from_youtube_url(youtube_url, add_video_info=True,
                                            language=['en', 'zh-TW'], translation='zh-TW')
    document = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
    split_documents = text_splitter.split_documents(document)
    result = content_summary(split_documents)
    return result['output_text']


def summary_large_file(files):
    file_paths = [file.name for file in files]
    print(file_paths[0])
    result = pdf_summary(file_paths[0])
    return result["output_text"]


def upload_large_file(files):
    file_paths = [file.name for file in files]
    return Path(file_paths[0]).stem


def get_chroma_client(collection_name):
    vectorstore = Chroma(
        embedding_function=get_openaiembeddings(),
        collection_name=collection_name,
        persist_directory=root_file_path + persist_db,
    )
    return vectorstore._client


def create_db():
    files_path = root_file_path + hr_source_path
    file_ext = "pdf"
    initial_chroma_db(persist_db, files_path, file_ext, hr_collection_name)
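# Summary pipeline sketch: pdf_summary and youtube_summary both funnel into
# content_summary, which reads the module-level current_chatllm, so a chat
# model must be assigned first. The PDF path here is a placeholder.
def _demo_summaries():
    global current_chatllm
    current_chatllm = get_chat_model(model_type="azure", model_name="")
    result = pdf_summary(root_file_path + hr_source_path + "/sample.pdf")
    print(result["output_text"])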
def create_html_video(file_name, width, temp_file_url):
    # inline <video> tag for the muted, looping talking-head clip
    html_video = f'<video width={width} height={width} autoplay muted loop><source src={temp_file_url} type="video/mp4"></video>'
    return html_video


def do_html_audio_speak(words_to_speak):
    polly_client = boto3.Session(
        aws_access_key_id="AKIAV7Q7AAGW54RBR6FZ",
        aws_secret_access_key="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",
        region_name='us-west-2'
    ).client('polly')
    language_code = "cmn-CN"
    engine = NEURAL_ENGINE
    voice_id = "Zhiyu"
    print("voice_id: " + voice_id + "\nlanguage_code=" + language_code)

    response = polly_client.synthesize_speech(
        Text=words_to_speak,
        OutputFormat='mp3',
        VoiceId=voice_id,
        LanguageCode=language_code,
        Engine=engine
    )

    html_audio = '<pre>no audio</pre>'

    # Save the audio stream returned by Amazon Polly on Lambda's temp directory
    if "AudioStream" in response:
        with closing(response["AudioStream"]) as stream:
            try:
                with open('./data/audios/tempfile.mp3', 'wb') as f:
                    f.write(stream.read())
                temp_aud_file = gr.File("./data/audios/tempfile.mp3")
                temp_aud_file_url = "/file=" + temp_aud_file.value['name']
                html_audio = f'<audio autoplay><source src={temp_aud_file_url} type="audio/mp3"></audio>'
            except IOError as error:
                # Could not write to file, exit gracefully
                print(error)
                return None, None
    else:
        # The response didn't contain audio data, exit gracefully
        print("Could not stream audio")
        return None, None

    return html_audio, "./data/audios/tempfile.mp3"
def do_html_video_speak():
    key = "eyJhbGciOiJIUzUxMiJ9.eyJ1c2VybmFtZSI6ImNhdHNreXR3QGdtYWlsLmNvbSJ9.OypOUZF-xv4-b8i9F4_aaMQiJpxv0mXRT5kyuJwTMXVd4awV-O-Obntp--AqGghNNowzQ9oG7zArSnQjz2vQgg"
    url = "https://api.exh.ai/animations/v2/generate_lipsync_from_audio"
    files = {"audio_file": ("./data/audios/tempfile.mp3", open("./data/audios/tempfile.mp3", "rb"), "audio/mpeg")}
    payload = {
        "animation_pipeline": "high_quality",
        "idle_url": "https://ugc-idle.s3-us-west-2.amazonaws.com/5fd9ba1b1607b39a4d559300c1e35bee.mp4"
    }
    headers = {
        "accept": "application/json",
        "authorization": f"Bearer {key}"
    }
    res = requests.post(url, data=payload, files=files, headers=headers)
    print("res.status_code: ", res.status_code)

    html_video = '<pre>no video</pre>'
    if isinstance(res.content, bytes):
        response_stream = io.BytesIO(res.content)
        print("len(res.content)): ", len(res.content))
        with open('./data/videos/tempfile.mp4', 'wb') as f:
            f.write(response_stream.read())
        temp_file = gr.File("./data/videos/tempfile.mp4")
        temp_file_url = "/file=" + temp_file.value['name']
        html_video = f'<video width={TALKING_HEAD_WIDTH} height={TALKING_HEAD_WIDTH} autoplay><source src={temp_file_url} type="video/mp4"></video>'
    else:
        print('video url unknown')

    return res, html_video, "./data/videos/tempfile.mp4"
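# End-to-end sketch of the talking-head path (hypothetical helper; the AWS
# Polly and exh.ai credentials above must be valid): synthesize speech, then
# lip-sync a head video against the generated mp3, falling back to the idle
# loop when synthesis fails.
def _demo_talking_head(words_to_speak):
    html_audio, audio_path = do_html_audio_speak(words_to_speak)
    if audio_path is None:
        return create_html_video(LOOPING_TALKING_HEAD, TALKING_HEAD_WIDTH, LOOPING_TALKING_HEAD)
    res, html_video, video_path = do_html_video_speak()
    return html_video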
def kh_update_km(files):
    file_paths = [file.name for file in files]
    dest_file_path = root_file_path + ks_source_path
    if not os.path.exists(dest_file_path):
        os.makedirs(dest_file_path)
    for file in file_paths:
        shutil.copy(file, dest_file_path)
    add_files_to_collection(ks_source_path, tmp_collection)
    return num_of_collection(tmp_collection)


def generate_autolayout(description, template):
    llm = get_chat_model(model_type="azure", model_name="")
    _template = PromptTemplate(
        input_variables=["text"],
        template=template
    )
    llm_chain = LLMChain(llm=llm, prompt=_template)
    layout_output = llm_chain.invoke(description)
    print(layout_output)
    return layout_output


class Logger:
    def __init__(self, filename):
        self.terminal = sys.stdout
        self.log = open(filename, "w", encoding='UTF-8')

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        self.terminal.flush()
        self.log.flush()

    def isatty(self):
        return False


def read_logs():
    sys.stdout.flush()
    ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
    with open("output.log", "r", encoding='UTF-8') as f:
        return ansi_escape.sub('', f.read())


def lunch_style(demo):
    sys.stdout = Logger("output.log")
    demo.load(read_logs, None, None, every=1)

    if len(sys.argv) == 1:
        print("running server as default value")
        demo.launch(ssl_verify=False, share=True,
                    allowed_paths=[root_file_path, root_file_path + hr_source_path])
    elif len(sys.argv) == 2 and sys.argv[1] == "server":
        local_ip = "10.51.50.39"
        local_port = 3100
        print(f"running server on http://{local_ip}:{local_port}")
        demo.launch(ssl_verify=False, share=True,
                    allowed_paths=[root_file_path, root_file_path + hr_source_path],
                    auth=("Foxconn", "Foxconn123!"),
                    server_name=local_ip, server_port=local_port)
    elif len(sys.argv) == 4:
        local_ip = sys.argv[2]
        local_port = sys.argv[3]
        print(f"running server on http://{local_ip}:{local_port}")
        _local_port = int(local_port)
        demo.launch(ssl_verify=False, share=True,
                    allowed_paths=[root_file_path, root_file_path + hr_source_path],
                    auth=("Foxconn", "Foxconn123!"),
                    server_name=local_ip, server_port=_local_port)
    else:
        print("syntax: python