"""Helpers that wrap the OpenAI chat-completion API (Japanese medical-chart
summarization) and format ClinicalTrials.gov search results for retrieval.
"""

import glob
import os
import random
import re
import time
import xml.etree.ElementTree as ET

import openai
import pandas as pd
import requests
import wikipedia
from bs4 import BeautifulSoup
from natsort import natsorted

wikipedia.set_lang("ja")

# API key configuration (raises KeyError at import time if the env var is unset)
openai.api_key = os.environ['OPENAI_API_KEY']
engine = "gpt-3.5-turbo"


def generate(system_template, prompt, engine="gpt-3.5-turbo"):
    """Send a system+user prompt pair to the chat API and return the reply text.

    Args:
        system_template: Content of the system message.
        prompt: Content of the user message.
        engine: Chat model name.

    Returns:
        The assistant message content as a string.

    Note: retries every 30 s forever, so this loops indefinitely while the
    OpenAI API is down.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(
                model=engine,
                messages=[
                    {"role": "system", "content": system_template},
                    {"role": "user", "content": prompt},
                ],
            )
            return response["choices"][0]["message"]["content"]
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        # and hid the failure cause; narrow it and surface the error.
        except Exception as exc:
            print("リトライ", exc)
            time.sleep(30)


def generate_carte(prompt, engine="gpt-3.5-turbo"):
    """Summarize a doctor-patient conversation into a Japanese SOAP-format chart.

    Args:
        prompt: The conversation transcript to summarize.
        engine: Chat model name.

    Returns:
        The assistant message content (SOAP chart text) as a string.

    Note: same infinite-retry behavior as `generate`.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(
                model=engine,
                messages=[
                    {"role": "system", "content": "You are useful assistant"},
                    {"role": "user", "content": "%s\n・・・という患者と医師の会話をSOAP形式のカルテとして日本語で端的にまとめて下さい。各セクションはS),O), A),P)として下さい " % prompt},
                ],
            )
            return response["choices"][0]["message"]["content"]
        except Exception as exc:
            print("リトライ", exc)
            time.sleep(30)


# NOTE(review): the function name has a typo ("fileds") but is kept unchanged
# for caller compatibility.
def get_selected_fileds(texts):
    """Search ClinicalTrials.gov for recruiting studies in Japan matching `texts`.

    Args:
        texts: Free-text search expression; spaces are converted to '+'.

    Returns:
        CSV-formatted study fields (NCTId, Condition, BriefTitle, BriefSummary,
        EligibilityCriteria), up to 500 studies.

    Note: `ct` is not defined anywhere in this file — presumably a
    pytrials `ClinicalTrials()` client created elsewhere. As written this
    raises NameError when called; TODO confirm where `ct` is constructed.
    """
    input_name = texts.replace(' ', "+")
    corona_fields = ct.get_study_fields(
        search_expr="%s SEARCH[Location](AREA[LocationCountry]Japan AND AREA[LocationStatus]Recruiting)" % (input_name),
        fields=["NCTId", "Condition", "BriefTitle", 'BriefSummary', 'EligibilityCriteria'],
        max_studies=500,
        fmt="csv",
    )
    return corona_fields


def get_retriever_str(fields):
    """Render tabular study fields as "column:value" lines, one blank line per row.

    Args:
        fields: A sequence of rows where fields[0] is the header (column names)
            and fields[1:] are data rows of equal length.

    Returns:
        A single string: for each data row, one "name:value" line per column,
        followed by a blank line separating rows.
    """
    if not fields:
        return ''
    header = fields[0]  # hoisted: the original re-read fields[0] every iteration
    parts = []
    for row in fields[1:]:
        for name, value in zip(header, row):
            parts.append(name + ":" + value + "\n")
        parts.append('\n')
    # join once instead of quadratic `+=` concatenation
    return ''.join(parts)