""" 
#TODO: make a agent that uses HUMAMN as a tool to get:
- Purpose of science experiment
- What fields of study do they already know of

#IDEA: Platform generate more indepth experiments by generaing a data set and generate / collect scienfic data

### Chatbot
the chatbot helps the BOUNTY_BOARD_CHAIN generate science experiments

### EXPERIMENT and Provide feedback on experiments

### Interrgration

- I need to intergrate this code into the app. This includes creating an id for each post, and potentially and a comment section for each "Experiment"
- I addition i need to generate a mostly pinecone retriever to geenrate scientific experiments from the "community vectore search"
- potentially have prenium users store their private data, but i may not implement this during the hackathon
"""

# https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured
from langchain.memory import ConversationBufferMemory
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import PromptTemplate
from langchain.retrievers import ArxivRetriever, PubMedRetriever, WikipediaRetriever
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
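
# --- Hedged sketch for the "HUMAN as a tool" TODO in the module docstring. ---
# This is one possible shape, not the final design: it wraps the built-in
# `human` tool in a small ReAct agent so the model can ask the user for the
# experiment's purpose and the fields of study they already know. The function
# name and the prompt wording are placeholders.
def ask_human_for_context(question: str) -> str:
    from langchain.agents import AgentType, initialize_agent, load_tools

    tools = load_tools(["human"])  # reads the user's answers from stdin by default
    agent = initialize_agent(
        tools,
        ChatOpenAI(temperature=0),
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    return agent.run(
        "Ask the human what the purpose of their science experiment is and "
        "which fields of study they already know, then summarize their answers "
        f"as extra context for this request: {question}"
    )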

response_schemas = [
    ResponseSchema(name="Experiment_Name", description="the name given to the experiment"),
    ResponseSchema(name="Material", description="list of materials needed to perform the experiment", type="list"),
    ResponseSchema(name="Sources", description="list of sources the information was retrieved from", type="list"),
    ResponseSchema(name="Protocol", description="detailed instructions on how to make the item or perform the experiment", type="list"),
    ResponseSchema(name="Fields_of_study", description="the fields of study that this experiment uses", type="list"),
    ResponseSchema(name="Purpose_of_Experiments", description="infer what the user is trying to achieve"),
    ResponseSchema(name="Safety_Precaution", description="what the user needs to know to avoid any potential harm"),
    ResponseSchema(name="Level_of_Difficulty", description="how difficult the experiment is to perform, e.g. beginner, novice, intermediate, hard"),
]


output_parser = StructuredOutputParser.from_response_schemas(response_schemas)

# Chat memory for the chatbot integration described in the module docstring;
# not wired into the retrieval chains yet.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

format_instructions = output_parser.get_format_instructions()


prompt = PromptTemplate(
    template=(
        "You must generate a well-detailed list of items for creating a given item from scratch. "
        "Also describe its purpose so a text-to-3D model can use it as extra context."
        "\n{format_instructions}\n{question}\n{context}"
    ),
    input_variables=["question", "context"],
    partial_variables={"format_instructions": format_instructions},
)


def join_strings(*args: str) -> str:
    """
    Join an arbitrary number of strings into one string.
    
    Args:
        *args: Variable number of strings to join.
    
    Returns:
        str: Joined string.
    """
    return ''.join(args)

def format_docs(docs):
    # Metadata keys differ per retriever (arXiv uses 'Entry ID'/'Title', Wikipedia uses
    # lowercase 'title'), so fall back to empty strings when a key is missing.
    return "\n\n".join(
        join_strings(d.page_content, d.metadata.get("Entry ID", ""), d.metadata.get("Title", d.metadata.get("title", "")))
        for d in docs
    )


arxiv_retriever = ArxivRetriever(load_max_docs=2)
pub_med_retriever = PubMedRetriever()
wikipedia_retriever = WikipediaRetriever()

# model = ChatOpenAI(temperature=0)
model = ChatOpenAI(temperature=0, model="gpt-4")

arxiv_chain = (
    {"context": arxiv_retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | output_parser
)

pub_med_chain = (
    {"context": pub_med_retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | output_parser
)

wikipedia_chain = (
    {"context": wikipedia_retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | output_parser
)
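

# --- Hedged sketch for the "community vector search" integration note in the
# module docstring. Assumes the `langchain-pinecone` package and a pre-existing
# Pinecone index of community experiment posts; the index name, embedding model,
# and top-k are placeholders, and this is not wired into the app yet.
# Usage (hypothetical): community_vector_search_chain().invoke(query)
def community_vector_search_chain(index_name: str = "community-experiments"):
    from langchain_openai import OpenAIEmbeddings
    from langchain_pinecone import PineconeVectorStore

    vectorstore = PineconeVectorStore.from_existing_index(
        index_name=index_name,
        embedding=OpenAIEmbeddings(),
    )
    community_retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
    return (
        {"context": community_retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | model
        | output_parser
    )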




if __name__ == "__main__":
    query = "how to create electronics on a cellulose substrate"
    pub_med_data = pub_med_chain.invoke(query)
    wiki_data = wikipedia_chain.invoke(query)

    print(pub_med_data)
    print(wiki_data)