Agent for interacting with Semantic Scholar
get_app(uniq_id, llm_model='gpt-4o-mini')
This function returns the compiled LangGraph app for the Semantic Scholar (S2) agent.
Source code in aiagents4pharma/talk2competitors/agents/s2_agent.py
```python
def get_app(uniq_id, llm_model='gpt-4o-mini'):
    '''
    This function returns the compiled LangGraph app.
    '''
    def agent_s2_node(state: Talk2Competitors):
        '''
        This function calls the model.
        '''
        logger.log(logging.INFO, "Creating Agent_S2 node with thread_id %s", uniq_id)
        response = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
        return response

    # Define the tools
    tools = [search_tool,
             display_results,
             get_single_paper_recommendations,
             get_multi_paper_recommendations]

    # Create the LLM
    llm = ChatOpenAI(model=llm_model, temperature=0)

    # Create the ReAct agent that drives tool use
    model = create_react_agent(
        llm,
        tools=tools,
        state_schema=Talk2Competitors,
        state_modifier=config.S2_AGENT_PROMPT,
        checkpointer=MemorySaver()
    )

    # Define a new graph
    workflow = StateGraph(Talk2Competitors)

    # Define the single agent node
    workflow.add_node("agent_s2", agent_s2_node)

    # Set the entrypoint as `agent_s2`
    # This means that this node is the first one called
    workflow.add_edge(START, "agent_s2")

    # Initialize memory to persist state between graph runs
    checkpointer = MemorySaver()

    # Finally, we compile it!
    # This compiles it into a LangChain Runnable,
    # meaning you can use it as you would any other runnable.
    # Note that we're (optionally) passing the memory when compiling the graph
    app = workflow.compile(checkpointer=checkpointer)
    logger.log(logging.INFO, "Compiled the graph")

    return app
```
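The compiled app can be invoked like any other LangGraph runnable. Below is a minimal usage sketch, not part of the source file: it assumes the package is installed, that `OPENAI_API_KEY` is set in the environment, and that the `Talk2Competitors` state accepts a `messages` list; the query string and thread id are illustrative.

```python
# Minimal usage sketch (illustrative, not from s2_agent.py).
# Assumes the package is installed and OPENAI_API_KEY is set.
import uuid
from langchain_core.messages import HumanMessage
from aiagents4pharma.talk2competitors.agents.s2_agent import get_app

thread_id = str(uuid.uuid4())   # one thread id per conversation
app = get_app(thread_id)        # llm_model defaults to 'gpt-4o-mini'

# Pass the same thread_id at invocation so the MemorySaver checkpointer
# can persist state across turns of the conversation.
response = app.invoke(
    {"messages": [HumanMessage(content="Search for papers on CRISPR base editing")]},
    config={"configurable": {"thread_id": thread_id}},
)
print(response["messages"][-1].content)
```

Reusing the same `thread_id` on subsequent calls lets the checkpointer restore prior messages, so follow-up questions build on earlier turns.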