"""Entry-point script: run the agent graph once with an initial web-search request.

Loads environment variables from a .env file, enables MLflow autologging for
LangChain, then streams the graph execution via `streamGraph`.
"""
# load_dotenv() runs before the other imports so any modules that read
# environment variables (API keys, endpoints) at import time see them.
from dotenv import load_dotenv

load_dotenv()

import mlflow
from langchain.messages import HumanMessage, SystemMessage, AIMessage, ToolMessage

from agent import getGraph
from utils.InterruptPayload import InterruptPayload
from utils.StreamGraph import streamGraph

# MLflow tracking setup — view the runs with the "mlflow server" command.
mlflow.set_experiment("TEST PROJET")
mlflow.langchain.autolog()


def main() -> None:
    """Build the initial message payload and stream the graph execution."""
    initial_input = {
        "messages": [HumanMessage("Recherche 'Recette de Monster' sur internet")]
    }
    # thread_id identifies the checkpoint thread for this run.
    config = {"configurable": {"thread_id": 'yes'}}
    # And off we go!
    streamGraph(initial_input, config, getGraph())


if __name__ == "__main__":
    main()