langgraph_used

Author: lusixing
Date: 2026-02-03 21:37:41 -08:00
parent 0aab9a838b
commit c1ce804d14
8 changed files with 613 additions and 5 deletions

View File

@@ -31,6 +31,8 @@ import * as ImagePicker from 'expo-image-picker';
import { AIRole } from '../types';
import { colors, typography, spacing, borderRadius, shadows } from '../theme/colors';
import { aiService, AIMessage } from '../services/ai.service';
+import { langGraphService } from '../services/langgraph.service';
+import { HumanMessage, AIMessage as LangChainAIMessage, SystemMessage } from "@langchain/core/messages";
import { assetsService } from '../services/assets.service';
import { useAuth } from '../context/AuthContext';
import { AI_CONFIG, getVaultStorageKeys } from '../config';
@@ -280,8 +282,23 @@ export default function FlowScreen() {
setMessages(prev => [...prev, userMsg]);
try {
-// Call AI proxy with selected role's system prompt
-const aiResponse = await aiService.sendMessage(userMessage, token, selectedRole?.systemPrompt || '');
+// 1. Convert the existing message history to LangChain format
+const history: (HumanMessage | LangChainAIMessage | SystemMessage)[] = messages.map(msg => {
+  if (msg.role === 'user') return new HumanMessage(msg.content);
+  return new LangChainAIMessage(msg.content);
+});
+// 2. Add the system prompt
+const systemPrompt = new SystemMessage(selectedRole?.systemPrompt || '');
+// 3. Add the new user message
+const currentMsg = new HumanMessage(userMessage);
+// 4. Combine everything for LangGraph processing
+const fullMessages = [systemPrompt, ...history, currentMsg];
+// 5. Execute via the LangGraph service (handles token limits and context)
+const aiResponse = await langGraphService.execute(fullMessages, token);
// Add AI response
const aiMsg: ChatMessage = {

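The new call-site logic reads as: convert the history, prepend the system prompt, append the new user message, and hand the list to the graph. For reference, here is the same flow as a standalone helper; this is an illustrative sketch only, and ChatMessageLike plus buildLangGraphInput are hypothetical names standing in for the app's ChatMessage type, whose exact shape is not shown in this hunk.

import {
  HumanMessage,
  AIMessage as LangChainAIMessage,
  SystemMessage,
  BaseMessage,
} from "@langchain/core/messages";

// Assumed stand-in for the app's ChatMessage type (shape not shown above).
interface ChatMessageLike {
  role: 'user' | 'assistant';
  content: string;
}

// Hypothetical helper mirroring steps 1-4 of the FlowScreen change.
function buildLangGraphInput(
  history: ChatMessageLike[],
  systemPrompt: string,
  userMessage: string,
): BaseMessage[] {
  const converted = history.map(msg =>
    msg.role === 'user'
      ? new HumanMessage(msg.content)
      : new LangChainAIMessage(msg.content));
  // System prompt first, then prior turns, then the new user message.
  return [
    new SystemMessage(systemPrompt),
    ...converted,
    new HumanMessage(userMessage),
  ];
}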
View File

@@ -13,6 +13,7 @@ import {
logApiDebug,
} from '../config';
import { AIRole } from '../types';
+import { trimInternalMessages } from '../utils/token_utils';
// =============================================================================
// Type Definitions
@@ -259,14 +260,17 @@ export const aiService = {
});
}
-const historicalMessages = messages.map(msg => ({
+// Enforce token limit (10,000 tokens)
+const trimmedMessages = trimInternalMessages(messages);
+const historicalMessages = trimmedMessages.map(msg => ({
role: msg.role,
content: msg.content,
}));
const summaryPrompt: AIMessage = {
role: 'user',
-content: 'Please provide a concise summary of the conversation above in Chinese (since the user request was in Chinese). Focus on the main topics discussed and any key conclusions or actions mentioned.',
+content: 'Please provide a concise summary of the conversation above in English. Focus on the main topics discussed and any key conclusions or actions mentioned.',
};
const response = await this.chat([...historicalMessages, summaryPrompt], token);

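To make the trimming guarantee concrete, here is a small illustration; it assumes the AIMessage shape ({ role: 'system' | 'user' | 'assistant'; content: string }) used throughout this commit, and import paths as seen from a file next to ai.service.ts.

import { AIMessage } from './ai.service';
import { trimInternalMessages, MAX_CHARS } from '../utils/token_utils';

const history: AIMessage[] = [
  { role: 'system', content: 'You are a helpful assistant.' }, // always kept
  { role: 'user', content: 'x'.repeat(MAX_CHARS) },            // oldest turn: evicted
  { role: 'user', content: 'recent question' },                // kept
  { role: 'assistant', content: 'recent answer' },             // kept
];

const trimmed = trimInternalMessages(history);
// trimmed holds the system message plus the two recent turns: the padded turn
// would push the running total past MAX_CHARS, so the backwards scan stops
// there and everything older is dropped.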
View File

@@ -0,0 +1,96 @@
/**
* LangGraph Service
*
* Implements AI chat logic using LangGraph.js for state management
* and context handling.
*/
import { StateGraph, START, END, Annotation } from "@langchain/langgraph";
import { BaseMessage, AIMessage } from "@langchain/core/messages";
import { aiService } from "./ai.service";
import { trimLangChainMessages } from "../utils/token_utils";
// =============================================================================
// Graph State
// =============================================================================
/**
 * Define the graph state using the Annotation API (the current standard in
 * LangGraph.js). The messages channel uses a concat reducer, so messages
 * returned by a node are appended to the accumulated history rather than
 * replacing it.
 */
const GraphAnnotation = Annotation.Root({
messages: Annotation<BaseMessage[]>({
reducer: (x, y) => x.concat(y),
default: () => [],
}),
});
// =============================================================================
// Graph Definition
// =============================================================================
/**
* The main node that calls our existing AI API
*/
async function callModel(state: typeof GraphAnnotation.State, config: any) {
const { messages } = state;
const { token } = config.configurable || {};
// 1. Trim messages to stay under token limit
const trimmedMessages = trimLangChainMessages(messages);
// 2. Convert LangChain messages to our internal AIMessage format for the API
const apiMessages = trimmedMessages.map(m => {
  // _getType() reports the LangChain message kind: 'system' | 'human' | 'ai' | ...
  const type = m._getType();
  const role: 'system' | 'user' | 'assistant' =
    type === 'system' ? 'system' : type === 'ai' ? 'assistant' : 'user';
  return {
    role,
    content: m.content.toString(),
  };
});
// 3. Call the proxy service
const response = await aiService.chat(apiMessages, token);
const content = response.choices[0]?.message?.content || "No response generated";
// 4. Return the new message; the reducer on `messages` appends it to state
return {
messages: [new AIMessage(content)]
};
}
// =============================================================================
// Service Export
// =============================================================================
export const langGraphService = {
/**
* Run the chat graph with history
*/
async execute(
currentMessages: BaseMessage[],
userToken: string,
): Promise<string> {
// Define the graph
const workflow = new StateGraph(GraphAnnotation)
.addNode("agent", callModel)
.addEdge(START, "agent")
.addEdge("agent", END);
const app = workflow.compile();
// Execute the graph
const result = await app.invoke(
{ messages: currentMessages },
{ configurable: { token: userToken } }
);
// Return the content of the last message (the AI response)
const lastMsg = result.messages[result.messages.length - 1];
return lastMsg.content.toString();
}
};
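A minimal usage sketch (the function name and prompt strings here are illustrative, not part of the commit):

import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import { langGraphService } from "./langgraph.service";

async function demoChat(userToken: string): Promise<void> {
  const reply = await langGraphService.execute(
    [
      new SystemMessage("You are a concise assistant."),
      new HumanMessage("Summarize what LangGraph adds here in one sentence."),
    ],
    userToken,
  );
  // reply is the content of the AIMessage that callModel appended to state.
  console.log(reply);
}

Note that the graph is a single node wired START → agent → END, so LangGraph is providing state plumbing (the messages reducer and configurable passing) rather than branching; compiling the workflow once at module scope, instead of on every execute call, would avoid rebuilding it per message.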

View File

@@ -0,0 +1,22 @@
/**
 * Mock for Node.js async_hooks
 *
 * LangGraph.js imports AsyncLocalStorage, which React Native's runtime does
 * not provide. This no-op implementation satisfies the import: run/exit simply
 * invoke their callbacks, and getStore() always returns undefined.
 */
export class AsyncLocalStorage {
disable() { }
getStore() {
return undefined;
}
run(store: any, callback: (...args: any[]) => any, ...args: any[]) {
return callback(...args);
}
exit(callback: (...args: any[]) => any, ...args: any[]) {
return callback(...args);
}
enterWith(store: any) { }
}
export default {
AsyncLocalStorage,
};
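For this mock to take effect, the bundler must resolve the async_hooks specifier to this file. The actual wiring is not visible in these hunks (the commit touches 8 files, not all shown here); one common approach in an Expo project is Metro's extraNodeModules, sketched below under the assumption that the mock lives at src/mocks/async_hooks.ts:

// metro.config.js — assumed wiring, not shown in this commit
const path = require('path');
const { getDefaultConfig } = require('expo/metro-config');

const config = getDefaultConfig(__dirname);
config.resolver.extraNodeModules = {
  // Redirect imports of 'async_hooks' (pulled in by LangGraph.js) to the
  // React Native-safe mock defined above.
  async_hooks: path.resolve(__dirname, 'src/mocks/async_hooks.ts'),
};

module.exports = config;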

src/utils/token_utils.ts (new file, 76 lines)
View File

@@ -0,0 +1,76 @@
/**
* Token Utilities
*
* Shared logic for trimming messages to stay within token limits.
*/
import { BaseMessage, SystemMessage } from "@langchain/core/messages";
import { AIMessage as ServiceAIMessage } from "../services/ai.service";
export const TOKEN_LIMIT = 10000;
const CHARS_PER_TOKEN = 3; // Conservative estimate: 1 token ≈ 3 chars
export const MAX_CHARS = TOKEN_LIMIT * CHARS_PER_TOKEN;
/**
* Trims LangChain messages to fit within token limit
*/
export function trimLangChainMessages(messages: BaseMessage[]): BaseMessage[] {
let totalLength = 0;
const trimmed: BaseMessage[] = [];
// Always keep the system message if it's at the start
let systemMsg: BaseMessage | null = null;
if (messages.length > 0 && (messages[0] instanceof SystemMessage || (messages[0] as any)._getType?.() === 'system')) {
systemMsg = messages[0];
totalLength += systemMsg.content.toString().length;
}
// Iterate backwards and add messages until we hit the char limit
for (let i = messages.length - 1; i >= (systemMsg ? 1 : 0); i--) {
const msg = messages[i];
const len = msg.content.toString().length;
if (totalLength + len > MAX_CHARS) break;
trimmed.unshift(msg);
totalLength += len;
}
if (systemMsg) {
trimmed.unshift(systemMsg);
}
return trimmed;
}
/**
* Trims internal AIMessage format messages to fit within token limit
*/
export function trimInternalMessages(messages: ServiceAIMessage[]): ServiceAIMessage[] {
let totalLength = 0;
const trimmed: ServiceAIMessage[] = [];
// Always keep the system message if it's at the start
let systemMsg: ServiceAIMessage | null = null;
if (messages.length > 0 && messages[0].role === 'system') {
systemMsg = messages[0];
totalLength += systemMsg.content.length;
}
// Iterate backwards and add messages until we hit the char limit
for (let i = messages.length - 1; i >= (systemMsg ? 1 : 0); i--) {
const msg = messages[i];
const len = msg.content.length;
if (totalLength + len > MAX_CHARS) break;
trimmed.unshift(msg);
totalLength += len;
}
if (systemMsg) {
trimmed.unshift(systemMsg);
}
return trimmed;
}
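The budget arithmetic: TOKEN_LIMIT × CHARS_PER_TOKEN = 10,000 × 3 = 30,000 characters. A quick illustrative check of trimLangChainMessages against that budget (import path assumed relative to a caller under src/services/):

import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import { trimLangChainMessages, MAX_CHARS } from "../utils/token_utils";

const msgs = [
  new SystemMessage("system prompt"),      // always kept when first
  new HumanMessage("a".repeat(MAX_CHARS)), // 30,000 chars: breaks the scan
  new HumanMessage("latest question"),     // kept
];

const kept = trimLangChainMessages(msgs);
// kept is [SystemMessage, HumanMessage("latest question")]: the backwards scan
// admits the newest turn, stops at the oversized one, and the system message
// is re-attached at the front.

Because the loop breaks at the first overflow rather than skipping and continuing, the kept window is always a contiguous suffix of the conversation, so turns are never dropped from the middle.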