
Evaluation

To evaluate your agent's performance, you can use LangSmith evaluations. First, define an evaluator function that judges the agent's results, such as its final output or its trajectory. Depending on your evaluation technique, this may or may not involve a reference output:

def evaluator(*, outputs: dict, reference_outputs: dict):
    # Compare the agent output against the reference output
    output_messages = outputs["messages"]
    reference_messages = reference_outputs["messages"]
    score = compare_messages(output_messages, reference_messages)
    return {"key": "evaluator_score", "score": score}
type EvaluatorParams = {
  outputs: Record<string, any>;
  referenceOutputs: Record<string, any>;
};

function evaluator({ outputs, referenceOutputs }: EvaluatorParams) {
  // Compare the agent output against the reference output
  const outputMessages = outputs.messages;
  const referenceMessages = referenceOutputs.messages;
  const score = compareMessages(outputMessages, referenceMessages);
  return { key: "evaluator_score", score: score };
}
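The compare_messages helper above is a placeholder for whatever scoring logic you want to apply. As a minimal, hypothetical sketch (assuming OpenAI-style message dicts and simple exact matching of tool names, which is an illustrative assumption rather than AgentEvals behavior), it could look like this:

def compare_messages(output_messages, reference_messages):
    # Hypothetical helper: collect the tool names called in each message list
    def called_tools(messages):
        return [
            call["function"]["name"]
            for message in messages
            for call in (message.get("tool_calls") or [])
        ]
    # Score 1.0 when the agent called exactly the expected tools in order, otherwise 0.0
    return 1.0 if called_tools(output_messages) == called_tools(reference_messages) else 0.0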

To get started, you can use the prebuilt evaluators from the AgentEvals package:

pip install -U agentevals
npm install agentevals

Create an evaluator

A common way to evaluate agent performance is to compare the agent's trajectory (the order in which it calls its tools) against a reference trajectory:

import json
from agentevals.trajectory.match import create_trajectory_match_evaluator

outputs = [
    {
        "role": "assistant",
        "tool_calls": [
            {
                "function": {
                    "name": "get_weather",
                    "arguments": json.dumps({"city": "san francisco"}),
                }
            },
            {
                "function": {
                    "name": "get_directions",
                    "arguments": json.dumps({"destination": "presidio"}),
                }
            }
        ],
    }
]
reference_outputs = [
    {
        "role": "assistant",
        "tool_calls": [
            {
                "function": {
                    "name": "get_weather",
                    "arguments": json.dumps({"city": "san francisco"}),
                }
            },
        ],
    }
]

# Create the evaluator
evaluator = create_trajectory_match_evaluator(
    trajectory_match_mode="superset",  # (1)!
)

# Run the evaluator
result = evaluator(
    outputs=outputs, reference_outputs=reference_outputs
)
import { createTrajectoryMatchEvaluator } from "agentevals/trajectory/match";

const outputs = [
  {
    role: "assistant",
    tool_calls: [
      {
        function: {
          name: "get_weather",
          arguments: JSON.stringify({ city: "san francisco" }),
        },
      },
      {
        function: {
          name: "get_directions",
          arguments: JSON.stringify({ destination: "presidio" }),
        },
      },
    ],
  },
];

const referenceOutputs = [
  {
    role: "assistant",
    tool_calls: [
      {
        function: {
          name: "get_weather",
          arguments: JSON.stringify({ city: "san francisco" }),
        },
      },
    ],
  },
];

// Create the evaluator
const evaluator = createTrajectoryMatchEvaluator({
  // Specify how to compare the trajectories. `superset` accepts the output trajectory as valid if it is a superset of the reference trajectory. Other options include: strict, unordered, and subset
  trajectoryMatchMode: "superset", // (1)!
});

// Run the evaluator
const result = evaluator({
  outputs: outputs,
  referenceOutputs: referenceOutputs,
});
  1. Specifies how to compare the trajectories. superset accepts the output trajectory as valid if it is a superset of the reference trajectory. Other options include: strict, unordered, and subset.
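For instance (an illustrative sketch reusing the outputs and reference_outputs defined above), evaluating the same trajectories in strict mode should not match, because the output trajectory contains an extra get_directions call that the reference trajectory does not:

# Strict matching expects the output and reference trajectories to match exactly
strict_evaluator = create_trajectory_match_evaluator(
    trajectory_match_mode="strict",
)

strict_result = strict_evaluator(
    outputs=outputs, reference_outputs=reference_outputs
)
# strict_result["score"] is expected to be falsy here, unlike the superset result above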

As a next step, learn more about how to customize the trajectory match evaluator.

LLM-as-a-judge

You can use the LLM-as-a-judge evaluator, which uses an LLM to compare the trajectory against the reference outputs and output a score:

from agentevals.trajectory.llm import (
    create_trajectory_llm_as_judge,
    TRAJECTORY_ACCURACY_PROMPT_WITH_REFERENCE
)

evaluator = create_trajectory_llm_as_judge(
    prompt=TRAJECTORY_ACCURACY_PROMPT_WITH_REFERENCE,
    model="openai:o3-mini"
)
import {
  createTrajectoryLlmAsJudge,
  TRAJECTORY_ACCURACY_PROMPT_WITH_REFERENCE,
} from "agentevals/trajectory/llm";

const evaluator = createTrajectoryLlmAsJudge({
  prompt: TRAJECTORY_ACCURACY_PROMPT_WITH_REFERENCE,
  model: "openai:o3-mini",
});
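You can then run the judge directly on a trajectory and a reference (a minimal sketch, assuming the judge evaluator accepts the same outputs / reference_outputs arguments as the trajectory match evaluator and that your OpenAI credentials are configured):

# The judge model grades how well the output trajectory matches the reference trajectory
result = evaluator(
    outputs=outputs, reference_outputs=reference_outputs
)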

Run the evaluator

To run the evaluator, you first need to create a LangSmith dataset. To use the prebuilt AgentEvals evaluators, you need a dataset with the following schema (a sketch for creating one follows this list):

  • input: {"messages": [...]} contains the input messages used to invoke the agent.
  • output: {"messages": [...]} contains the expected message history of the agent's output. For trajectory evaluation, you can choose to keep only the assistant messages.
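If you don't have such a dataset yet, here is a minimal sketch of creating one with the LangSmith SDK (the dataset name and the example messages are placeholders, not required values):

from langsmith import Client

client = Client()

# Create a dataset whose examples follow the schema described above
dataset = client.create_dataset(dataset_name="agent-trajectory-evals")
client.create_examples(
    inputs=[
        {"messages": [{"role": "user", "content": "what is the weather in sf?"}]}
    ],
    outputs=[
        {
            "messages": [
                {
                    "role": "assistant",
                    "tool_calls": [
                        {
                            "function": {
                                "name": "get_weather",
                                "arguments": "{\"city\": \"san francisco\"}",
                            }
                        }
                    ],
                }
            ]
        }
    ],
    dataset_id=dataset.id,
)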
from langsmith import Client
from langgraph.prebuilt import create_react_agent
from agentevals.trajectory.match import create_trajectory_match_evaluator

client = Client()
agent = create_react_agent(...)
evaluator = create_trajectory_match_evaluator(...)

experiment_results = client.evaluate(
    lambda inputs: agent.invoke(inputs),
    # Replace with the name of your dataset
    data="<Name of your dataset>",
    evaluators=[evaluator]
)
import { evaluate } from "langsmith/evaluation";
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { createTrajectoryMatchEvaluator } from "agentevals/trajectory/match";

const agent = createReactAgent({...});
const evaluator = createTrajectoryMatchEvaluator({...});

const experimentResults = await evaluate(
    (inputs) => agent.invoke(inputs),
    {
        // Replace with the name of your dataset
        data: "<Name of your dataset>",
        evaluators: [evaluator],
    }
);