Comparing changes

base repository: langchain-ai/langchainjs
base: 64c2b8f33b8c314a4c85a3035865cb396958ede2
head repository: langchain-ai/langchainjs
compare: e0fc2a41e9b09da301a96075956a44863ff827cc
  • 4 commits
  • 11 files changed
  • 1 contributor

Commits on Jan 20, 2025

  1. release(core): 0.3.32 (#7558)

    jacoblee93 authored Jan 20, 2025

    bb33dca
  2. Release 0.3.12 (#7559)

    jacoblee93 authored Jan 20, 2025

    c4f6122

Commits on Jan 21, 2025

  1. fix(core): Prevent cache misses from triggering model start callback runs twice (#7565)

    jacoblee93 authored Jan 21, 2025

    b6007bb
  2. fix(core): Ensure that cached flag in run extras is only set for cache hits (#7566)

    jacoblee93 authored Jan 21, 2025
    e0fc2a4
2 changes: 1 addition & 1 deletion langchain-core/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/core",
-  "version": "0.3.31",
+  "version": "0.3.32",
   "description": "Core LangChain.js abstractions and schemas",
   "type": "module",
   "engines": {
6 changes: 4 additions & 2 deletions langchain-core/src/callbacks/base.ts
@@ -97,7 +97,8 @@ abstract class BaseCallbackHandlerMethodsClass {
     err: Error,
     runId: string,
     parentRunId?: string,
-    tags?: string[]
+    tags?: string[],
+    extraParams?: Record<string, unknown>
   ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
   Promise<any> | any;

@@ -108,7 +109,8 @@ abstract class BaseCallbackHandlerMethodsClass {
     output: LLMResult,
     runId: string,
     parentRunId?: string,
-    tags?: string[]
+    tags?: string[],
+    extraParams?: Record<string, unknown>
   ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
   Promise<any> | any;
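In practice, the new trailing parameter lets a custom handler tell cache hits apart from fresh generations. A minimal sketch of a handler that uses it (the class name and logging are illustrative, not part of this diff):

import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
import type { LLMResult } from "@langchain/core/outputs";

// Illustrative handler: reads the new trailing `extraParams` argument to see
// whether the run that just ended was served from the cache.
class CacheAwareHandler extends BaseCallbackHandler {
  name = "CacheAwareHandler";

  async handleLLMEnd(
    _output: LLMResult,
    runId: string,
    _parentRunId?: string,
    _tags?: string[],
    extraParams?: Record<string, unknown>
  ) {
    const cached = extraParams?.cached === true;
    console.log(`LLM run ${runId} ended (cached: ${cached})`);
  }
}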

22 changes: 18 additions & 4 deletions langchain-core/src/callbacks/manager.ts
@@ -303,7 +303,13 @@ export class CallbackManagerForLLMRun
     );
   }

-  async handleLLMError(err: Error | unknown): Promise<void> {
+  async handleLLMError(
+    err: Error | unknown,
+    _runId?: string,
+    _parentRunId?: string,
+    _tags?: string[],
+    extraParams?: Record<string, unknown>
+  ): Promise<void> {
     await Promise.all(
       this.handlers.map((handler) =>
         consumeCallback(async () => {
@@ -313,7 +319,8 @@ export class CallbackManagerForLLMRun
               err,
               this.runId,
               this._parentRunId,
-              this.tags
+              this.tags,
+              extraParams
             );
           } catch (err) {
             const logFunction = handler.raiseError
@@ -332,7 +339,13 @@ export class CallbackManagerForLLMRun
     );
   }

-  async handleLLMEnd(output: LLMResult): Promise<void> {
+  async handleLLMEnd(
+    output: LLMResult,
+    _runId?: string,
+    _parentRunId?: string,
+    _tags?: string[],
+    extraParams?: Record<string, unknown>
+  ): Promise<void> {
     await Promise.all(
       this.handlers.map((handler) =>
         consumeCallback(async () => {
@@ -342,7 +355,8 @@ export class CallbackManagerForLLMRun
               output,
               this.runId,
               this._parentRunId,
-              this.tags
+              this.tags,
+              extraParams
             );
           } catch (err) {
             const logFunction = handler.raiseError
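On the caller side, a CallbackManagerForLLMRun already knows its own run ID, parent run ID, and tags, so those positions are left undefined and only the trailing extras carry new information. A hedged sketch of that usage (the helper name is hypothetical, not from this diff):

import type { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import type { LLMResult } from "@langchain/core/outputs";

// Hypothetical helper: report an LLMResult that was served from the cache.
// The three `undefined`s skip _runId, _parentRunId, and _tags, which the run
// manager tracks internally; only the extras object is new input.
async function reportCachedResult(
  runManager: CallbackManagerForLLMRun,
  result: LLMResult
) {
  await runManager.handleLLMEnd(result, undefined, undefined, undefined, {
    cached: true,
  });
}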
145 changes: 100 additions & 45 deletions langchain-core/src/language_models/chat_models.ts
@@ -9,6 +9,8 @@ import {
   coerceMessageLikeToMessage,
   AIMessageChunk,
   isAIMessageChunk,
+  isBaseMessage,
+  isAIMessage,
 } from "../messages/index.js";
 import type { BasePromptValueInterface } from "../prompt_values.js";
 import {
@@ -343,41 +345,50 @@ export abstract class BaseChatModel<
   async _generateUncached(
     messages: BaseMessageLike[][],
     parsedOptions: this["ParsedCallOptions"],
-    handledOptions: RunnableConfig
+    handledOptions: RunnableConfig,
+    startedRunManagers?: CallbackManagerForLLMRun[]
   ): Promise<LLMResult> {
     const baseMessages = messages.map((messageList) =>
       messageList.map(coerceMessageLikeToMessage)
     );

-    const inheritableMetadata = {
-      ...handledOptions.metadata,
-      ...this.getLsParams(parsedOptions),
-    };
-    // create callback manager and start run
-    const callbackManager_ = await CallbackManager.configure(
-      handledOptions.callbacks,
-      this.callbacks,
-      handledOptions.tags,
-      this.tags,
-      inheritableMetadata,
-      this.metadata,
-      { verbose: this.verbose }
-    );
-    const extra = {
-      options: parsedOptions,
-      invocation_params: this?.invocationParams(parsedOptions),
-      batch_size: 1,
-    };
-    const runManagers = await callbackManager_?.handleChatModelStart(
-      this.toJSON(),
-      baseMessages,
-      handledOptions.runId,
-      undefined,
-      extra,
-      undefined,
-      undefined,
-      handledOptions.runName
-    );
+    let runManagers: CallbackManagerForLLMRun[] | undefined;
+    if (
+      startedRunManagers !== undefined &&
+      startedRunManagers.length === baseMessages.length
+    ) {
+      runManagers = startedRunManagers;
+    } else {
+      const inheritableMetadata = {
+        ...handledOptions.metadata,
+        ...this.getLsParams(parsedOptions),
+      };
+      // create callback manager and start run
+      const callbackManager_ = await CallbackManager.configure(
+        handledOptions.callbacks,
+        this.callbacks,
+        handledOptions.tags,
+        this.tags,
+        inheritableMetadata,
+        this.metadata,
+        { verbose: this.verbose }
+      );
+      const extra = {
+        options: parsedOptions,
+        invocation_params: this?.invocationParams(parsedOptions),
+        batch_size: 1,
+      };
+      runManagers = await callbackManager_?.handleChatModelStart(
+        this.toJSON(),
+        baseMessages,
+        handledOptions.runId,
+        undefined,
+        extra,
+        undefined,
+        undefined,
+        handledOptions.runName
+      );
+    }
     const generations: ChatGeneration[][] = [];
     const llmOutputs: LLMResult["llmOutput"][] = [];
     // Even if stream is not explicitly called, check if model is implicitly
@@ -511,7 +522,12 @@ export abstract class BaseChatModel<
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     parsedOptions: any;
     handledOptions: RunnableConfig;
-  }): Promise<LLMResult & { missingPromptIndices: number[] }> {
+  }): Promise<
+    LLMResult & {
+      missingPromptIndices: number[];
+      startedRunManagers?: CallbackManagerForLLMRun[];
+    }
+  > {
     const baseMessages = messages.map((messageList) =>
       messageList.map(coerceMessageLikeToMessage)
     );
@@ -534,7 +550,6 @@ export abstract class BaseChatModel<
       options: parsedOptions,
       invocation_params: this?.invocationParams(parsedOptions),
       batch_size: 1,
-      cached: true,
     };
     const runManagers = await callbackManager_?.handleChatModelStart(
       this.toJSON(),
@@ -580,16 +595,51 @@ export abstract class BaseChatModel<
       cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
         if (promiseResult.status === "fulfilled") {
           const result = promiseResult.value as Generation[];
-          generations[i] = result;
+          generations[i] = result.map((result) => {
+            if (
+              "message" in result &&
+              isBaseMessage(result.message) &&
+              isAIMessage(result.message)
+            ) {
+              // eslint-disable-next-line no-param-reassign
+              result.message.usage_metadata = {
+                input_tokens: 0,
+                output_tokens: 0,
+                total_tokens: 0,
+              };
+            }
+            // eslint-disable-next-line no-param-reassign
+            result.generationInfo = {
+              ...result.generationInfo,
+              tokenUsage: {},
+            };
+            return result;
+          });
           if (result.length) {
             await runManager?.handleLLMNewToken(result[0].text);
           }
-          return runManager?.handleLLMEnd({
-            generations: [result],
-          });
+          return runManager?.handleLLMEnd(
+            {
+              generations: [result],
+            },
+            undefined,
+            undefined,
+            undefined,
+            {
+              cached: true,
+            }
+          );
         } else {
           // status === "rejected"
-          await runManager?.handleLLMError(promiseResult.reason);
+          await runManager?.handleLLMError(
+            promiseResult.reason,
+            undefined,
+            undefined,
+            undefined,
+            {
+              cached: true,
+            }
+          );
           return Promise.reject(promiseResult.reason);
         }
       })
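The mapping above rewrites cached generations before they are reported, presumably so a cache hit does not replay token usage that was never actually spent on this call. Roughly, a cache-hit generation ends up shaped like this (illustrative values, not code from the diff):

import { AIMessage } from "@langchain/core/messages";

// Illustrative shape of a cached chat generation after this change:
// usage counters are zeroed and generationInfo.tokenUsage is emptied.
const cachedGeneration = {
  text: "hello",
  message: new AIMessage({
    content: "hello",
    usage_metadata: { input_tokens: 0, output_tokens: 0, total_tokens: 0 },
  }),
  generationInfo: { tokenUsage: {} },
};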
@@ -598,6 +648,7 @@ export abstract class BaseChatModel<
     const output = {
       generations,
       missingPromptIndices,
+      startedRunManagers: runManagers,
     };

     // This defines RUN_KEY as a non-enumerable property on the output object
@@ -650,20 +701,24 @@ export abstract class BaseChatModel<
       callOptions as CallOptions
     );

-    const { generations, missingPromptIndices } = await this._generateCached({
-      messages: baseMessages,
-      cache,
-      llmStringKey,
-      parsedOptions: callOptions,
-      handledOptions: runnableConfig,
-    });
+    const { generations, missingPromptIndices, startedRunManagers } =
+      await this._generateCached({
+        messages: baseMessages,
+        cache,
+        llmStringKey,
+        parsedOptions: callOptions,
+        handledOptions: runnableConfig,
+      });

     let llmOutput = {};
     if (missingPromptIndices.length > 0) {
       const results = await this._generateUncached(
         missingPromptIndices.map((i) => baseMessages[i]),
         callOptions,
-        runnableConfig
+        runnableConfig,
+        startedRunManagers !== undefined
+          ? missingPromptIndices.map((i) => startedRunManagers?.[i])
+          : undefined
       );
       await Promise.all(
         results.generations.map(async (generation, index) => {
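Taken together, the two fixes are observable from an ordinary cached chat model: the model-start callback fires once per invocation even when the cache misses, and the cached flag is reported only when the result actually came from the cache. A rough end-to-end sketch using the core testing utilities (the handler and log lines are illustrative, assuming FakeListChatModel and InMemoryCache as exported from @langchain/core):

import { FakeListChatModel } from "@langchain/core/utils/testing";
import { InMemoryCache } from "@langchain/core/caches";
import type { LLMResult } from "@langchain/core/outputs";

const model = new FakeListChatModel({
  responses: ["hello"],
  cache: new InMemoryCache(),
});

const handler = {
  handleLLMEnd: (
    _output: LLMResult,
    runId: string,
    _parentRunId?: string,
    _tags?: string[],
    extraParams?: Record<string, unknown>
  ) => {
    console.log(`run ${runId} cached:`, extraParams?.cached === true);
  },
};

// First call misses the cache: the model runs, the chat-model-start callback
// fires once, and no `cached` flag is reported.
await model.invoke("hi", { callbacks: [handler] });
// Second identical call hits the cache: `cached: true` is reported.
await model.invoke("hi", { callbacks: [handler] });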