Class: OpenAI

OpenAI LLM implementation

Hierarchy

BaseLLM

  ↳ OpenAI

Constructors

constructor

new OpenAI(init?)

Parameters

init?: Partial<OpenAI> & { azure?: AzureOpenAIConfig }

Overrides

BaseLLM.constructor

Defined in

packages/core/src/llm/LLM.ts:218
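
Example

A minimal instantiation sketch. The model name and option values are illustrative, and the AzureOpenAIConfig fields shown are assumptions rather than a shape confirmed by this page:

```typescript
import { OpenAI } from "llamaindex";

// Plain OpenAI: every field of the Partial<OpenAI> init is an optional override.
const llm = new OpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0.1,
  maxTokens: 512,
});

// Azure OpenAI via the optional `azure` config. The field names here
// (endpoint, deploymentName) are assumed for illustration.
const azureLlm = new OpenAI({
  azure: {
    apiKey: process.env.AZURE_OPENAI_KEY,
    endpoint: "https://my-resource.openai.azure.com",
    deploymentName: "my-gpt-deployment",
  },
});
```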

Properties

additionalChatOptions

Optional additionalChatOptions: Omit<Partial<ChatCompletionCreateParams>, "stream" | "max_tokens" | "messages" | "model" | "temperature" | "top_p">

Defined in

packages/core/src/llm/LLM.ts:201


additionalSessionOptions

Optional additionalSessionOptions: Omit<Partial<ClientOptions>, "apiKey" | "timeout" | "maxRetries">

Defined in

packages/core/src/llm/LLM.ts:211
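
Example

A sketch of using these two escape hatches. The specific options shown (frequency_penalty, user, baseURL) are illustrative; any field not excluded by the Omit types above can be passed:

```typescript
import { OpenAI } from "llamaindex";

const llm = new OpenAI({
  model: "gpt-3.5-turbo",
  // Forwarded to chat completion requests; stream, max_tokens, messages,
  // model, temperature, and top_p are excluded because the class sets them.
  additionalChatOptions: { frequency_penalty: 0.5, user: "docs-example" },
  // Forwarded to the underlying OpenAI client; apiKey, timeout, and
  // maxRetries are excluded because the class manages them directly.
  additionalSessionOptions: { baseURL: "https://proxy.example.com/v1" },
});
```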


apiKey

Optional apiKey: string = undefined

Defined in

packages/core/src/llm/LLM.ts:207


callbackManager

Optional callbackManager: CallbackManager

Defined in

packages/core/src/llm/LLM.ts:216


maxRetries

maxRetries: number

Defined in

packages/core/src/llm/LLM.ts:208


maxTokens

Optional maxTokens: number

Defined in

packages/core/src/llm/LLM.ts:200


model

model: string

Defined in

packages/core/src/llm/LLM.ts:197


session

session: OpenAISession

Defined in

packages/core/src/llm/LLM.ts:210


temperature

temperature: number

Defined in

packages/core/src/llm/LLM.ts:198


timeout

Optional timeout: number

Defined in

packages/core/src/llm/LLM.ts:209


topP

topP: number

Defined in

packages/core/src/llm/LLM.ts:199

Accessors

metadata

get metadata(): Object

Returns

Object

contextWindow: number
maxTokens: undefined | number
model: string
temperature: number
tokenizer: CL100K_BASE
topP: number

Overrides

BaseLLM.metadata

Defined in

packages/core/src/llm/LLM.ts:273
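
Example

A sketch of reading the metadata snapshot from an existing instance (llm as constructed in the earlier example):

```typescript
const { contextWindow, maxTokens, model, temperature, topP } = llm.metadata;
console.log(`${model}: contextWindow=${contextWindow}, topP=${topP}, maxTokens=${maxTokens}`);
```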

Methods

chat

chat(params): Promise<AsyncIterable<ChatResponseChunk>>

Get a chat response from the LLM

Parameters

params: LLMChatParamsStreaming

Returns

Promise<AsyncIterable<ChatResponseChunk>>

Overrides

BaseLLM.chat

Defined in

packages/core/src/llm/LLM.ts:320

chat(params): Promise<ChatResponse>

Parameters

params: LLMChatParamsNonStreaming

Returns

Promise<ChatResponse>

Overrides

BaseLLM.chat

Defined in

packages/core/src/llm/LLM.ts:323
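
Example

A sketch of both overloads. The stream flag on the params object selects which overload applies; ChatResponse exposing a message and ChatResponseChunk exposing a delta string is assumed from the llamaindex types:

```typescript
import { OpenAI, type ChatMessage } from "llamaindex";

const llm = new OpenAI({ model: "gpt-3.5-turbo" });
const messages: ChatMessage[] = [{ role: "user", content: "Say hello." }];

// Non-streaming: LLMChatParamsNonStreaming resolves to a single ChatResponse.
const response = await llm.chat({ messages });
console.log(response.message.content);

// Streaming: stream: true selects LLMChatParamsStreaming, so the promise
// resolves to an AsyncIterable<ChatResponseChunk>.
const stream = await llm.chat({ messages, stream: true });
for await (const chunk of stream) {
  process.stdout.write(chunk.delta);
}
```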


complete

complete(params): Promise<AsyncIterable<CompletionResponse>>

Get a prompt completion from the LLM

Parameters

params: LLMCompletionParamsStreaming

Returns

Promise<AsyncIterable<CompletionResponse>>

Inherited from

BaseLLM.complete

Defined in

packages/core/src/llm/LLM.ts:138

complete(params): Promise<CompletionResponse>

Parameters

params: LLMCompletionParamsNonStreaming

Returns

Promise<CompletionResponse>

Inherited from

BaseLLM.complete

Defined in

packages/core/src/llm/LLM.ts:141
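
Example

The inherited overloads mirror chat, taking a single prompt rather than messages. CompletionResponse exposing a text field is assumed from the base class:

```typescript
// Non-streaming: resolves to a single CompletionResponse.
const completion = await llm.complete({ prompt: "1 + 1 =" });
console.log(completion.text);

// Streaming: resolves to an AsyncIterable<CompletionResponse>.
const completionStream = await llm.complete({ prompt: "1 + 1 =", stream: true });
for await (const chunk of completionStream) {
  process.stdout.write(chunk.text);
}
```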


mapMessageType

mapMessageType(messageType): "function" | "user" | "assistant" | "system"

Parameters

messageType: MessageType

Returns

"function" | "user" | "assistant" | "system"

Defined in

packages/core/src/llm/LLM.ts:303
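
Example

A short sketch; per the return type, every MessageType collapses to one of the four OpenAI roles (how non-standard types such as "memory" or "generic" are mapped is not documented on this page):

```typescript
llm.mapMessageType("user");      // "user"
llm.mapMessageType("assistant"); // "assistant"
llm.mapMessageType("system");    // "system"
```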


streamChat

Protected streamChat(«destructured»): AsyncIterable<ChatResponseChunk>

Parameters

NameType
«destructured»LLMChatParamsStreaming

Returns

AsyncIterable<ChatResponseChunk>

Defined in

packages/core/src/llm/LLM.ts:358


tokens

tokens(messages): number

Calculates the number of tokens needed for the given chat messages

Parameters

messages: ChatMessage[]

Returns

number

Overrides

BaseLLM.tokens

Defined in

packages/core/src/llm/LLM.ts:288
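
Example

A sketch of budget-checking a prompt before sending it, combining tokens with the metadata accessor; the 256-token reserve is an arbitrary illustrative fallback:

```typescript
import { type ChatMessage } from "llamaindex";

const messages: ChatMessage[] = [
  { role: "system", content: "You are terse." },
  { role: "user", content: "Summarize the plot of Hamlet." },
];

const needed = llm.tokens(messages);
const { contextWindow, maxTokens } = llm.metadata;

// Leave room for the completion itself; maxTokens may be undefined,
// so fall back to a fixed reserve.
if (needed + (maxTokens ?? 256) > contextWindow) {
  throw new Error(`Prompt needs ${needed} tokens; context window is ${contextWindow}`);
}
```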