diff --git a/.stats.yml b/.stats.yml
index 47c2bce1c..2814bb777 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 55
+configured_endpoints: 62
diff --git a/README.md b/README.md
index 406434e6d..b75320e78 100644
--- a/README.md
+++ b/README.md
@@ -102,7 +102,7 @@ Documentation for each method, request param, and response field are available i
### Polling Helpers
-When interacting with the API some actions such as starting a Run may take time to complete. The SDK includes
+When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes
helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
If an API method results in an action which could benefit from polling there will be a corresponding version of the
method ending in 'AndPoll'.
@@ -117,6 +117,20 @@ const run = await openai.beta.threads.runs.createAndPoll(thread.id, {
More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
+### Bulk Upload Helpers
+
+When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations.
+For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once.
+
+```ts
+const fileList = [
+ createReadStream('/home/data/example.pdf'),
+ ...
+];
+
+const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, fileList);
+```
+
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
diff --git a/api.md b/api.md
index 7557ce133..8161fb2c7 100644
--- a/api.md
+++ b/api.md
@@ -179,53 +179,88 @@ Methods:
# Beta
-## Chat
+## VectorStores
-### Completions
+Types:
+
+- VectorStore
+- VectorStoreDeleted
Methods:
-- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
-- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
-- client.beta.chat.completions.stream(body, options?) -> ChatCompletionStream
+- client.beta.vectorStores.create({ ...params }) -> VectorStore
+- client.beta.vectorStores.retrieve(vectorStoreId) -> VectorStore
+- client.beta.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
+- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage
+- client.beta.vectorStores.del(vectorStoreId) -> VectorStoreDeleted
-## Assistants
+### Files
Types:
-- Assistant
-- AssistantDeleted
-- AssistantStreamEvent
-- AssistantTool
-- CodeInterpreterTool
-- FunctionTool
-- MessageStreamEvent
-- RetrievalTool
-- RunStepStreamEvent
-- RunStreamEvent
-- ThreadStreamEvent
+- VectorStoreFile
+- VectorStoreFileDeleted
Methods:
-- client.beta.assistants.create({ ...params }) -> Assistant
-- client.beta.assistants.retrieve(assistantId) -> Assistant
-- client.beta.assistants.update(assistantId, { ...params }) -> Assistant
-- client.beta.assistants.list({ ...params }) -> AssistantsPage
-- client.beta.assistants.del(assistantId) -> AssistantDeleted
+- client.beta.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
+- client.beta.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
+- client.beta.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage
+- client.beta.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted
+- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile>
+- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile>
+- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile>
+- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile>
-### Files
+### FileBatches
Types:
-- AssistantFile
-- FileDeleteResponse
+- VectorStoreFileBatch
Methods:
-- client.beta.assistants.files.create(assistantId, { ...params }) -> AssistantFile
-- client.beta.assistants.files.retrieve(assistantId, fileId) -> AssistantFile
-- client.beta.assistants.files.list(assistantId, { ...params }) -> AssistantFilesPage
-- client.beta.assistants.files.del(assistantId, fileId) -> FileDeleteResponse
+- client.beta.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch
+- client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch
+- client.beta.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch
+- client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage
+- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch>
+- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch>
+- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch>
+
+## Chat
+
+### Completions
+
+Methods:
+
+- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
+- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
+- client.beta.chat.completions.stream(body, options?) -> ChatCompletionStream
+
+## Assistants
+
+Types:
+
+- Assistant
+- AssistantDeleted
+- AssistantStreamEvent
+- AssistantTool
+- CodeInterpreterTool
+- FileSearchTool
+- FunctionTool
+- MessageStreamEvent
+- RunStepStreamEvent
+- RunStreamEvent
+- ThreadStreamEvent
+
+Methods:
+
+- client.beta.assistants.create({ ...params }) -> Assistant
+- client.beta.assistants.retrieve(assistantId) -> Assistant
+- client.beta.assistants.update(assistantId, { ...params }) -> Assistant
+- client.beta.assistants.list({ ...params }) -> AssistantsPage
+- client.beta.assistants.del(assistantId) -> AssistantDeleted
## Threads
@@ -280,11 +315,11 @@ Types:
- CodeInterpreterOutputImage
- CodeInterpreterToolCall
- CodeInterpreterToolCallDelta
+- FileSearchToolCall
+- FileSearchToolCallDelta
- FunctionToolCall
- FunctionToolCallDelta
- MessageCreationStepDetails
-- RetrievalToolCall
-- RetrievalToolCallDelta
- RunStep
- RunStepDelta
- RunStepDeltaEvent
@@ -303,44 +338,33 @@ Methods:
Types:
-- Annotation
-- AnnotationDelta
-- FileCitationAnnotation
-- FileCitationDeltaAnnotation
-- FilePathAnnotation
-- FilePathDeltaAnnotation
-- ImageFile
-- ImageFileContentBlock
-- ImageFileDelta
-- ImageFileDeltaBlock
-- Message
-- MessageContent
-- MessageContentDelta
-- MessageDeleted
-- MessageDelta
-- MessageDeltaEvent
-- Text
-- TextContentBlock
-- TextDelta
-- TextDeltaBlock
-
-Methods:
-
-- client.beta.threads.messages.create(threadId, { ...params }) -> Message
-- client.beta.threads.messages.retrieve(threadId, messageId) -> Message
-- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message
-- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage
-
-#### Files
-
-Types:
-
-- MessageFile
+- Annotation
+- AnnotationDelta
+- FileCitationAnnotation
+- FileCitationDeltaAnnotation
+- FilePathAnnotation
+- FilePathDeltaAnnotation
+- ImageFile
+- ImageFileContentBlock
+- ImageFileDelta
+- ImageFileDeltaBlock
+- Message
+- MessageContent
+- MessageContentDelta
+- MessageDeleted
+- MessageDelta
+- MessageDeltaEvent
+- Text
+- TextContentBlock
+- TextDelta
+- TextDeltaBlock
Methods:
-- client.beta.threads.messages.files.retrieve(threadId, messageId, fileId) -> MessageFile
-- client.beta.threads.messages.files.list(threadId, messageId, { ...params }) -> MessageFilesPage
+- client.beta.threads.messages.create(threadId, { ...params }) -> Message
+- client.beta.threads.messages.retrieve(threadId, messageId) -> Message
+- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message
+- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage
# Batches
diff --git a/helpers.md b/helpers.md
index 7a34c3023..dda1ab26b 100644
--- a/helpers.md
+++ b/helpers.md
@@ -1,4 +1,4 @@
-# Streaming Helpers
+# Helpers
OpenAI supports streaming responses when interacting with the [Chat](#chat-streaming) or [Assistant](#assistant-streaming-api) APIs.
@@ -449,3 +449,24 @@ See an example of a Next.JS integration here [`examples/stream-to-client-next.ts
#### Proxy Streaming to a Browser
See an example of using express to stream to a browser here [`examples/stream-to-client-express.ts`](examples/stream-to-client-express.ts).
+
+# Polling Helpers
+
+When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete.
+The SDK includes helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
+If an API method results in an action which could benefit from polling there will be a corresponding version of the
+method ending in `AndPoll`.
+
+All methods also allow you to set the polling frequency, how often the API is checked for an update, via a function argument (`pollIntervalMs`).
+
+The polling methods are:
+
+```ts
+client.beta.threads.createAndRunPoll(...)
+client.beta.threads.runs.createAndPoll(...)
+client.beta.threads.runs.submitToolOutputsAndPoll(...)
+client.beta.vectorStores.files.uploadAndPoll(...)
+client.beta.vectorStores.files.createAndPoll(...)
+client.beta.vectorStores.fileBatches.createAndPoll(...)
+client.beta.vectorStores.fileBatches.uploadAndPoll(...)
+```
diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts
index ece0ec65c..a2974826c 100644
--- a/src/lib/AssistantStream.ts
+++ b/src/lib/AssistantStream.ts
@@ -7,7 +7,7 @@ import {
ImageFile,
TextDelta,
Messages,
-} from 'openai/resources/beta/threads/messages/messages';
+} from 'openai/resources/beta/threads/messages';
import * as Core from 'openai/core';
import { RequestOptions } from 'openai/core';
import {
@@ -30,7 +30,7 @@ import {
MessageStreamEvent,
RunStepStreamEvent,
RunStreamEvent,
-} from 'openai/resources/beta/assistants/assistants';
+} from 'openai/resources/beta/assistants';
import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps';
import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads';
import MessageDelta = Messages.MessageDelta;
diff --git a/src/lib/Util.ts b/src/lib/Util.ts
new file mode 100644
index 000000000..ae09b8a91
--- /dev/null
+++ b/src/lib/Util.ts
@@ -0,0 +1,23 @@
+/**
+ * Like `Promise.allSettled()` but throws an error if any promises are rejected.
+ */
+export const allSettledWithThrow = async <R>(promises: Promise<R>[]): Promise<R[]> => {
+ const results = await Promise.allSettled(promises);
+ const rejected = results.filter((result): result is PromiseRejectedResult => result.status === 'rejected');
+ if (rejected.length) {
+ for (const result of rejected) {
+ console.error(result.reason);
+ }
+
+ throw new Error(`${rejected.length} promise(s) failed - see the above errors`);
+ }
+
+ // Note: TS was complaining about using `.filter().map()` here for some reason
+ const values: R[] = [];
+ for (const result of results) {
+ if (result.status === 'fulfilled') {
+ values.push(result.value);
+ }
+ }
+ return values;
+};
diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants.ts
similarity index 74%
rename from src/resources/beta/assistants/assistants.ts
rename to src/resources/beta/assistants.ts
index fc9afe2ae..c0827848e 100644
--- a/src/resources/beta/assistants/assistants.ts
+++ b/src/resources/beta/assistants.ts
@@ -3,18 +3,15 @@
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
-import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
+import * as AssistantsAPI from 'openai/resources/beta/assistants';
import * as Shared from 'openai/resources/shared';
-import * as FilesAPI from 'openai/resources/beta/assistants/files';
+import * as MessagesAPI from 'openai/resources/beta/threads/messages';
import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
-import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
export class Assistants extends APIResource {
- files: FilesAPI.Files = new FilesAPI.Files(this._client);
-
/**
* Create an assistant with a model and instructions.
*/
@@ -22,7 +19,7 @@ export class Assistants extends APIResource {
return this._client.post('/assistants', {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -32,7 +29,7 @@ export class Assistants extends APIResource {
retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.get(`/assistants/${assistantId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -47,7 +44,7 @@ export class Assistants extends APIResource {
return this._client.post(`/assistants/${assistantId}`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -69,7 +66,7 @@ export class Assistants extends APIResource {
return this._client.getAPIList('/assistants', AssistantsPage, {
query,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -79,7 +76,7 @@ export class Assistants extends APIResource {
del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.delete(`/assistants/${assistantId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
}
@@ -105,13 +102,6 @@ export interface Assistant {
*/
description: string | null;
- /**
- * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- * attached to this assistant. There can be a maximum of 20 files attached to the
- * assistant. Files are ordered by their creation date in ascending order.
- */
- file_ids: Array;
-
/**
* The system instructions that the assistant uses. The maximum length is 256,000
* characters.
@@ -147,9 +137,53 @@ export interface Assistant {
/**
* A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ * assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ * `function`.
*/
tools: Array;
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: Assistant.ToolResources | null;
+}
+
+export namespace Assistant {
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array;
+ }
+
+ export interface FileSearch {
+ /**
+ * The ID of the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array;
+ }
+ }
}
export interface AssistantDeleted {
@@ -535,7 +569,7 @@ export namespace AssistantStreamEvent {
}
}
-export type AssistantTool = CodeInterpreterTool | RetrievalTool | FunctionTool;
+export type AssistantTool = CodeInterpreterTool | FileSearchTool | FunctionTool;
export interface CodeInterpreterTool {
/**
@@ -544,6 +578,13 @@ export interface CodeInterpreterTool {
type: 'code_interpreter';
}
+export interface FileSearchTool {
+ /**
+ * The type of tool being defined: `file_search`
+ */
+ type: 'file_search';
+}
+
export interface FunctionTool {
function: Shared.FunctionDefinition;
@@ -642,13 +683,6 @@ export namespace MessageStreamEvent {
}
}
-export interface RetrievalTool {
- /**
- * The type of tool being defined: `retrieval`
- */
- type: 'retrieval';
-}
-
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
@@ -956,13 +990,6 @@ export interface AssistantCreateParams {
*/
description?: string | null;
- /**
- * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- * attached to this assistant. There can be a maximum of 20 files attached to the
- * assistant. Files are ordered by their creation date in ascending order.
- */
- file_ids?: Array;
-
/**
* The system instructions that the assistant uses. The maximum length is 256,000
* characters.
@@ -982,27 +1009,123 @@ export interface AssistantCreateParams {
*/
name?: string | null;
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: AssistantCreateParams.ToolResources | null;
+
/**
* A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ * assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ * `function`.
*/
tools?: Array;
+
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ *
+ * We generally recommend altering this or temperature but not both.
+ */
+ top_p?: number | null;
}
-export interface AssistantUpdateParams {
+export namespace AssistantCreateParams {
/**
- * The description of the assistant. The maximum length is 512 characters.
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
*/
- description?: string | null;
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array;
+
+ /**
+ * A helper to create a
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * with file_ids and attach it to this assistant. There can be a maximum of 1
+ * vector store attached to the assistant.
+ */
+ vector_stores?: Array;
+ }
+ export namespace FileSearch {
+ export interface VectorStore {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ * add to the vector store. There can be a maximum of 10000 files in a vector
+ * store.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to a vector store. This can be
+ * useful for storing additional information about the vector store in a structured
+ * format. Keys can be a maximum of 64 characters long and values can be a maximum
+ * of 512 characters long.
+ */
+ metadata?: unknown;
+ }
+ }
+ }
+}
+
+export interface AssistantUpdateParams {
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
- * attached to this assistant. There can be a maximum of 20 files attached to the
- * assistant. Files are ordered by their creation date in ascending order. If a
- * file was previously attached to the list but does not show up in the list, it
- * will be deleted from the assistant.
+ * The description of the assistant. The maximum length is 512 characters.
*/
- file_ids?: Array;
+ description?: string | null;
/**
* The system instructions that the assistant uses. The maximum length is 256,000
@@ -1032,11 +1155,90 @@ export interface AssistantUpdateParams {
*/
name?: string | null;
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: AssistantUpdateParams.ToolResources | null;
+
/**
* A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ * assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ * `function`.
*/
tools?: Array;
+
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ *
+ * We generally recommend altering this or temperature but not both.
+ */
+ top_p?: number | null;
+}
+
+export namespace AssistantUpdateParams {
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * Overrides the list of
+ * [file](https://platform.openai.com/docs/api-reference/files) IDs made available
+ * to the `code_interpreter` tool. There can be a maximum of 20 files associated
+ * with the tool.
+ */
+ file_ids?: Array;
+ }
+
+ export interface FileSearch {
+ /**
+ * Overrides the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array;
+ }
+ }
}
export interface AssistantListParams extends CursorPageParams {
@@ -1061,9 +1263,9 @@ export namespace Assistants {
export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent;
export import AssistantTool = AssistantsAPI.AssistantTool;
export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool;
+ export import FileSearchTool = AssistantsAPI.FileSearchTool;
export import FunctionTool = AssistantsAPI.FunctionTool;
export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent;
- export import RetrievalTool = AssistantsAPI.RetrievalTool;
export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent;
export import RunStreamEvent = AssistantsAPI.RunStreamEvent;
export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent;
@@ -1071,10 +1273,4 @@ export namespace Assistants {
export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams;
export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams;
export import AssistantListParams = AssistantsAPI.AssistantListParams;
- export import Files = FilesAPI.Files;
- export import AssistantFile = FilesAPI.AssistantFile;
- export import FileDeleteResponse = FilesAPI.FileDeleteResponse;
- export import AssistantFilesPage = FilesAPI.AssistantFilesPage;
- export import FileCreateParams = FilesAPI.FileCreateParams;
- export import FileListParams = FilesAPI.FileListParams;
}
diff --git a/src/resources/beta/assistants/files.ts b/src/resources/beta/assistants/files.ts
deleted file mode 100644
index 51fd0c0d8..000000000
--- a/src/resources/beta/assistants/files.ts
+++ /dev/null
@@ -1,154 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import * as Core from 'openai/core';
-import { APIResource } from 'openai/resource';
-import { isRequestOptions } from 'openai/core';
-import * as FilesAPI from 'openai/resources/beta/assistants/files';
-import { CursorPage, type CursorPageParams } from 'openai/pagination';
-
-export class Files extends APIResource {
- /**
- * Create an assistant file by attaching a
- * [File](https://platform.openai.com/docs/api-reference/files) to an
- * [assistant](https://platform.openai.com/docs/api-reference/assistants).
- */
- create(
- assistantId: string,
- body: FileCreateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(`/assistants/${assistantId}/files`, {
- body,
- ...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
- }
-
- /**
- * Retrieves an AssistantFile.
- */
- retrieve(
- assistantId: string,
- fileId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.get(`/assistants/${assistantId}/files/${fileId}`, {
- ...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
- }
-
- /**
- * Returns a list of assistant files.
- */
- list(
- assistantId: string,
- query?: FileListParams,
- options?: Core.RequestOptions,
- ): Core.PagePromise;
- list(
- assistantId: string,
- options?: Core.RequestOptions,
- ): Core.PagePromise;
- list(
- assistantId: string,
- query: FileListParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.PagePromise {
- if (isRequestOptions(query)) {
- return this.list(assistantId, {}, query);
- }
- return this._client.getAPIList(`/assistants/${assistantId}/files`, AssistantFilesPage, {
- query,
- ...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
- }
-
- /**
- * Delete an assistant file.
- */
- del(
- assistantId: string,
- fileId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.delete(`/assistants/${assistantId}/files/${fileId}`, {
- ...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
- }
-}
-
-export class AssistantFilesPage extends CursorPage {}
-
-/**
- * A list of [Files](https://platform.openai.com/docs/api-reference/files) attached
- * to an `assistant`.
- */
-export interface AssistantFile {
- /**
- * The identifier, which can be referenced in API endpoints.
- */
- id: string;
-
- /**
- * The assistant ID that the file is attached to.
- */
- assistant_id: string;
-
- /**
- * The Unix timestamp (in seconds) for when the assistant file was created.
- */
- created_at: number;
-
- /**
- * The object type, which is always `assistant.file`.
- */
- object: 'assistant.file';
-}
-
-/**
- * Deletes the association between the assistant and the file, but does not delete
- * the [File](https://platform.openai.com/docs/api-reference/files) object itself.
- */
-export interface FileDeleteResponse {
- id: string;
-
- deleted: boolean;
-
- object: 'assistant.file.deleted';
-}
-
-export interface FileCreateParams {
- /**
- * A [File](https://platform.openai.com/docs/api-reference/files) ID (with
- * `purpose="assistants"`) that the assistant should use. Useful for tools like
- * `retrieval` and `code_interpreter` that can access files.
- */
- file_id: string;
-}
-
-export interface FileListParams extends CursorPageParams {
- /**
- * A cursor for use in pagination. `before` is an object ID that defines your place
- * in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
- */
- before?: string;
-
- /**
- * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- * order and `desc` for descending order.
- */
- order?: 'asc' | 'desc';
-}
-
-export namespace Files {
- export import AssistantFile = FilesAPI.AssistantFile;
- export import FileDeleteResponse = FilesAPI.FileDeleteResponse;
- export import AssistantFilesPage = FilesAPI.AssistantFilesPage;
- export import FileCreateParams = FilesAPI.FileCreateParams;
- export import FileListParams = FilesAPI.FileListParams;
-}
diff --git a/src/resources/beta/assistants/index.ts b/src/resources/beta/assistants/index.ts
deleted file mode 100644
index c191d338b..000000000
--- a/src/resources/beta/assistants/index.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Assistant,
- AssistantDeleted,
- AssistantStreamEvent,
- AssistantTool,
- CodeInterpreterTool,
- FunctionTool,
- MessageStreamEvent,
- RetrievalTool,
- RunStepStreamEvent,
- RunStreamEvent,
- ThreadStreamEvent,
- AssistantCreateParams,
- AssistantUpdateParams,
- AssistantListParams,
- AssistantsPage,
- Assistants,
-} from './assistants';
-export {
- AssistantFile,
- FileDeleteResponse,
- FileCreateParams,
- FileListParams,
- AssistantFilesPage,
- Files,
-} from './files';
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index 8f8148f9b..ff79d5242 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -1,17 +1,26 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
-import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
+import * as AssistantsAPI from 'openai/resources/beta/assistants';
import * as ChatAPI from 'openai/resources/beta/chat/chat';
import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
+import * as VectorStoresAPI from 'openai/resources/beta/vector-stores/vector-stores';
export class Beta extends APIResource {
+ vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client);
chat: ChatAPI.Chat = new ChatAPI.Chat(this._client);
assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client);
threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client);
}
export namespace Beta {
+ export import VectorStores = VectorStoresAPI.VectorStores;
+ export import VectorStore = VectorStoresAPI.VectorStore;
+ export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted;
+ export import VectorStoresPage = VectorStoresAPI.VectorStoresPage;
+ export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams;
+ export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams;
+ export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams;
export import Chat = ChatAPI.Chat;
export import Assistants = AssistantsAPI.Assistants;
export import Assistant = AssistantsAPI.Assistant;
@@ -19,9 +28,9 @@ export namespace Beta {
export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent;
export import AssistantTool = AssistantsAPI.AssistantTool;
export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool;
+ export import FileSearchTool = AssistantsAPI.FileSearchTool;
export import FunctionTool = AssistantsAPI.FunctionTool;
export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent;
- export import RetrievalTool = AssistantsAPI.RetrievalTool;
export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent;
export import RunStreamEvent = AssistantsAPI.RunStreamEvent;
export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent;
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index 54407edb3..029cd084c 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -6,9 +6,9 @@ export {
AssistantStreamEvent,
AssistantTool,
CodeInterpreterTool,
+ FileSearchTool,
FunctionTool,
MessageStreamEvent,
- RetrievalTool,
RunStepStreamEvent,
RunStreamEvent,
ThreadStreamEvent,
@@ -17,7 +17,7 @@ export {
AssistantListParams,
AssistantsPage,
Assistants,
-} from './assistants/index';
+} from './assistants';
export {
AssistantResponseFormat,
AssistantResponseFormatOption,
@@ -37,3 +37,12 @@ export {
} from './threads/index';
export { Beta } from './beta';
export { Chat } from './chat/index';
+export {
+ VectorStore,
+ VectorStoreDeleted,
+ VectorStoreCreateParams,
+ VectorStoreUpdateParams,
+ VectorStoreListParams,
+ VectorStoresPage,
+ VectorStores,
+} from './vector-stores/index';
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
index 5f41766a9..d0ebb1798 100644
--- a/src/resources/beta/threads/index.ts
+++ b/src/resources/beta/threads/index.ts
@@ -26,7 +26,7 @@ export {
MessageListParams,
MessagesPage,
Messages,
-} from './messages/index';
+} from './messages';
export {
AssistantResponseFormat,
AssistantResponseFormatOption,
diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages.ts
similarity index 89%
rename from src/resources/beta/threads/messages/messages.ts
rename to src/resources/beta/threads/messages.ts
index 28026f3ff..f17b8508d 100644
--- a/src/resources/beta/threads/messages/messages.ts
+++ b/src/resources/beta/threads/messages.ts
@@ -3,13 +3,10 @@
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
-import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
-import * as FilesAPI from 'openai/resources/beta/threads/messages/files';
+import * as MessagesAPI from 'openai/resources/beta/threads/messages';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
export class Messages extends APIResource {
- files: FilesAPI.Files = new FilesAPI.Files(this._client);
-
/**
* Create a message.
*/
@@ -21,7 +18,7 @@ export class Messages extends APIResource {
return this._client.post(`/threads/${threadId}/messages`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -31,7 +28,7 @@ export class Messages extends APIResource {
retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.get(`/threads/${threadId}/messages/${messageId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -47,7 +44,7 @@ export class Messages extends APIResource {
return this._client.post(`/threads/${threadId}/messages/${messageId}`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -71,7 +68,7 @@ export class Messages extends APIResource {
return this._client.getAPIList(`/threads/${threadId}/messages`, MessagesPage, {
query,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
}
@@ -81,21 +78,21 @@ export class MessagesPage extends CursorPage {}
/**
* A citation within the message that points to a specific quote from a specific
* File associated with the assistant or the message. Generated when the assistant
- * uses the "retrieval" tool to search files.
+ * uses the "file_search" tool to search files.
*/
export type Annotation = FileCitationAnnotation | FilePathAnnotation;
/**
* A citation within the message that points to a specific quote from a specific
* File associated with the assistant or the message. Generated when the assistant
- * uses the "retrieval" tool to search files.
+ * uses the "file_search" tool to search files.
*/
export type AnnotationDelta = FileCitationDeltaAnnotation | FilePathDeltaAnnotation;
/**
* A citation within the message that points to a specific quote from a specific
* File associated with the assistant or the message. Generated when the assistant
- * uses the "retrieval" tool to search files.
+ * uses the "file_search" tool to search files.
*/
export interface FileCitationAnnotation {
end_index: number;
@@ -132,7 +129,7 @@ export namespace FileCitationAnnotation {
/**
* A citation within the message that points to a specific quote from a specific
* File associated with the assistant or the message. Generated when the assistant
- * uses the "retrieval" tool to search files.
+ * uses the "file_search" tool to search files.
*/
export interface FileCitationDeltaAnnotation {
/**
@@ -302,6 +299,11 @@ export interface Message {
*/
assistant_id: string | null;
+ /**
+ * A list of files attached to the message, and the tools they were added to.
+ */
+ attachments: Array<Message.Attachment> | null;
+
/**
* The Unix timestamp (in seconds) for when the message was completed.
*/
@@ -317,13 +319,6 @@ export interface Message {
*/
created_at: number;
- /**
- * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that
- * the assistant should use. Useful for tools like retrieval and code_interpreter
- * that can access files. A maximum of 10 files can be attached to a message.
- */
- file_ids: Array;
-
/**
* The Unix timestamp (in seconds) for when the message was marked as incomplete.
*/
@@ -373,6 +368,15 @@ export interface Message {
}
export namespace Message {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+
/**
* On an incomplete message, details about why the message is incomplete.
*/
@@ -413,13 +417,6 @@ export interface MessageDelta {
*/
content?: Array;
- /**
- * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that
- * the assistant should use. Useful for tools like retrieval and code_interpreter
- * that can access files. A maximum of 10 files can be attached to a message.
- */
- file_ids?: Array;
-
/**
* The entity that produced the message. One of `user` or `assistant`.
*/
@@ -511,12 +508,9 @@ export interface MessageCreateParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array;
+ attachments?: Array<MessageCreateParams.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -527,6 +521,17 @@ export interface MessageCreateParams {
metadata?: unknown | null;
}
+export namespace MessageCreateParams {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+}
+
export interface MessageUpdateParams {
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -583,8 +588,4 @@ export namespace Messages {
export import MessageCreateParams = MessagesAPI.MessageCreateParams;
export import MessageUpdateParams = MessagesAPI.MessageUpdateParams;
export import MessageListParams = MessagesAPI.MessageListParams;
- export import Files = FilesAPI.Files;
- export import MessageFile = FilesAPI.MessageFile;
- export import MessageFilesPage = FilesAPI.MessageFilesPage;
- export import FileListParams = FilesAPI.FileListParams;
}
diff --git a/src/resources/beta/threads/messages/files.ts b/src/resources/beta/threads/messages/files.ts
deleted file mode 100644
index 994b09d5f..000000000
--- a/src/resources/beta/threads/messages/files.ts
+++ /dev/null
@@ -1,105 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import * as Core from 'openai/core';
-import { APIResource } from 'openai/resource';
-import { isRequestOptions } from 'openai/core';
-import * as FilesAPI from 'openai/resources/beta/threads/messages/files';
-import { CursorPage, type CursorPageParams } from 'openai/pagination';
-
-export class Files extends APIResource {
- /**
- * Retrieves a message file.
- */
- retrieve(
- threadId: string,
- messageId: string,
- fileId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.get(`/threads/${threadId}/messages/${messageId}/files/${fileId}`, {
- ...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
- }
-
- /**
- * Returns a list of message files.
- */
- list(
- threadId: string,
- messageId: string,
- query?: FileListParams,
- options?: Core.RequestOptions,
- ): Core.PagePromise;
- list(
- threadId: string,
- messageId: string,
- options?: Core.RequestOptions,
- ): Core.PagePromise;
- list(
- threadId: string,
- messageId: string,
- query: FileListParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.PagePromise {
- if (isRequestOptions(query)) {
- return this.list(threadId, messageId, {}, query);
- }
- return this._client.getAPIList(`/threads/${threadId}/messages/${messageId}/files`, MessageFilesPage, {
- query,
- ...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
- }
-}
-
-export class MessageFilesPage extends CursorPage {}
-
-/**
- * A list of files attached to a `message`.
- */
-export interface MessageFile {
- /**
- * The identifier, which can be referenced in API endpoints.
- */
- id: string;
-
- /**
- * The Unix timestamp (in seconds) for when the message file was created.
- */
- created_at: number;
-
- /**
- * The ID of the [message](https://platform.openai.com/docs/api-reference/messages)
- * that the [File](https://platform.openai.com/docs/api-reference/files) is
- * attached to.
- */
- message_id: string;
-
- /**
- * The object type, which is always `thread.message.file`.
- */
- object: 'thread.message.file';
-}
-
-export interface FileListParams extends CursorPageParams {
- /**
- * A cursor for use in pagination. `before` is an object ID that defines your place
- * in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
- */
- before?: string;
-
- /**
- * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- * order and `desc` for descending order.
- */
- order?: 'asc' | 'desc';
-}
-
-export namespace Files {
- export import MessageFile = FilesAPI.MessageFile;
- export import MessageFilesPage = FilesAPI.MessageFilesPage;
- export import FileListParams = FilesAPI.FileListParams;
-}
diff --git a/src/resources/beta/threads/messages/index.ts b/src/resources/beta/threads/messages/index.ts
deleted file mode 100644
index ef446d012..000000000
--- a/src/resources/beta/threads/messages/index.ts
+++ /dev/null
@@ -1,30 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Annotation,
- AnnotationDelta,
- FileCitationAnnotation,
- FileCitationDeltaAnnotation,
- FilePathAnnotation,
- FilePathDeltaAnnotation,
- ImageFile,
- ImageFileContentBlock,
- ImageFileDelta,
- ImageFileDeltaBlock,
- Message,
- MessageContent,
- MessageContentDelta,
- MessageDeleted,
- MessageDelta,
- MessageDeltaEvent,
- Text,
- TextContentBlock,
- TextDelta,
- TextDeltaBlock,
- MessageCreateParams,
- MessageUpdateParams,
- MessageListParams,
- MessagesPage,
- Messages,
-} from './messages';
-export { MessageFile, FileListParams, MessageFilesPage, Files } from './files';
diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts
index c9b2d1ef5..d216195cb 100644
--- a/src/resources/beta/threads/runs/index.ts
+++ b/src/resources/beta/threads/runs/index.ts
@@ -5,11 +5,11 @@ export {
CodeInterpreterOutputImage,
CodeInterpreterToolCall,
CodeInterpreterToolCallDelta,
+ FileSearchToolCall,
+ FileSearchToolCallDelta,
FunctionToolCall,
FunctionToolCallDelta,
MessageCreationStepDetails,
- RetrievalToolCall,
- RetrievalToolCallDelta,
RunStep,
RunStepDelta,
RunStepDeltaEvent,
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 4cfa6c36e..9e42f8a20 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -8,7 +8,7 @@ import { AssistantStream, RunCreateParamsBaseStream } from 'openai/lib/Assistant
import { sleep } from 'openai/core';
import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
-import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
+import * as AssistantsAPI from 'openai/resources/beta/assistants';
import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
@@ -39,7 +39,7 @@ export class Runs extends APIResource {
return this._client.post(`/threads/${threadId}/runs`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
stream: body.stream ?? false,
}) as APIPromise | APIPromise>;
}
@@ -50,7 +50,7 @@ export class Runs extends APIResource {
retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.get(`/threads/${threadId}/runs/${runId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -66,7 +66,7 @@ export class Runs extends APIResource {
return this._client.post(`/threads/${threadId}/runs/${runId}`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -90,7 +90,7 @@ export class Runs extends APIResource {
return this._client.getAPIList(`/threads/${threadId}/runs`, RunsPage, {
query,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -100,7 +100,7 @@ export class Runs extends APIResource {
cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -224,7 +224,7 @@ export class Runs extends APIResource {
return this._client.post(`/threads/${threadId}/runs/${runId}/submit_tool_outputs`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
stream: body.stream ?? false,
}) as APIPromise | APIPromise>;
}
@@ -350,13 +350,6 @@ export interface Run {
*/
failed_at: number | null;
- /**
- * The list of [File](https://platform.openai.com/docs/api-reference/files) IDs the
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
- * this run.
- */
- file_ids: Array;
-
/**
* Details on why the run is incomplete. Will be `null` if the run is not
* incomplete.
@@ -478,6 +471,11 @@ export interface Run {
* The sampling temperature used for this run. If not set, defaults to 1.
*/
temperature?: number | null;
+
+ /**
+ * The nucleus sampling value used for this run. If not set, defaults to 1.
+ */
+ top_p?: number | null;
}
export namespace Run {
@@ -720,6 +718,13 @@ export interface RunCreateParamsBase {
*/
tools?: Array | null;
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ */
+ top_p?: number | null;
+
truncation_strategy?: RunCreateParams.TruncationStrategy | null;
}
@@ -741,12 +746,9 @@ export namespace RunCreateParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array;
+ attachments?: Array<AdditionalMessage.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -757,6 +759,17 @@ export namespace RunCreateParams {
metadata?: unknown | null;
}
+ export namespace AdditionalMessage {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
export interface TruncationStrategy {
/**
* The truncation strategy to use for the thread. The default is `auto`. If set to
@@ -943,6 +956,13 @@ export interface RunCreateAndPollParams {
*/
tools?: Array | null;
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ */
+ top_p?: number | null;
+
truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null;
}
@@ -964,12 +984,9 @@ export namespace RunCreateAndPollParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array;
+ attachments?: Array<AdditionalMessage.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -980,6 +997,17 @@ export namespace RunCreateAndPollParams {
metadata?: unknown | null;
}
+ export namespace AdditionalMessage {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
export interface TruncationStrategy {
/**
* The truncation strategy to use for the thread. The default is `auto`. If set to
@@ -1119,6 +1147,13 @@ export interface RunCreateAndStreamParams {
*/
tools?: Array | null;
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ */
+ top_p?: number | null;
+
truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null;
}
@@ -1140,12 +1175,9 @@ export namespace RunCreateAndStreamParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array;
+ attachments?: Array<AdditionalMessage.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -1156,6 +1188,17 @@ export namespace RunCreateAndStreamParams {
metadata?: unknown | null;
}
+ export namespace AdditionalMessage {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
export interface TruncationStrategy {
/**
* The truncation strategy to use for the thread. The default is `auto`. If set to
@@ -1295,6 +1338,13 @@ export interface RunStreamParams {
*/
tools?: Array | null;
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ */
+ top_p?: number | null;
+
truncation_strategy?: RunStreamParams.TruncationStrategy | null;
}
@@ -1316,12 +1366,9 @@ export namespace RunStreamParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array;
+ attachments?: Array<AdditionalMessage.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -1332,6 +1379,17 @@ export namespace RunStreamParams {
metadata?: unknown | null;
}
+ export namespace AdditionalMessage {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
export interface TruncationStrategy {
/**
* The truncation strategy to use for the thread. The default is `auto`. If set to
@@ -1470,11 +1528,11 @@ export namespace Runs {
export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage;
export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall;
export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta;
+ export import FileSearchToolCall = StepsAPI.FileSearchToolCall;
+ export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta;
export import FunctionToolCall = StepsAPI.FunctionToolCall;
export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta;
export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
- export import RetrievalToolCall = StepsAPI.RetrievalToolCall;
- export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta;
export import RunStep = StepsAPI.RunStep;
export import RunStepDelta = StepsAPI.RunStepDelta;
export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent;
diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts
index f0816fdb2..203741f4b 100644
--- a/src/resources/beta/threads/runs/steps.ts
+++ b/src/resources/beta/threads/runs/steps.ts
@@ -18,7 +18,7 @@ export class Steps extends APIResource {
): Core.APIPromise {
return this._client.get(`/threads/${threadId}/runs/${runId}/steps/${stepId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -48,7 +48,7 @@ export class Steps extends APIResource {
return this._client.getAPIList(`/threads/${threadId}/runs/${runId}/steps`, RunStepsPage, {
query,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
}
@@ -220,6 +220,47 @@ export namespace CodeInterpreterToolCallDelta {
}
}
+export interface FileSearchToolCall {
+ /**
+ * The ID of the tool call object.
+ */
+ id: string;
+
+ /**
+ * For now, this is always going to be an empty object.
+ */
+ file_search: unknown;
+
+ /**
+ * The type of tool call. This is always going to be `file_search` for this type of
+ * tool call.
+ */
+ type: 'file_search';
+}
+
+export interface FileSearchToolCallDelta {
+ /**
+ * For now, this is always going to be an empty object.
+ */
+ file_search: unknown;
+
+ /**
+ * The index of the tool call in the tool calls array.
+ */
+ index: number;
+
+ /**
+ * The type of tool call. This is always going to be `file_search` for this type of
+ * tool call.
+ */
+ type: 'file_search';
+
+ /**
+ * The ID of the tool call object.
+ */
+ id?: string;
+}
+
export interface FunctionToolCall {
/**
* The ID of the tool call object.
@@ -330,47 +371,6 @@ export namespace MessageCreationStepDetails {
}
}
-export interface RetrievalToolCall {
- /**
- * The ID of the tool call object.
- */
- id: string;
-
- /**
- * For now, this is always going to be an empty object.
- */
- retrieval: unknown;
-
- /**
- * The type of tool call. This is always going to be `retrieval` for this type of
- * tool call.
- */
- type: 'retrieval';
-}
-
-export interface RetrievalToolCallDelta {
- /**
- * The index of the tool call in the tool calls array.
- */
- index: number;
-
- /**
- * The type of tool call. This is always going to be `retrieval` for this type of
- * tool call.
- */
- type: 'retrieval';
-
- /**
- * The ID of the tool call object.
- */
- id?: string;
-
- /**
- * For now, this is always going to be an empty object.
- */
- retrieval?: unknown;
-}
-
/**
* Represents a step in execution of a run.
*/
@@ -561,12 +561,12 @@ export namespace RunStepDeltaMessageDelta {
/**
* Details of the Code Interpreter tool call the run step was involved in.
*/
-export type ToolCall = CodeInterpreterToolCall | RetrievalToolCall | FunctionToolCall;
+export type ToolCall = CodeInterpreterToolCall | FileSearchToolCall | FunctionToolCall;
/**
* Details of the Code Interpreter tool call the run step was involved in.
*/
-export type ToolCallDelta = CodeInterpreterToolCallDelta | RetrievalToolCallDelta | FunctionToolCallDelta;
+export type ToolCallDelta = CodeInterpreterToolCallDelta | FileSearchToolCallDelta | FunctionToolCallDelta;
/**
* Details of the tool call.
@@ -579,7 +579,7 @@ export interface ToolCallDeltaObject {
/**
* An array of tool calls the run step was involved in. These can be associated
- * with one of three types of tools: `code_interpreter`, `retrieval`, or
+ * with one of three types of tools: `code_interpreter`, `file_search`, or
* `function`.
*/
tool_calls?: Array;
@@ -591,7 +591,7 @@ export interface ToolCallDeltaObject {
export interface ToolCallsStepDetails {
/**
* An array of tool calls the run step was involved in. These can be associated
- * with one of three types of tools: `code_interpreter`, `retrieval`, or
+ * with one of three types of tools: `code_interpreter`, `file_search`, or
* `function`.
*/
tool_calls: Array;
@@ -623,11 +623,11 @@ export namespace Steps {
export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage;
export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall;
export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta;
+ export import FileSearchToolCall = StepsAPI.FileSearchToolCall;
+ export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta;
export import FunctionToolCall = StepsAPI.FunctionToolCall;
export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta;
export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
- export import RetrievalToolCall = StepsAPI.RetrievalToolCall;
- export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta;
export import RunStep = StepsAPI.RunStep;
export import RunStepDelta = StepsAPI.RunStepDelta;
export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent;
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 29682c308..f3590ed80 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -6,8 +6,8 @@ import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from 'openai/lib/AssistantStream';
import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
-import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
-import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
+import * as AssistantsAPI from 'openai/resources/beta/assistants';
+import * as MessagesAPI from 'openai/resources/beta/threads/messages';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
import { Stream } from 'openai/streaming';
@@ -30,7 +30,7 @@ export class Threads extends APIResource {
return this._client.post('/threads', {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -40,7 +40,7 @@ export class Threads extends APIResource {
retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise<Thread> {
return this._client.get(`/threads/${threadId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -51,7 +51,7 @@ export class Threads extends APIResource {
return this._client.post(`/threads/${threadId}`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -61,7 +61,7 @@ export class Threads extends APIResource {
del(threadId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.delete(`/threads/${threadId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -87,7 +87,7 @@ export class Threads extends APIResource {
return this._client.post('/threads/runs', {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
stream: body.stream ?? false,
}) as APIPromise<Run> | APIPromise<Stream<AssistantStreamEvent>>;
}
@@ -154,7 +154,7 @@ export interface AssistantToolChoice {
/**
* The type of the tool. If type is `function`, the function name must be set
*/
- type: 'function' | 'code_interpreter' | 'retrieval';
+ type: 'function' | 'code_interpreter' | 'file_search';
function?: AssistantToolChoiceFunction;
}
@@ -203,6 +203,49 @@ export interface Thread {
* The object type, which is always `thread`.
*/
object: 'thread';
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ tool_resources: Thread.ToolResources | null;
+}
+
+export namespace Thread {
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this thread. There can be a maximum of 1 vector store attached to
+ * the thread.
+ */
+ vector_store_ids?: Array<string>;
+ }
+ }
}
export interface ThreadDeleted {
@@ -227,6 +270,14 @@ export interface ThreadCreateParams {
* characters long.
*/
metadata?: unknown | null;
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ tool_resources?: ThreadCreateParams.ToolResources | null;
}
export namespace ThreadCreateParams {
@@ -247,12 +298,9 @@ export namespace ThreadCreateParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array<string>;
+ attachments?: Array<Message.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -262,6 +310,77 @@ export namespace ThreadCreateParams {
*/
metadata?: unknown | null;
}
+
+ export namespace Message {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this thread. There can be a maximum of 1 vector store attached to
+ * the thread.
+ */
+ vector_store_ids?: Array<string>;
+
+ /**
+ * A helper to create a
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ * store attached to the thread.
+ */
+ vector_stores?: Array<FileSearch.VectorStore>;
+ }
+
+ export namespace FileSearch {
+ export interface VectorStore {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ * add to the vector store. There can be a maximum of 10000 files in a vector
+ * store.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to a vector store. This can be
+ * useful for storing additional information about the vector store in a structured
+ * format. Keys can be a maximum of 64 characters long and values can be a maximum
+ * of 512 characters long.
+ */
+ metadata?: unknown;
+ }
+ }
+ }
}
export interface ThreadUpdateParams {
@@ -272,6 +391,49 @@ export interface ThreadUpdateParams {
* characters long.
*/
metadata?: unknown | null;
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ tool_resources?: ThreadUpdateParams.ToolResources | null;
+}
+
+export namespace ThreadUpdateParams {
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this thread. There can be a maximum of 1 vector store attached to
+ * the thread.
+ */
+ vector_store_ids?: Array<string>;
+ }
+ }
}
export type ThreadCreateAndRunParams =
@@ -296,7 +458,7 @@ export interface ThreadCreateAndRunParamsBase {
* The maximum number of completion tokens that may be used over the course of the
* run. The run will make a best effort to use only the number of completion tokens
* specified, across multiple turns of the run. If the run exceeds the number of
- * completion tokens specified, the run will end with status `complete`. See
+ * completion tokens specified, the run will end with status `incomplete`. See
* `incomplete_details` for more info.
*/
max_completion_tokens?: number | null;
@@ -305,7 +467,7 @@ export interface ThreadCreateAndRunParamsBase {
* The maximum number of prompt tokens that may be used over the course of the run.
* The run will make a best effort to use only the number of prompt tokens
* specified, across multiple turns of the run. If the run exceeds the number of
- * prompt tokens specified, the run will end with status `complete`. See
+ * prompt tokens specified, the run will end with status `incomplete`. See
* `incomplete_details` for more info.
*/
max_prompt_tokens?: number | null;
@@ -393,14 +555,29 @@ export interface ThreadCreateAndRunParamsBase {
*/
tool_choice?: AssistantToolChoiceOption | null;
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: ThreadCreateAndRunParams.ToolResources | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
tools?: Array<
- AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
+ AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool
> | null;
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ */
+ top_p?: number | null;
+
truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null;
}
@@ -422,6 +599,14 @@ export namespace ThreadCreateAndRunParams {
* characters long.
*/
metadata?: unknown | null;
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ tool_resources?: Thread.ToolResources | null;
}
export namespace Thread {
@@ -442,12 +627,9 @@ export namespace ThreadCreateAndRunParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array<string>;
+ attachments?: Array<Message.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -457,6 +639,110 @@ export namespace ThreadCreateAndRunParams {
*/
metadata?: unknown | null;
}
+
+ export namespace Message {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this thread. There can be a maximum of 1 vector store attached to
+ * the thread.
+ */
+ vector_store_ids?: Array<string>;
+
+ /**
+ * A helper to create a
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ * store attached to the thread.
+ */
+ vector_stores?: Array<FileSearch.VectorStore>;
+ }
+
+ export namespace FileSearch {
+ export interface VectorStore {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ * add to the vector store. There can be a maximum of 10000 files in a vector
+ * store.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to a vector store. This can be
+ * useful for storing additional information about the vector store in a structured
+ * format. Keys can be a maximum of 64 characters long and values can be a maximum
+ * of 512 characters long.
+ */
+ metadata?: unknown;
+ }
+ }
+ }
+ }
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The ID of the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array<string>;
+ }
}
export interface TruncationStrategy {
@@ -515,7 +801,7 @@ export interface ThreadCreateAndRunPollParams {
* The maximum number of completion tokens that may be used over the course of the
* run. The run will make a best effort to use only the number of completion tokens
* specified, across multiple turns of the run. If the run exceeds the number of
- * completion tokens specified, the run will end with status `complete`. See
+ * completion tokens specified, the run will end with status `incomplete`. See
* `incomplete_details` for more info.
*/
max_completion_tokens?: number | null;
@@ -524,7 +810,7 @@ export interface ThreadCreateAndRunPollParams {
* The maximum number of prompt tokens that may be used over the course of the run.
* The run will make a best effort to use only the number of prompt tokens
* specified, across multiple turns of the run. If the run exceeds the number of
- * prompt tokens specified, the run will end with status `complete`. See
+ * prompt tokens specified, the run will end with status `incomplete`. See
* `incomplete_details` for more info.
*/
max_prompt_tokens?: number | null;
@@ -605,14 +891,29 @@ export interface ThreadCreateAndRunPollParams {
*/
tool_choice?: AssistantToolChoiceOption | null;
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: ThreadCreateAndRunPollParams.ToolResources | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
tools?: Array<
- AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
+ AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool
> | null;
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ */
+ top_p?: number | null;
+
truncation_strategy?: ThreadCreateAndRunPollParams.TruncationStrategy | null;
}
@@ -634,6 +935,14 @@ export namespace ThreadCreateAndRunPollParams {
* characters long.
*/
metadata?: unknown | null;
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ tool_resources?: Thread.ToolResources | null;
}
export namespace Thread {
@@ -654,12 +963,9 @@ export namespace ThreadCreateAndRunPollParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array<string>;
+ attachments?: Array<Message.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -669,6 +975,110 @@ export namespace ThreadCreateAndRunPollParams {
*/
metadata?: unknown | null;
}
+
+ export namespace Message {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this thread. There can be a maximum of 1 vector store attached to
+ * the thread.
+ */
+ vector_store_ids?: Array<string>;
+
+ /**
+ * A helper to create a
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ * store attached to the thread.
+ */
+ vector_stores?: Array<FileSearch.VectorStore>;
+ }
+
+ export namespace FileSearch {
+ export interface VectorStore {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ * add to the vector store. There can be a maximum of 10000 files in a vector
+ * store.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to a vector store. This can be
+ * useful for storing additional information about the vector store in a structured
+ * format. Keys can be a maximum of 64 characters long and values can be a maximum
+ * of 512 characters long.
+ */
+ metadata?: unknown;
+ }
+ }
+ }
+ }
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The ID of the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array<string>;
+ }
}
export interface TruncationStrategy {
@@ -706,7 +1116,7 @@ export interface ThreadCreateAndRunStreamParams {
* The maximum number of completion tokens that may be used over the course of the
* run. The run will make a best effort to use only the number of completion tokens
* specified, across multiple turns of the run. If the run exceeds the number of
- * completion tokens specified, the run will end with status `complete`. See
+ * completion tokens specified, the run will end with status `incomplete`. See
* `incomplete_details` for more info.
*/
max_completion_tokens?: number | null;
@@ -715,7 +1125,7 @@ export interface ThreadCreateAndRunStreamParams {
* The maximum number of prompt tokens that may be used over the course of the run.
* The run will make a best effort to use only the number of prompt tokens
* specified, across multiple turns of the run. If the run exceeds the number of
- * prompt tokens specified, the run will end with status `complete`. See
+ * prompt tokens specified, the run will end with status `incomplete`. See
* `incomplete_details` for more info.
*/
max_prompt_tokens?: number | null;
@@ -796,14 +1206,29 @@ export interface ThreadCreateAndRunStreamParams {
*/
tool_choice?: AssistantToolChoiceOption | null;
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: ThreadCreateAndRunStreamParams.ToolResources | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
tools?: Array<
- AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
+ AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool
> | null;
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ */
+ top_p?: number | null;
+
truncation_strategy?: ThreadCreateAndRunStreamParams.TruncationStrategy | null;
}
@@ -825,6 +1250,14 @@ export namespace ThreadCreateAndRunStreamParams {
* characters long.
*/
metadata?: unknown | null;
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ tool_resources?: Thread.ToolResources | null;
}
export namespace Thread {
@@ -845,12 +1278,9 @@ export namespace ThreadCreateAndRunStreamParams {
role: 'user' | 'assistant';
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- * the message should use. There can be a maximum of 10 files attached to a
- * message. Useful for tools like `retrieval` and `code_interpreter` that can
- * access and use files.
+ * A list of files attached to the message, and the tools they should be added to.
*/
- file_ids?: Array<string>;
+ attachments?: Array<Message.Attachment> | null;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -860,6 +1290,110 @@ export namespace ThreadCreateAndRunStreamParams {
*/
metadata?: unknown | null;
}
+
+ export namespace Message {
+ export interface Attachment {
+ add_to?: Array<'file_search' | 'code_interpreter'>;
+
+ /**
+ * The ID of the file to attach to the message.
+ */
+ file_id?: string;
+ }
+ }
+
+ /**
+ * A set of resources that are made available to the assistant's tools in this
+ * thread. The resources are specific to the type of tool. For example, the
+ * `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this thread. There can be a maximum of 1 vector store attached to
+ * the thread.
+ */
+ vector_store_ids?: Array<string>;
+
+ /**
+ * A helper to create a
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ * store attached to the thread.
+ */
+ vector_stores?: Array<FileSearch.VectorStore>;
+ }
+
+ export namespace FileSearch {
+ export interface VectorStore {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ * add to the vector store. There can be a maximum of 10000 files in a vector
+ * store.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to a vector store. This can be
+ * useful for storing additional information about the vector store in a structured
+ * format. Keys can be a maximum of 64 characters long and values can be a maximum
+ * of 512 characters long.
+ */
+ metadata?: unknown;
+ }
+ }
+ }
+ }
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The ID of the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array<string>;
+ }
}
export interface TruncationStrategy {
diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts
new file mode 100644
index 000000000..3ccdd0108
--- /dev/null
+++ b/src/resources/beta/vector-stores/file-batches.ts
@@ -0,0 +1,292 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import { sleep } from 'openai/core';
+import { Uploadable } from 'openai/core';
+import { allSettledWithThrow } from 'openai/lib/Util';
+import * as FileBatchesAPI from 'openai/resources/beta/vector-stores/file-batches';
+import * as FilesAPI from 'openai/resources/beta/vector-stores/files';
+import { VectorStoreFilesPage } from 'openai/resources/beta/vector-stores/files';
+import { type CursorPageParams } from 'openai/pagination';
+
+export class FileBatches extends APIResource {
+ /**
+ * Create a vector store file batch.
+ */
+ create(
+ vectorStoreId: string,
+ body: FileBatchCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.post(`/vector_stores/${vectorStoreId}/file_batches`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieves a vector store file batch.
+ */
+ retrieve(
+ vectorStoreId: string,
+ batchId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.get(`/vector_stores/${vectorStoreId}/file_batches/${batchId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Cancel a vector store file batch. This attempts to cancel the processing of
+ * files in this batch as soon as possible.
+ */
+ cancel(
+ vectorStoreId: string,
+ batchId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.post(`/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Create a vector store batch and poll until all files have been processed.
+ */
+ async createAndPoll(
+ vectorStoreId: string,
+ body: FileBatchCreateParams,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const batch = await this.create(vectorStoreId, body);
+ return await this.poll(vectorStoreId, batch.id, options);
+ }
+
+ /**
+ * Returns a list of vector store files in a batch.
+ */
+ listFiles(
+ vectorStoreId: string,
+ batchId: string,
+ query?: FileBatchListFilesParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ listFiles(
+ vectorStoreId: string,
+ batchId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ listFiles(
+ vectorStoreId: string,
+ batchId: string,
+ query: FileBatchListFilesParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise {
+ if (isRequestOptions(query)) {
+ return this.listFiles(vectorStoreId, batchId, {}, query);
+ }
+ return this._client.getAPIList(
+ `/vector_stores/${vectorStoreId}/file_batches/${batchId}/files`,
+ VectorStoreFilesPage,
+ { query, ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } },
+ );
+ }
+
+ /**
+ * Wait for the given file batch to be processed.
+ *
+ * Note: this will return even if one of the files failed to process, you need to
+ * check batch.file_counts.failed_count to handle this case.
+ */
+ async poll(
+ vectorStoreId: string,
+ batchId: string,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };
+ if (options?.pollIntervalMs) {
+ headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();
+ }
+
+ while (true) {
+ const { data: batch, response } = await this.retrieve(vectorStoreId, batchId, {
+ ...options,
+ headers,
+ }).withResponse();
+
+ switch (batch.status) {
+ case 'in_progress':
+ let sleepInterval = 5000;
+
+ if (options?.pollIntervalMs) {
+ sleepInterval = options.pollIntervalMs;
+ } else {
+ const headerInterval = response.headers.get('openai-poll-after-ms');
+ if (headerInterval) {
+ const headerIntervalMs = parseInt(headerInterval);
+ if (!isNaN(headerIntervalMs)) {
+ sleepInterval = headerIntervalMs;
+ }
+ }
+ }
+ await sleep(sleepInterval);
+ break;
+ case 'failed':
+ case 'completed':
+ return batch;
+ }
+ }
+ }
+
+  /**
+   * Uploads the given files concurrently and then creates a vector store file batch,
+   * polling until the batch has been processed.
+   *
+   * If you have already uploaded some files, pass their IDs via `fileIds` and they
+   * will be included in the batch alongside the newly uploaded ones.
+   *
+   * The concurrency limit is configurable using the `maxConcurrency` parameter.
+   */
+  async uploadAndPoll(
+    vectorStoreId: string,
+    { files, fileIds = [] }: { files: Uploadable[]; fileIds?: string[] },
+    options?: Core.RequestOptions & { pollIntervalMs?: number; maxConcurrency?: number },
+  ): Promise<VectorStoreFileBatch> {
+    // `== null` deliberately catches both null and undefined; the old check
+    // (`=== null`) let `undefined` fall through to a TypeError on `.length`.
+    if (files == null || files.length === 0) {
+      throw new Error('No files provided to process.');
+    }
+
+    const configuredConcurrency = options?.maxConcurrency ?? 5;
+    // Cap the number of workers at the number of files so we never start idle workers.
+    const concurrencyLimit = Math.min(configuredConcurrency, files.length);
+
+    const client = this._client;
+    const fileIterator = files.values();
+    const allFileIds: string[] = [...fileIds];
+
+    // Worker-pool pattern: every worker pulls from the single shared iterator, so at
+    // most `concurrencyLimit` uploads are in flight at any time. Based on:
+    // https://stackoverflow.com/questions/40639432/what-is-the-best-way-to-limit-concurrency-when-using-es6s-promise-all
+    async function processFiles(iterator: IterableIterator<Uploadable>) {
+      for (const item of iterator) {
+        const fileObj = await client.files.create({ file: item, purpose: 'assistants' }, options);
+        allFileIds.push(fileObj.id);
+      }
+    }
+
+    // Start the workers; they all share `fileIterator`.
+    const workers = Array(concurrencyLimit).fill(fileIterator).map(processFiles);
+
+    // Wait for every upload; rethrows if any of them failed.
+    await allSettledWithThrow(workers);
+
+    // Forward `options` so request options and `pollIntervalMs` also apply to the
+    // batch creation and polling (previously they were silently dropped here).
+    return await this.createAndPoll(vectorStoreId, { file_ids: allFileIds }, options);
+  }
+}
+
+/**
+ * A batch of files attached to a vector store.
+ */
+export interface VectorStoreFileBatch {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the vector store files batch was
+ * created.
+ */
+ created_at: number;
+
+ file_counts: VectorStoreFileBatch.FileCounts;
+
+ /**
+ * The object type, which is always `vector_store.files_batch`.
+ */
+ object: 'vector_store.files_batch';
+
+ /**
+ * The status of the vector store files batch, which can be either `in_progress`,
+ * `completed`, `cancelled` or `failed`.
+ */
+ status: 'in_progress' | 'completed' | 'cancelled' | 'failed';
+
+ /**
+ * The ID of the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * that the [File](https://platform.openai.com/docs/api-reference/files) is
+ * attached to.
+ */
+ vector_store_id: string;
+}
+
+export namespace VectorStoreFileBatch {
+ export interface FileCounts {
+ /**
+ * The number of files that were cancelled.
+ */
+ cancelled: number;
+
+ /**
+ * The number of files that have been processed.
+ */
+ completed: number;
+
+ /**
+ * The number of files that have failed to process.
+ */
+ failed: number;
+
+ /**
+ * The number of files that are currently being processed.
+ */
+ in_progress: number;
+
+ /**
+ * The total number of files.
+ */
+ total: number;
+ }
+}
+
+export interface FileBatchCreateParams {
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the vector store should use. Useful for tools like `file_search` that can access
+ * files.
+ */
+ file_ids: Array;
+}
+
+export interface FileBatchListFilesParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+ */
+ filter?: 'in_progress' | 'completed' | 'failed' | 'cancelled';
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace FileBatches {
+ export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch;
+ export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams;
+ export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams;
+}
+
+export { VectorStoreFilesPage };
diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts
new file mode 100644
index 000000000..40b97e9a9
--- /dev/null
+++ b/src/resources/beta/vector-stores/files.ts
@@ -0,0 +1,277 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import { sleep, Uploadable } from 'openai/core';
+import * as FilesAPI from 'openai/resources/beta/vector-stores/files';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Files extends APIResource {
+ /**
+ * Create a vector store file by attaching a
+ * [File](https://platform.openai.com/docs/api-reference/files) to a
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
+ */
+ create(
+ vectorStoreId: string,
+ body: FileCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.post(`/vector_stores/${vectorStoreId}/files`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieves a vector store file.
+ */
+ retrieve(
+ vectorStoreId: string,
+ fileId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.get(`/vector_stores/${vectorStoreId}/files/${fileId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of vector store files.
+ */
+ list(
+ vectorStoreId: string,
+ query?: FileListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ list(
+ vectorStoreId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ list(
+ vectorStoreId: string,
+ query: FileListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise {
+ if (isRequestOptions(query)) {
+ return this.list(vectorStoreId, {}, query);
+ }
+ return this._client.getAPIList(`/vector_stores/${vectorStoreId}/files`, VectorStoreFilesPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Delete a vector store file. This will remove the file from the vector store but
+ * the file itself will not be deleted. To delete the file, use the
+ * [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+ * endpoint.
+ */
+ del(
+ vectorStoreId: string,
+ fileId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.delete(`/vector_stores/${vectorStoreId}/files/${fileId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Attach a file to the given vector store and wait for it to be processed.
+ */
+ async createAndPoll(
+ vectorStoreId: string,
+ body: FileCreateParams,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const file = await this.create(vectorStoreId, body, options);
+ return await this.poll(vectorStoreId, file.id, options);
+ }
+
+ /**
+ * Wait for the vector store file to finish processing.
+ *
+ * Note: this will return even if the file failed to process, you need to check
+ * file.last_error and file.status to handle these cases
+ */
+ async poll(
+ vectorStoreId: string,
+ fileId: string,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };
+ if (options?.pollIntervalMs) {
+ headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();
+ }
+ while (true) {
+ const fileResponse = await this.retrieve(vectorStoreId, fileId, {
+ ...options,
+ headers,
+ }).withResponse();
+
+ const file = fileResponse.data;
+
+ switch (file.status) {
+ case 'in_progress':
+ let sleepInterval = 5000;
+
+ if (options?.pollIntervalMs) {
+ sleepInterval = options.pollIntervalMs;
+ } else {
+ const headerInterval = fileResponse.response.headers.get('openai-poll-after-ms');
+ if (headerInterval) {
+ const headerIntervalMs = parseInt(headerInterval);
+ if (!isNaN(headerIntervalMs)) {
+ sleepInterval = headerIntervalMs;
+ }
+ }
+ }
+ await sleep(sleepInterval);
+ break;
+ case 'failed':
+ case 'completed':
+ return file;
+ }
+ }
+ }
+
+ /**
+ * Upload a file to the `files` API and then attach it to the given vector store.
+ * Note the file will be asynchronously processed (you can use the alternative
+ * polling helper method to wait for processing to complete).
+ */
+ async upload(
+ vectorStoreId: string,
+ file: Uploadable,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const fileInfo = await this._client.files.create({ file: file, purpose: 'assistants' }, options);
+ return this.create(vectorStoreId, { file_id: fileInfo.id }, options);
+ }
+
+ /**
+ * Add a file to a vector store and poll until processing is complete.
+ */
+ async uploadAndPoll(
+ vectorStoreId: string,
+ file: Uploadable,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const fileInfo = await this._client.files.create({ file: file, purpose: 'assistants' }, options);
+ return await this.poll(vectorStoreId, fileInfo.id, options);
+ }
+}
+
+export class VectorStoreFilesPage extends CursorPage {}
+
+/**
+ * A list of files attached to a vector store.
+ */
+export interface VectorStoreFile {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the vector store file was created.
+ */
+ created_at: number;
+
+ /**
+ * The last error associated with this vector store file. Will be `null` if there
+ * are no errors.
+ */
+ last_error: VectorStoreFile.LastError | null;
+
+ /**
+ * The object type, which is always `vector_store.file`.
+ */
+ object: 'vector_store.file';
+
+ /**
+ * The status of the vector store file, which can be either `in_progress`,
+ * `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
+ * vector store file is ready for use.
+ */
+ status: 'in_progress' | 'completed' | 'cancelled' | 'failed';
+
+ /**
+ * The ID of the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * that the [File](https://platform.openai.com/docs/api-reference/files) is
+ * attached to.
+ */
+ vector_store_id: string;
+}
+
+export namespace VectorStoreFile {
+ /**
+ * The last error associated with this vector store file. Will be `null` if there
+ * are no errors.
+ */
+ export interface LastError {
+ /**
+ * One of `server_error` or `rate_limit_exceeded`.
+ */
+ code: 'internal_error' | 'file_not_found' | 'parsing_error' | 'unhandled_mime_type';
+
+ /**
+ * A human-readable description of the error.
+ */
+ message: string;
+ }
+}
+
+export interface VectorStoreFileDeleted {
+ id: string;
+
+ deleted: boolean;
+
+ object: 'vector_store.file.deleted';
+}
+
+export interface FileCreateParams {
+ /**
+ * A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+ * vector store should use. Useful for tools like `file_search` that can access
+ * files.
+ */
+ file_id: string;
+}
+
+export interface FileListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+ */
+ filter?: 'in_progress' | 'completed' | 'failed' | 'cancelled';
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace Files {
+ export import VectorStoreFile = FilesAPI.VectorStoreFile;
+ export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted;
+ export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage;
+ export import FileCreateParams = FilesAPI.FileCreateParams;
+ export import FileListParams = FilesAPI.FileListParams;
+}
diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts
new file mode 100644
index 000000000..8fb787ccd
--- /dev/null
+++ b/src/resources/beta/vector-stores/index.ts
@@ -0,0 +1,25 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ VectorStore,
+ VectorStoreDeleted,
+ VectorStoreCreateParams,
+ VectorStoreUpdateParams,
+ VectorStoreListParams,
+ VectorStoresPage,
+ VectorStores,
+} from './vector-stores';
+export {
+ VectorStoreFile,
+ VectorStoreFileDeleted,
+ FileCreateParams,
+ FileListParams,
+ VectorStoreFilesPage,
+ Files,
+} from './files';
+export {
+ VectorStoreFileBatch,
+ FileBatchCreateParams,
+ FileBatchListFilesParams,
+ FileBatches,
+} from './file-batches';
diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts
new file mode 100644
index 000000000..892d06aa4
--- /dev/null
+++ b/src/resources/beta/vector-stores/vector-stores.ts
@@ -0,0 +1,318 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as VectorStoresAPI from 'openai/resources/beta/vector-stores/vector-stores';
+import * as FileBatchesAPI from 'openai/resources/beta/vector-stores/file-batches';
+import * as FilesAPI from 'openai/resources/beta/vector-stores/files';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class VectorStores extends APIResource {
+ files: FilesAPI.Files = new FilesAPI.Files(this._client);
+ fileBatches: FileBatchesAPI.FileBatches = new FileBatchesAPI.FileBatches(this._client);
+
+ /**
+ * Create a vector store.
+ */
+ create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.post('/vector_stores', {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieves a vector store.
+ */
+ retrieve(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.get(`/vector_stores/${vectorStoreId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Modifies a vector store.
+ */
+ update(
+ vectorStoreId: string,
+ body: VectorStoreUpdateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.post(`/vector_stores/${vectorStoreId}`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of vector stores.
+ */
+ list(
+ query?: VectorStoreListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ list(options?: Core.RequestOptions): Core.PagePromise;
+ list(
+ query: VectorStoreListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this._client.getAPIList('/vector_stores', VectorStoresPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+
+ /**
+ * Delete a vector store.
+ */
+ del(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.delete(`/vector_stores/${vectorStoreId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+ });
+ }
+}
+
+export class VectorStoresPage extends CursorPage {}
+
+/**
+ * A vector store is a collection of processed files that can be used by the
+ * `file_search` tool.
+ */
+export interface VectorStore {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The byte size of the vector store.
+ */
+ bytes: number;
+
+ /**
+ * The Unix timestamp (in seconds) for when the vector store was created.
+ */
+ created_at: number;
+
+ file_counts: VectorStore.FileCounts;
+
+ /**
+ * The Unix timestamp (in seconds) for when the vector store was last active.
+ */
+ last_active_at: number | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata: unknown | null;
+
+ /**
+ * The name of the vector store.
+ */
+ name: string;
+
+ /**
+ * The object type, which is always `vector_store`.
+ */
+ object: 'vector_store';
+
+ /**
+ * The status of the vector store, which can be either `expired`, `in_progress`, or
+ * `completed`. A status of `completed` indicates that the vector store is ready
+ * for use.
+ */
+ status: 'expired' | 'in_progress' | 'completed';
+
+ /**
+ * The expiration policy for a vector store.
+ */
+ expires_after?: VectorStore.ExpiresAfter;
+
+ /**
+ * The Unix timestamp (in seconds) for when the vector store will expire.
+ */
+ expires_at?: number | null;
+}
+
+export namespace VectorStore {
+ export interface FileCounts {
+ /**
+ * The number of files that were cancelled.
+ */
+ cancelled: number;
+
+ /**
+ * The number of files that have been successfully processed.
+ */
+ completed: number;
+
+ /**
+ * The number of files that have failed to process.
+ */
+ failed: number;
+
+ /**
+ * The number of files that are currently being processed.
+ */
+ in_progress: number;
+
+ /**
+ * The total number of files.
+ */
+ total: number;
+ }
+
+ /**
+ * The expiration policy for a vector store.
+ */
+ export interface ExpiresAfter {
+ /**
+ * Anchor timestamp after which the expiration policy applies. Supported anchors:
+ * `last_active_at`.
+ */
+ anchor: 'last_active_at';
+
+ /**
+ * The number of days after the anchor time that the vector store will expire.
+ */
+ days: number;
+ }
+}
+
+export interface VectorStoreDeleted {
+ id: string;
+
+ deleted: boolean;
+
+ object: 'vector_store.deleted';
+}
+
+export interface VectorStoreCreateParams {
+ /**
+ * The expiration policy for a vector store.
+ */
+ expires_after?: VectorStoreCreateParams.ExpiresAfter;
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the vector store should use. Useful for tools like `file_search` that can access
+ * files.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The name of the vector store.
+ */
+ name?: string;
+}
+
+export namespace VectorStoreCreateParams {
+ /**
+ * The expiration policy for a vector store.
+ */
+ export interface ExpiresAfter {
+ /**
+ * Anchor timestamp after which the expiration policy applies. Supported anchors:
+ * `last_active_at`.
+ */
+ anchor: 'last_active_at';
+
+ /**
+ * The number of days after the anchor time that the vector store will expire.
+ */
+ days: number;
+ }
+}
+
+export interface VectorStoreUpdateParams {
+ /**
+ * The expiration policy for a vector store.
+ */
+ expires_after?: VectorStoreUpdateParams.ExpiresAfter | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The name of the vector store.
+ */
+ name?: string | null;
+}
+
+export namespace VectorStoreUpdateParams {
+ /**
+ * The expiration policy for a vector store.
+ */
+ export interface ExpiresAfter {
+ /**
+ * Anchor timestamp after which the expiration policy applies. Supported anchors:
+ * `last_active_at`.
+ */
+ anchor: 'last_active_at';
+
+ /**
+ * The number of days after the anchor time that the vector store will expire.
+ */
+ days: number;
+ }
+}
+
+export interface VectorStoreListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace VectorStores {
+ export import VectorStore = VectorStoresAPI.VectorStore;
+ export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted;
+ export import VectorStoresPage = VectorStoresAPI.VectorStoresPage;
+ export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams;
+ export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams;
+ export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams;
+ export import Files = FilesAPI.Files;
+ export import VectorStoreFile = FilesAPI.VectorStoreFile;
+ export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted;
+ export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage;
+ export import FileCreateParams = FilesAPI.FileCreateParams;
+ export import FileListParams = FilesAPI.FileListParams;
+ export import FileBatches = FileBatchesAPI.FileBatches;
+ export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch;
+ export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams;
+ export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams;
+}
diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts
index 10b3d38d2..2469cce07 100644
--- a/src/resources/fine-tuning/jobs/jobs.ts
+++ b/src/resources/fine-tuning/jobs/jobs.ts
@@ -300,7 +300,7 @@ export interface JobCreateParams {
/**
* The ID of an uploaded file that contains training data.
*
- * See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
+ * See [upload file](https://platform.openai.com/docs/api-reference/files/create)
* for how to upload a file.
*
* Your dataset must be formatted as a JSONL file. Additionally, you must upload
diff --git a/tests/api-resources/beta/assistants/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts
similarity index 93%
rename from tests/api-resources/beta/assistants/assistants.test.ts
rename to tests/api-resources/beta/assistants.test.ts
index 62282148d..56ce8446a 100644
--- a/tests/api-resources/beta/assistants/assistants.test.ts
+++ b/tests/api-resources/beta/assistants.test.ts
@@ -24,11 +24,20 @@ describe('resource assistants', () => {
const response = await openai.beta.assistants.create({
model: 'gpt-4-turbo',
description: 'string',
- file_ids: ['string', 'string', 'string'],
instructions: 'string',
metadata: {},
name: 'string',
+ response_format: 'none',
+ temperature: 1,
+ tool_resources: {
+ code_interpreter: { file_ids: ['string', 'string', 'string'] },
+ file_search: {
+ vector_store_ids: ['string'],
+ vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }],
+ },
+ },
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ top_p: 1,
});
});
diff --git a/tests/api-resources/beta/threads/messages/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts
similarity index 93%
rename from tests/api-resources/beta/threads/messages/messages.test.ts
rename to tests/api-resources/beta/threads/messages.test.ts
index 7f62944e0..a0a025869 100644
--- a/tests/api-resources/beta/threads/messages/messages.test.ts
+++ b/tests/api-resources/beta/threads/messages.test.ts
@@ -24,7 +24,11 @@ describe('resource messages', () => {
const response = await openai.beta.threads.messages.create('string', {
content: 'x',
role: 'user',
- file_ids: ['string'],
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
metadata: {},
});
});
diff --git a/tests/api-resources/beta/threads/messages/files.test.ts b/tests/api-resources/beta/threads/messages/files.test.ts
deleted file mode 100644
index 58c8813fe..000000000
--- a/tests/api-resources/beta/threads/messages/files.test.ts
+++ /dev/null
@@ -1,65 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import OpenAI from 'openai';
-import { Response } from 'node-fetch';
-
-const openai = new OpenAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource files', () => {
- test('retrieve', async () => {
- const responsePromise = openai.beta.threads.messages.files.retrieve(
- 'thread_abc123',
- 'msg_abc123',
- 'file-abc123',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('retrieve: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- openai.beta.threads.messages.files.retrieve('thread_abc123', 'msg_abc123', 'file-abc123', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(OpenAI.NotFoundError);
- });
-
- test('list', async () => {
- const responsePromise = openai.beta.threads.messages.files.list('string', 'string');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('list: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- openai.beta.threads.messages.files.list('string', 'string', { path: '/_stainless_unknown_path' }),
- ).rejects.toThrow(OpenAI.NotFoundError);
- });
-
- test('list: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- openai.beta.threads.messages.files.list(
- 'string',
- 'string',
- { after: 'string', before: 'string', limit: 0, order: 'asc' },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(OpenAI.NotFoundError);
- });
-});
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index 2489d56e2..4a3743ca0 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -25,9 +25,36 @@ describe('resource runs', () => {
assistant_id: 'string',
additional_instructions: 'string',
additional_messages: [
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
],
instructions: 'string',
max_completion_tokens: 256,
@@ -39,6 +66,7 @@ describe('resource runs', () => {
temperature: 1,
tool_choice: 'none',
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ top_p: 1,
truncation_strategy: { type: 'auto', last_messages: 1 },
});
});
diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts
index 028a150f4..0a5f70af4 100644
--- a/tests/api-resources/beta/threads/threads.test.ts
+++ b/tests/api-resources/beta/threads/threads.test.ts
@@ -33,11 +33,45 @@ describe('resource threads', () => {
openai.beta.threads.create(
{
messages: [
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
],
metadata: {},
+ tool_resources: {
+ code_interpreter: { file_ids: ['string', 'string', 'string'] },
+ file_search: {
+ vector_store_ids: ['string'],
+ vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }],
+ },
+ },
},
{ path: '/_stainless_unknown_path' },
),
@@ -115,14 +149,53 @@ describe('resource threads', () => {
temperature: 1,
thread: {
messages: [
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
- { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
+ {
+ role: 'user',
+ content: 'x',
+ attachments: [
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ { file_id: 'string', add_to: ['file_search', 'code_interpreter'] },
+ ],
+ metadata: {},
+ },
],
+ tool_resources: {
+ code_interpreter: { file_ids: ['string', 'string', 'string'] },
+ file_search: {
+ vector_store_ids: ['string'],
+ vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }],
+ },
+ },
metadata: {},
},
tool_choice: 'none',
+ tool_resources: {
+ code_interpreter: { file_ids: ['string', 'string', 'string'] },
+ file_search: { vector_store_ids: ['string'] },
+ },
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ top_p: 1,
truncation_strategy: { type: 'auto', last_messages: 1 },
});
});
diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/beta/vector-stores/file-batches.test.ts
new file mode 100644
index 000000000..782b33a0c
--- /dev/null
+++ b/tests/api-resources/beta/vector-stores/file-batches.test.ts
@@ -0,0 +1,98 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource fileBatches', () => {
+ test('create: only required params', async () => {
+ const responsePromise = openai.beta.vectorStores.fileBatches.create('vs_abc123', {
+ file_ids: ['string'],
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await openai.beta.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('cancel', async () => {
+ const responsePromise = openai.beta.vectorStores.fileBatches.cancel('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('cancel: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.vectorStores.fileBatches.cancel('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('listFiles', async () => {
+ const responsePromise = openai.beta.vectorStores.fileBatches.listFiles('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('listFiles: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.vectorStores.fileBatches.listFiles('string', 'string', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('listFiles: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.vectorStores.fileBatches.listFiles(
+ 'string',
+ 'string',
+ { after: 'string', before: 'string', filter: 'in_progress', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/beta/assistants/files.test.ts b/tests/api-resources/beta/vector-stores/files.test.ts
similarity index 77%
rename from tests/api-resources/beta/assistants/files.test.ts
rename to tests/api-resources/beta/vector-stores/files.test.ts
index e285b4664..03340753c 100644
--- a/tests/api-resources/beta/assistants/files.test.ts
+++ b/tests/api-resources/beta/vector-stores/files.test.ts
@@ -10,7 +10,7 @@ const openai = new OpenAI({
describe('resource files', () => {
test('create: only required params', async () => {
- const responsePromise = openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
+ const responsePromise = openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'string' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -21,11 +21,11 @@ describe('resource files', () => {
});
test('create: required and optional params', async () => {
- const response = await openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
+ const response = await openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'string' });
});
test('retrieve', async () => {
- const responsePromise = openai.beta.assistants.files.retrieve('string', 'string');
+ const responsePromise = openai.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -38,12 +38,14 @@ describe('resource files', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- openai.beta.assistants.files.retrieve('string', 'string', { path: '/_stainless_unknown_path' }),
+ openai.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123', {
+ path: '/_stainless_unknown_path',
+ }),
).rejects.toThrow(OpenAI.NotFoundError);
});
test('list', async () => {
- const responsePromise = openai.beta.assistants.files.list('string');
+ const responsePromise = openai.beta.vectorStores.files.list('string');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -56,23 +58,23 @@ describe('resource files', () => {
test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- openai.beta.assistants.files.list('string', { path: '/_stainless_unknown_path' }),
+ openai.beta.vectorStores.files.list('string', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(OpenAI.NotFoundError);
});
test('list: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- openai.beta.assistants.files.list(
+ openai.beta.vectorStores.files.list(
'string',
- { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { after: 'string', before: 'string', filter: 'in_progress', limit: 0, order: 'asc' },
{ path: '/_stainless_unknown_path' },
),
).rejects.toThrow(OpenAI.NotFoundError);
});
test('del', async () => {
- const responsePromise = openai.beta.assistants.files.del('string', 'string');
+ const responsePromise = openai.beta.vectorStores.files.del('string', 'string');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -85,7 +87,7 @@ describe('resource files', () => {
test('del: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- openai.beta.assistants.files.del('string', 'string', { path: '/_stainless_unknown_path' }),
+ openai.beta.vectorStores.files.del('string', 'string', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(OpenAI.NotFoundError);
});
});
diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/beta/vector-stores/vector-stores.test.ts
new file mode 100644
index 000000000..445fa9ebf
--- /dev/null
+++ b/tests/api-resources/beta/vector-stores/vector-stores.test.ts
@@ -0,0 +1,97 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource vectorStores', () => {
+ test('create', async () => {
+ const responsePromise = openai.beta.vectorStores.create({});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.vectorStores.retrieve('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.vectorStores.retrieve('string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('update', async () => {
+ const responsePromise = openai.beta.vectorStores.update('string', {});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list', async () => {
+ const responsePromise = openai.beta.vectorStores.list();
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(openai.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ OpenAI.NotFoundError,
+ );
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.vectorStores.list(
+ { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('del', async () => {
+ const responsePromise = openai.beta.vectorStores.del('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('del: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.vectorStores.del('string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+});