ApiError | @fal-ai/client

Class ApiError<Body>

Type Parameters

  • Body

Hierarchy

Constructors

constructor
Properties

body: Body
status: number
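
Example: a minimal sketch of handling an ApiError. The endpoint id and input are placeholders, not part of this reference; status and body are the properties documented above.

import { fal, ApiError } from "@fal-ai/client";

try {
  await fal.run("fal-ai/fast-sdxl", { input: { prompt: "a cat" } });
} catch (error) {
  if (error instanceof ApiError) {
    // status is the HTTP status code; body is typed by the Body parameter
    console.error(`Request failed with status ${error.status}`, error.body);
  } else {
    throw error;
  }
}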
FalStream | @fal-ai/client

Class FalStream<Input, Output>

The class representing a streaming response. With this class, you can get partial results through either an AsyncIterator or an event listener.


Type Parameters

  • Input
  • Output


Constructors

Properties

config: Required<Config>
endpointId: string
options: StreamOptions<Input>
url: string

Accessors

  • get signal(): AbortSignal
  • Gets the AbortSignal instance that can be used to listen for abort events.

Methods

  • Aborts the streaming request.

    Note: This method is a no-op if the request is already done.

    Parameters

    • Optional reason: string | Error

      optional cause for aborting the request.


    Returns void

  • Gets a reference to the Promise that indicates whether the streaming request is done.
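
Example: a minimal sketch of consuming a FalStream through its AsyncIterator. The input payload is a placeholder; done() resolving once streaming finishes follows the description above.

import { fal } from "@fal-ai/client";

const stream = await fal.stream("fal-ai/llavav15-13b", {
  // placeholder payload; check the endpoint's input type for real fields
  input: { prompt: "Describe this image" },
});

// Partial results arrive through the AsyncIterator...
for await (const partial of stream) {
  console.log(partial);
}

// ...and done() resolves when the stream has finished.
const result = await stream.done();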
ValidationError | @fal-ai/client

Class ValidationError

Hierarchy

  • ApiError<ValidationErrorBody>
    • ValidationError

Constructors

constructor

Properties

body: ValidationErrorBody
status: number

Accessors

Methods

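
Example: a minimal sketch of handling a ValidationError. That the body exposes ValidationErrorInfo entries under a detail field is an assumption; this page only documents the ValidationErrorBody type parameter.

import { fal, ValidationError } from "@fal-ai/client";

try {
  await fal.run("fal-ai/fast-sdxl", { input: {} });
} catch (error) {
  if (error instanceof ValidationError) {
    // Assumption: ValidationErrorBody carries ValidationErrorInfo entries
    // under `detail`, mirroring common 422 payloads.
    for (const info of error.body.detail) {
      console.error(`${info.loc.join(".")}: ${info.msg} (${info.type})`);
    }
  }
}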

createFalClient | @fal-ai/client

Function createFalClient

  • Creates a new instance of the FalClient.

    Parameters

    • userConfig: Config = {}

      Optional configuration to override the default settings.

    Returns FalClient

    a new instance of the FalClient.

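Example: a minimal sketch of creating a scoped client. The credentials key is an assumption about Config, which this page does not expand.

import { createFalClient } from "@fal-ai/client";

const client = createFalClient({
  credentials: process.env.FAL_KEY,
});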
isCompletedQueueStatus | @fal-ai/client

Function isCompletedQueueStatus


isQueueStatus | @fal-ai/client

Function isQueueStatus

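Example: a sketch of narrowing a QueueStatus with these guards. The endpoint id and request id are placeholders; requestId as a status option is an assumption based on the queue API described below.

import { fal, isCompletedQueueStatus } from "@fal-ai/client";

const status = await fal.queue.status("fal-ai/fast-sdxl", {
  requestId: "<request id from queue.submit>",
});

if (isCompletedQueueStatus(status)) {
  // Narrowed to CompletedQueueStatus: logs and response_url are available.
  console.log(status.response_url);
}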

parseEndpointId | @fal-ai/client

Function parseEndpointId

  • Parameters

    • id: string

    Returns EndpointId

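Example: the EndpointId shape is not expanded on this page; treating it as owner/alias parts is an assumption based on the "owner/alias" id format used throughout this reference.

import { parseEndpointId } from "@fal-ai/client";

const endpoint = parseEndpointId("fal-ai/llavav15-13b");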

withMiddleware | @fal-ai/client

Function withMiddleware

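Example: a sketch of composing a middleware into a client. The requestMiddleware config key and the url field on RequestConfig are assumptions; only the RequestMiddleware shape is documented in this reference.

import {
  createFalClient,
  withMiddleware,
  RequestMiddleware,
} from "@fal-ai/client";

// A middleware that logs every request before it is sent.
const logRequests: RequestMiddleware = async (request) => {
  console.log("fal request:", request.url); // assumes RequestConfig has url
  return request;
};

const client = createFalClient({
  requestMiddleware: withMiddleware(logRequests),
});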
withProxy | @fal-ai/client

Function withProxy

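Example: a sketch of routing requests through a server-side proxy. The targetUrl option and the requestMiddleware config key are assumptions; the page only documents that withProxy exists.

import { createFalClient, withProxy } from "@fal-ai/client";

const client = createFalClient({
  requestMiddleware: withProxy({ targetUrl: "/api/fal/proxy" }),
});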

CompletedQueueStatus | @fal-ai/client

Interface CompletedQueueStatus

interface CompletedQueueStatus {
    logs: RequestLog[];
    metrics?: Metrics;
    request_id: string;
    response_url: string;
    status: "COMPLETED";
}

Hierarchy

  • BaseQueueStatus
    • CompletedQueueStatus

Properties


logs: RequestLog[]
metrics?: Metrics
request_id: string
response_url: string
status: "COMPLETED"
FalClient | @fal-ai/client

Interface FalClient

The main client type. It provides access to simple API model usage, as well as to the queue and storage APIs.

createFalClient

interface FalClient {
    queue: QueueClient;
    realtime: RealtimeClient;
    storage: StorageClient;
    stream: (<Id>(endpointId: Id, options: StreamOptions<InputType<Id>>) => Promise<FalStream<InputType<Id>, OutputType<Id>>>);
    streaming: StreamingClient;
    run<Id>(endpointId: Id, options: RunOptions<InputType<Id>>): Promise<Result<OutputType<Id>>>;
    subscribe<Id>(endpointId: Id, options: RunOptions<InputType<Id>> & QueueSubscribeOptions): Promise<Result<OutputType<Id>>>;
}

Properties

queue: QueueClient

The queue client to interact with the queue API.

realtime: RealtimeClient

The realtime client to interact with the realtime API and receive updates in real-time.

  • #RealtimeClient
  • #RealtimeClient.connect
storage: StorageClient

The storage client to interact with the storage API.

stream: (<Id>(endpointId: Id, options: StreamOptions<InputType<Id>>) => Promise<FalStream<InputType<Id>, OutputType<Id>>>)

Calls a fal app that supports streaming and provides a streaming-capable object as a result, that can be used to get partial results through either AsyncIterator or through an event listener.

Type declaration

    • <Id>(endpointId, options): Promise<FalStream<InputType<Id>, OutputType<Id>>>
    • Calls a fal app that supports streaming and provides a streaming-capable object as a result, that can be used to get partial results through either AsyncIterator or through an event listener.

    Parameters

    • endpointId: Id

      the endpoint id, e.g. fal-ai/llavav15-13b.

    • options: StreamOptions<InputType<Id>>

      the request options, including the input payload.

    Returns Promise<FalStream<InputType<Id>, OutputType<Id>>>

    the FalStream instance.

streaming: StreamingClient

The streaming client to interact with the streaming API.


#stream


Methods

  • Runs a fal endpoint identified by its endpointId.

    Type Parameters

    • Id extends EndpointType

    Parameters

    • endpointId: Id

      the registered function revision id or alias.

    • options: RunOptions<InputType<Id>>

    Returns Promise<Result<OutputType<Id>>>

    the remote function output
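
Example: a minimal sketch of run; the endpoint id and input are placeholders. Result<T> wraps the endpoint output together with the request id.

import { fal } from "@fal-ai/client";

const result = await fal.run("fal-ai/fast-sdxl", {
  input: { prompt: "a cat wearing a space suit" },
});
console.log(result.requestId, result.data);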

  • Subscribes to updates for a specific request in the queue.

    Type Parameters

    • Id extends EndpointType

    Parameters

    • endpointId: Id

      The ID of the API endpoint.

    • options: RunOptions<InputType<Id>> & QueueSubscribeOptions

      Options to configure how the request is run and how updates are received.

    Returns Promise<Result<OutputType<Id>>>

    A promise that resolves to the result of the request once it's completed.

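Example: a sketch of subscribe with queue updates. The logs and onQueueUpdate options are assumptions about QueueSubscribeOptions, which this page does not expand; the update shapes match the QueueStatus interfaces below.

import { fal } from "@fal-ai/client";

const result = await fal.subscribe("fal-ai/fast-sdxl", {
  input: { prompt: "a cat wearing a space suit" },
  logs: true,
  onQueueUpdate: (update) => {
    if (update.status === "IN_PROGRESS") {
      update.logs.forEach((log) => console.log(log.message));
    }
  },
});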
InProgressQueueStatus | @fal-ai/client

Interface InProgressQueueStatus

interface InProgressQueueStatus {
    logs: RequestLog[];
    request_id: string;
    response_url: string;
    status: "IN_PROGRESS";
}

Hierarchy

  • BaseQueueStatus
    • InProgressQueueStatus

Properties


logs: RequestLog[]
request_id: string
response_url: string
status: "IN_PROGRESS"
InQueueQueueStatus | @fal-ai/client

Interface InQueueQueueStatus

interface InQueueQueueStatus {
    queue_position: number;
    request_id: string;
    response_url: string;
    status: "IN_QUEUE";
}

Hierarchy

  • BaseQueueStatus
    • InQueueQueueStatus

Properties


queue_position: number
request_id: string
response_url: string
status: "IN_QUEUE"
QueueClient | @fal-ai/client

Interface QueueClient

Represents a request queue with methods for submitting requests, checking their status, retrieving results, and subscribing to updates.

interface QueueClient {
    cancel(endpointId: string, options: BaseQueueOptions): Promise<void>;
    result<Id>(endpointId: Id, options: BaseQueueOptions): Promise<Result<OutputType<Id>>>;
    status(endpointId: string, options: QueueStatusOptions): Promise<QueueStatus>;
    streamStatus(endpointId: string, options: QueueStatusStreamOptions): Promise<FalStream<unknown, QueueStatus>>;
    submit<Id>(endpointId: Id, options: SubmitOptions<InputType<Id>>): Promise<InQueueQueueStatus>;
    subscribeToStatus(endpointId: string, options: QueueStatusSubscriptionOptions): Promise<CompletedQueueStatus>;
}

Methods

  • Cancels a request in the queue.

    Parameters

    • endpointId: string

      The ID of the function web endpoint.

    • options: BaseQueueOptions

      Options to configure how the request is run and how updates are received.

Returns Promise<void>

A promise that resolves once the request is cancelled.

If the request cannot be cancelled.

  • Retrieves the result of a specific request from the queue.

    Type Parameters

    • Id extends EndpointType

    Parameters

    • endpointId: Id

      The ID of the function web endpoint.

    • options: BaseQueueOptions

      Options to configure how the request is run.

    Returns Promise<Result<OutputType<Id>>>

    A promise that resolves to the result of the request.

  • Retrieves the status of a specific request in the queue.

    Parameters

    • endpointId: string

      The ID of the function web endpoint.

    • options: QueueStatusOptions

      Options to configure how the request is run.

    Returns Promise<QueueStatus>

    A promise that resolves to the status of the request.

  • Subscribes to updates for a specific request in the queue using HTTP streaming events.

    Parameters

    • endpointId: string

      The ID of the function web endpoint.

    • options: QueueStatusStreamOptions

      Options to configure how the request is run and how updates are received.

    Returns Promise<FalStream<unknown, QueueStatus>>

    The streaming object that can be used to listen for updates.

  • Submits a request to the queue.

    Type Parameters

    • Id extends EndpointType

    Parameters

    • endpointId: Id

      The ID of the function web endpoint.

    • options: SubmitOptions<InputType<Id>>

      Options to configure how the request is run.

    Returns Promise<InQueueQueueStatus>

    A promise that resolves to the result of enqueuing the request.

  • Subscribes to updates for a specific request in the queue using polling or streaming. See options.mode for more details.

    Parameters

    • endpointId: string

      The ID of the function web endpoint.

    • options: QueueStatusSubscriptionOptions

      Options to configure how the request is run and how updates are received.

    Returns Promise<CompletedQueueStatus>

    A promise that resolves to the final status of the request.

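Example: a sketch of the submit / status / result flow. The endpoint id is a placeholder; requestId and logs as option fields are assumptions, since BaseQueueOptions and QueueStatusOptions are not expanded on this page.

import { fal } from "@fal-ai/client";

const { request_id } = await fal.queue.submit("fal-ai/fast-sdxl", {
  input: { prompt: "a cat" },
});

const status = await fal.queue.status("fal-ai/fast-sdxl", {
  requestId: request_id,
  logs: true,
});

if (status.status === "COMPLETED") {
  const result = await fal.queue.result("fal-ai/fast-sdxl", {
    requestId: request_id,
  });
  console.log(result.data);
}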
RealtimeClient | @fal-ai/client

Interface RealtimeClient

interface RealtimeClient {
    connect<Input, Output>(app: string, handler: RealtimeConnectionHandler<Output>): RealtimeConnection<Input>;
}

Methods

  • Connect to the realtime endpoint. The default implementation uses WebSockets to connect to fal function endpoints that support WSS.

    Type Parameters

    • Input = any
    • Output = any

    Parameters

    • app: string

      the app alias or identifier.

    • handler: RealtimeConnectionHandler<Output>

      the connection handler.

    Returns RealtimeConnection<Input>
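
Example: a sketch of a realtime connection. The app id is a placeholder, and the onResult/onError callbacks plus a send() method on RealtimeConnection are assumptions, since neither type is expanded on this page.

import { fal } from "@fal-ai/client";

const connection = fal.realtime.connect("fal-ai/some-realtime-app", {
  onResult: (result) => console.log(result),
  onError: (error) => console.error(error),
});

connection.send({ prompt: "a cat" });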

StorageClient | @fal-ai/client

Interface StorageClient

File support for the client. This interface establishes the contract for uploading files to the server and transforming the input to replace file objects with URLs.

interface StorageClient {
    transformInput: ((input: Record<string, any>) => Promise<Record<string, any>>);
    upload: ((file: Blob) => Promise<string>);
}

Properties

transformInput: ((input: Record<string, any>) => Promise<Record<string, any>>)

Transform the input to replace file objects with URLs. This is used to transform the input before sending it to the server and ensures that the server receives URLs instead of file objects.

upload: ((file: Blob) => Promise<string>)

Upload a file to the server. Returns the URL of the uploaded file.


Type declaration

    • (file): Promise<string>
    • Parameters

      • file: Blob

        the file to upload

      Returns Promise<string>

      the URL of the uploaded file

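Example: a minimal sketch of both properties, using only the signatures above.

import { fal } from "@fal-ai/client";

// Upload a file directly and get back its URL...
const file = new Blob(["hello"], { type: "text/plain" });
const url = await fal.storage.upload(file);

// ...or let transformInput replace File/Blob values in a payload with URLs.
const input = await fal.storage.transformInput({ document: file });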
StreamingClient | @fal-ai/client

Interface StreamingClient

The streaming client interface.

interface StreamingClient {
    stream<Id>(endpointId: Id, options: StreamOptions<InputType<Id>>): Promise<FalStream<InputType<Id>, OutputType<Id>>>;
}

Methods

  • Calls a fal app that supports streaming and provides a streaming-capable object as a result, that can be used to get partial results through either AsyncIterator or through an event listener.

    Type Parameters

    • Id extends EndpointType

    Parameters

    • endpointId: Id

      the endpoint id, e.g. fal-ai/llavav15-13b.

    • options: StreamOptions<InputType<Id>>

      the request options, including the input payload.

    Returns Promise<FalStream<InputType<Id>, OutputType<Id>>>

    the FalStream instance.

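Example: a sketch of the event-listener style of consuming the resulting FalStream; the on("data") event name is an assumption, not documented on this page.

import { fal } from "@fal-ai/client";

const stream = await fal.streaming.stream("fal-ai/llavav15-13b", {
  input: { prompt: "Describe this image" },
});

stream.on("data", (partial) => console.log(partial));

const result = await stream.done();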
Metrics | @fal-ai/client

Type Alias Metrics

Metrics: {
    inference_time: number | null;
}
QueueStatus | @fal-ai/client

Type Alias QueueStatus

QueueStatus: CompletedQueueStatus | InProgressQueueStatus | InQueueQueueStatus

RequestLog | @fal-ai/client

Type Alias RequestLog

RequestLog: {
    level:
        | "STDERR"
        | "STDOUT"
        | "ERROR"
        | "INFO"
        | "WARN"
        | "DEBUG";
    message: string;
    source: "USER";
    timestamp: string;
}
RequestMiddleware | @fal-ai/client

Type Alias RequestMiddleware

RequestMiddleware: ((request: RequestConfig) => Promise<RequestConfig>)
ResponseHandler | @fal-ai/client

Type Alias ResponseHandler<Output>

ResponseHandler<Output>: ((response: Response) => Promise<Output>)

Type Parameters

  • Output
Result | @fal-ai/client

Type Alias Result<T>

Result<T>: {
    data: T;
    requestId: string;
}

Represents an API result, containing the data, the request ID and any other relevant information.


Type Parameters

  • T

RunOptions | @fal-ai/client

Type Alias RunOptions<Input>
  • Optional Readonly input?: Input

    The function input. It will be submitted either as query params or the body payload, depending on the method.

  • Optional Readonly method?:
        | "get"
        | "post"
        | "put"
        | "delete"
        | string

    The HTTP method; defaults to post.

UrlOptions | @fal-ai/client

Type Alias UrlOptions
  • Optional Readonly subdomain?: string

    If true, the function will use the queue to run the function asynchronously and return the result in a separate call. This influences how the URL is built.

ValidationErrorInfo | @fal-ai/client

Type Alias ValidationErrorInfo

ValidationErrorInfo: {
    loc: (string | number)[];
    msg: string;
    type: string;
}
WebHookResponse | @fal-ai/client

Type Alias WebHookResponse<Payload>
  • payload: Payload

    The payload of the response, structure determined by the Payload type.

  • request_id: string

    The unique identifier for the request.

  • status: "ERROR"

    Indicates an unsuccessful response.

fal | @fal-ai/client

Variable fal (Const)

    fal: SingletonFalClient = ...

Creates a singleton instance of the client. This is useful as a compatibility layer for existing code that uses the client versions prior to 1.0.0.

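Example: a minimal sketch of configuring the singleton before use; the config() method and credentials key are assumptions about SingletonFalClient, which this page does not expand.

import { fal } from "@fal-ai/client";

fal.config({ credentials: process.env.FAL_KEY });

const result = await fal.run("fal-ai/fast-sdxl", {
  input: { prompt: "a cat" },
});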
    diff --git a/libs/client/package.json b/libs/client/package.json index fc1f180..d28a01f 100644 --- a/libs/client/package.json +++ b/libs/client/package.json @@ -1,7 +1,7 @@ { "name": "@fal-ai/client", "description": "The fal.ai client for JavaScript and TypeScript", - "version": "1.2.2", + "version": "1.2.3", "license": "MIT", "repository": { "type": "git", diff --git a/libs/client/src/types/endpoints.ts b/libs/client/src/types/endpoints.ts index 95a1877..27466d9 100644 --- a/libs/client/src/types/endpoints.ts +++ b/libs/client/src/types/endpoints.ts @@ -1,3 +1,49 @@ +export type Audio = { + /** + * Type of media (always 'audio') Default value: `"audio"` + */ + media_type?: "audio"; + /** + * URL where the media file can be accessed + */ + url: string; + /** + * MIME type of the media file + */ + content_type: string; + /** + * Original filename of the media + */ + file_name: string; + /** + * Size of the file in bytes + */ + file_size: number; + /** + * Duration of the media in seconds + */ + duration: number; + /** + * Overall bitrate of the media in bits per second + */ + bitrate: number; + /** + * Codec used to encode the media + */ + codec: string; + /** + * Container format of the media file (e.g., 'mp4', 'mov') + */ + container: string; + /** + * Number of audio channels + */ + channels: number; + /** + * Audio sample rate in Hz + */ + sample_rate: number; +}; export type AudioFile = { /** * The URL where the file can be downloaded from. @@ -24,6 +70,24 @@ export type AudioFile = { */ duration: number; }; +export type AudioTrack = { + /** + * Audio codec used (e.g., 'aac', 'mp3') + */ + codec: string; + /** + * Number of audio channels + */ + channels: number; + /** + * Audio sample rate in Hz + */ + sample_rate: number; + /** + * Audio bitrate in bits per second + */ + bitrate: number; +}; export type BoundingBox = { /** * X-coordinate of the top-left corner @@ -108,6 +172,10 @@ export type ControlLoraWeight = { * URL of the image to be used as the control image. */ control_image_url: string | Blob | File; + /** + * Type of preprocessing to apply to the input image. Default value: `"None"` + */ + preprocess?: "canny" | "depth" | "None"; }; export type ControlNet = { /** @@ -403,7 +471,7 @@ export type HunyuanV2VRequest = { /** * The seed to use for generating the video. */ - seed?: number; + seed?: number | null; /** * By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units. */ @@ -421,9 +489,14 @@ export type HunyuanV2VRequest = { */ num_frames?: "129" | "85"; /** - * The URL to the LoRA model weights. Default value: `""` + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. Default value: `` + */ + loras?: Array; + /** + * If set to true, the safety checker will be enabled. Default value: `true` */ - lora_url?: string | Blob | File; + enable_safety_checker?: boolean; /** * URL of the video input. */ @@ -597,6 +670,20 @@ export type IPAdapter = { */ image_projection_shortcut?: boolean; }; +export type Keyframe = { + /** + * The timestamp in milliseconds where this keyframe starts + */ + timestamp: number; + /** + * The duration in milliseconds of this keyframe + */ + duration: number; + /** + * The URL where this keyframe's media file can be accessed + */ + url: string; +}; export type LoraWeight = { /** * URL or the path to the LoRA weights. Or HF model name. 
@@ -694,6 +781,28 @@ export type PolygonOutputWithLabels = { */ image?: Image; }; +export type Ray2TextToVideoRequest = { + /** + * + */ + prompt: string; + /** + * The aspect ratio of the generated video Default value: `"16:9"` + */ + aspect_ratio?: "16:9" | "9:16" | "4:3" | "3:4" | "21:9" | "9:21"; + /** + * Whether the video should loop (end of video is blended with the beginning) + */ + loop?: boolean; + /** + * Default value: `"540p"` + */ + resolution?: "540p" | "720p"; + /** + * Default value: `"5s"` + */ + duration?: "5s" | "9s"; +}; export type ReferenceFace = { /** * URL of the reference face image @@ -718,6 +827,20 @@ export type Region = { */ y2: number; }; +export type Resolution = { + /** + * Display aspect ratio (e.g., '16:9') + */ + aspect_ratio: string; + /** + * Width of the video in pixels + */ + width: number; + /** + * Height of the video in pixels + */ + height: number; +}; export type RGBColor = { /** * Red color value @@ -732,6 +855,20 @@ export type RGBColor = { */ b?: number; }; +export type SubjectReferenceRequest = { + /** + * + */ + prompt: string; + /** + * URL of the subject reference image to use for consistent subject appearance + */ + subject_reference_image_url: string | Blob | File; + /** + * Whether to use the model's prompt optimizer Default value: `true` + */ + prompt_optimizer?: boolean; +}; export type TextToImageRequest = { /** * The prompt you would like to use to generate images. @@ -809,6 +946,116 @@ export type TextToVideoRequest = { */ aspect_ratio?: "16:9" | "9:16" | "1:1"; }; +export type Track = { + /** + * Unique identifier for the track + */ + id: string; + /** + * Type of track ('video' or 'audio') + */ + type: string; + /** + * List of keyframes that make up this track + */ + keyframes: Array; +}; +export type Video = { + /** + * Type of media (always 'video') Default value: `"video"` + */ + media_type?: "video"; + /** + * URL where the media file can be accessed + */ + url: string; + /** + * MIME type of the media file + */ + content_type: string; + /** + * Original filename of the media + */ + file_name: string; + /** + * Size of the file in bytes + */ + file_size: number; + /** + * Duration of the media in seconds + */ + duration: number; + /** + * Overall bitrate of the media in bits per second + */ + bitrate: number; + /** + * Codec used to encode the media + */ + codec: string; + /** + * Container format of the media file (e.g., 'mp4', 'mov') + */ + container: string; + /** + * Frames per second + */ + fps: number; + /** + * Total number of frames in the video + */ + frame_count: number; + /** + * Time base used for frame timestamps + */ + timebase: string; + /** + * Video resolution information + */ + resolution: Resolution; + /** + * Detailed video format information + */ + format: VideoFormat; + /** + * Audio track information if video has audio + */ + audio?: AudioTrack; + /** + * URL of the extracted first frame + */ + start_frame_url?: string | Blob | File; + /** + * URL of the extracted last frame + */ + end_frame_url?: string | Blob | File; +}; +export type VideoFormat = { + /** + * Container format of the video + */ + container: string; + /** + * Video codec used (e.g., 'h264') + */ + video_codec: string; + /** + * Codec profile (e.g., 'main', 'high') + */ + profile: string; + /** + * Codec level (e.g., 4.1) + */ + level: number; + /** + * Pixel format used (e.g., 'yuv420p') + */ + pixel_format: string; + /** + * Video bitrate in bits per second + */ + bitrate: number; +}; export type WhisperChunk = { /** * Start and end 
timestamp of the chunk @@ -1294,7 +1541,7 @@ export type AnimateDiffV2VTurboOutput = { }; export type AnyLlmInput = { /** - * Name of the model to use. Premium models are charged at 10x the rate of standard models, they include: anthropic/claude-3.5-sonnet, anthropic/claude-3-5-haiku, google/gemini-pro-1.5, openai/gpt-4o. Default value: `"google/gemini-flash-1.5"` + * Name of the model to use. Premium models are charged at 10x the rate of standard models, they include: openai/gpt-4o, anthropic/claude-3.5-sonnet, meta-llama/llama-3.2-90b-vision-instruct, google/gemini-pro-1.5, anthropic/claude-3-5-haiku. Default value: `"google/gemini-flash-1.5"` */ model?: | "anthropic/claude-3.5-sonnet" @@ -1308,7 +1555,8 @@ export type AnyLlmInput = { | "meta-llama/llama-3.1-8b-instruct" | "meta-llama/llama-3.1-70b-instruct" | "openai/gpt-4o-mini" - | "openai/gpt-4o"; + | "openai/gpt-4o" + | "deepseek/deepseek-r1"; /** * Prompt to be used for the chat completion */ @@ -1334,7 +1582,7 @@ export type AnyLlmOutput = { }; export type AnyLlmVisionInput = { /** - * Name of the model to use. Premium models are charged at 3x the rate of standard models, they include: anthropic/claude-3.5-sonnet, anthropic/claude-3-5-haiku, google/gemini-pro-1.5, openai/gpt-4o. Default value: `"google/gemini-flash-1.5"` + * Name of the model to use. Premium models are charged at 3x the rate of standard models, they include: openai/gpt-4o, anthropic/claude-3.5-sonnet, meta-llama/llama-3.2-90b-vision-instruct, google/gemini-pro-1.5, anthropic/claude-3-5-haiku. Default value: `"google/gemini-flash-1.5"` */ model?: | "anthropic/claude-3.5-sonnet" @@ -1342,7 +1590,8 @@ export type AnyLlmVisionInput = { | "google/gemini-pro-1.5" | "google/gemini-flash-1.5" | "google/gemini-flash-1.5-8b" - | "openai/gpt-4o"; + | "openai/gpt-4o" + | "meta-llama/llama-3.2-90b-vision-instruct"; /** * Prompt to be used for the image */ @@ -1533,6 +1782,30 @@ export type BaseInput = { */ export_fps?: number; }; +export type BatchMoonDreamOutput = { + /** + * URL to the generated captions JSON file containing filename-caption pairs. + */ + captions_file: File; + /** + * List of generated captions + */ + outputs: Array; +}; +export type BatchQueryInput = { + /** + * List of image URLs to be processed (maximum 32 images) + */ + images_data_url: string | Blob | File; + /** + * Single prompt to apply to all images + */ + prompt: string; + /** + * Maximum number of tokens to generate Default value: `64` + */ + max_tokens?: number; +}; export type BGRemoveInput = { /** * Input Image to erase from @@ -2250,7 +2523,7 @@ export type CcsrOutput = { }; export type ChatInput = { /** - * Name of the model to use. Premium models are charged at 10x the rate of standard models, they include: anthropic/claude-3.5-sonnet, anthropic/claude-3-5-haiku, google/gemini-pro-1.5, openai/gpt-4o. Default value: `"google/gemini-flash-1.5"` + * Name of the model to use. Premium models are charged at 10x the rate of standard models, they include: openai/gpt-4o, anthropic/claude-3.5-sonnet, meta-llama/llama-3.2-90b-vision-instruct, google/gemini-pro-1.5, anthropic/claude-3-5-haiku. 
Default value: `"google/gemini-flash-1.5"` */ model?: | "anthropic/claude-3.5-sonnet" @@ -2264,7 +2537,8 @@ export type ChatInput = { | "meta-llama/llama-3.1-8b-instruct" | "meta-llama/llama-3.1-70b-instruct" | "openai/gpt-4o-mini" - | "openai/gpt-4o"; + | "openai/gpt-4o" + | "deepseek/deepseek-r1"; /** * Prompt to be used for the chat completion */ @@ -5783,6 +6057,74 @@ export type FastTurboDiffusionOutput = { */ has_nsfw_concepts: Array; }; +export type FfmpegApiComposeInput = { + /** + * List of tracks to be combined into the final media + */ + tracks: Array; +}; +export type FfmpegApiComposeOutput = { + /** + * URL of the processed video file + */ + video_url: string | Blob | File; + /** + * URL of the video's thumbnail image + */ + thumbnail_url: string | Blob | File; +}; +export type FfmpegApiMetadataInput = { + /** + * URL of the media file (video or audio) to analyze + */ + media_url: string | Blob | File; + /** + * Whether to extract the start and end frames for videos. Note that when true the request will be slower. + */ + extract_frames?: boolean; +}; +export type FfmpegApiMetadataOutput = { + /** + * Metadata for the analyzed media file (either Video or Audio) + */ + media: Video | Audio; +}; +export type FfmpegApiWaveformInput = { + /** + * URL of the audio file to analyze + */ + media_url: string | Blob | File; + /** + * Controls how many points are sampled per second of audio. Lower values (e.g. 1-2) create a coarser waveform, higher values (e.g. 4-10) create a more detailed one. Default value: `4` + */ + points_per_second?: number; + /** + * Number of decimal places for the waveform values. Higher values provide more precision but increase payload size. Default value: `2` + */ + precision?: number; + /** + * Size of the smoothing window. Higher values create a smoother waveform. Must be an odd number. Default value: `3` + */ + smoothing_window?: number; +}; +export type FfmpegApiWaveformOutput = { + /** + * Normalized waveform data as an array of values between -1 and 1. The number of points is determined by audio duration × points_per_second. + */ + waveform: Array; + /** + * Duration of the audio in seconds + */ + duration: number; + /** + * Number of points in the waveform data + */ + points: number; + /** + * Number of decimal places used in the waveform values + */ + precision: number; +}; export type Florence2LargeCaptionInput = { /** * The URL of the image to be processed. @@ -7540,7 +7882,7 @@ export type FluxLoraPortraitTrainerOutput = { */ config_file: File; }; -export type FluxProCannyControlInput = { +export type FluxProCannyControlFinetunedInput = { /** * The prompt to generate an image from. */ @@ -7567,7 +7909,7 @@ export type FluxProCannyControlInput = { seed?: number; /** * The CFG (Classifier Free Guidance) scale is a measure of how close you want - * the model to stick to your prompt when looking for a related image to show you. Default value: `3.5` + * the model to stick to your prompt when looking for a related image to show you. Default value: `30` */ guidance_scale?: number; /** @@ -7592,8 +7934,18 @@ export type FluxProCannyControlInput = { * The control image URL to generate the Canny edge map from. */ control_image_url: string | Blob | File; + /** + * References your specific model + */ + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. 
+ * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; }; -export type FluxProDepthControlInput = { +export type FluxProCannyControlInput = { /** * The prompt to generate an image from. */ @@ -7642,20 +7994,40 @@ export type FluxProDepthControlInput = { */ output_format?: "jpeg" | "png"; /** - * The control image URL to generate the depth map from. + * The control image URL to generate the Canny edge map from. */ control_image_url: string | Blob | File; }; -export type FluxProFillInput = { +export type FluxProDepthControlFinetunedInput = { /** - * The prompt to fill the masked part of the image. + * The prompt to generate an image from. */ prompt: string; + /** + * The size of the generated image. Default value: `landscape_4_3` + */ + image_size?: + | ImageSize + | "square_hd" + | "square" + | "portrait_4_3" + | "portrait_16_9" + | "landscape_4_3" + | "landscape_16_9"; + /** + * The number of inference steps to perform. Default value: `28` + */ + num_inference_steps?: number; /** * The same seed and the same prompt given to the same version of the model * will output the same image every time. */ seed?: number; + /** + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. Default value: `15` + */ + guidance_scale?: number; /** * If set to true, the function will wait for the image to be generated and uploaded * before returning the response. This will increase the latency of the function but @@ -7675,15 +8047,21 @@ export type FluxProFillInput = { */ output_format?: "jpeg" | "png"; /** - * The image URL to generate an image from. Needs to match the dimensions of the mask. + * The control image URL to generate the depth map from. */ - image_url: string | Blob | File; + control_image_url: string | Blob | File; /** - * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + * References your specific model */ - mask_url: string | Blob | File; + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; }; -export type FluxProNewInput = { +export type FluxProDepthControlInput = { /** * The prompt to generate an image from. */ @@ -7731,16 +8109,153 @@ export type FluxProNewInput = { * The format of the generated image. Default value: `"jpeg"` */ output_format?: "jpeg" | "png"; -}; -export type FluxProNewOutput = { /** - * The generated image files info. + * The control image URL to generate the depth map from. */ - images: Array; + control_image_url: string | Blob | File; +}; +export type FluxProFillFinetunedInput = { /** - * + * The prompt to fill the masked part of the image. */ - timings: any; + prompt: string; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * The safety tolerance level for the generated image. 
1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string | Blob | File; + /** + * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + */ + mask_url: string | Blob | File; + /** + * References your specific model + */ + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; +}; +export type FluxProFillInput = { + /** + * The prompt to fill the masked part of the image. + */ + prompt: string; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string | Blob | File; + /** + * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + */ + mask_url: string | Blob | File; +}; +export type FluxProNewInput = { + /** + * The prompt to generate an image from. + */ + prompt: string; + /** + * The size of the generated image. Default value: `landscape_4_3` + */ + image_size?: + | ImageSize + | "square_hd" + | "square" + | "portrait_4_3" + | "portrait_16_9" + | "landscape_4_3" + | "landscape_16_9"; + /** + * The number of inference steps to perform. Default value: `28` + */ + num_inference_steps?: number; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. Default value: `3.5` + */ + guidance_scale?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. 
Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; +}; +export type FluxProNewOutput = { + /** + * The generated image files info. + */ + images: Array; + /** + * + */ + timings: any; /** * Seed of the generated Image. It will be the same value of the one passed in the * input or the randomly generated that was used in case none was passed. @@ -7848,6 +8363,65 @@ export type FluxProPlusTextToImageInput = { */ output_format?: "jpeg" | "png"; }; +export type FluxProTextToImageFinetunedInput = { + /** + * The prompt to generate an image from. + */ + prompt: string; + /** + * The size of the generated image. Default value: `landscape_4_3` + */ + image_size?: + | ImageSize + | "square_hd" + | "square" + | "portrait_4_3" + | "portrait_16_9" + | "landscape_4_3" + | "landscape_16_9"; + /** + * The number of inference steps to perform. Default value: `28` + */ + num_inference_steps?: number; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. Default value: `3.5` + */ + guidance_scale?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * References your specific model + */ + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; +}; export type FluxProTextToImageInput = { /** * The prompt to generate an image from. @@ -7897,6 +8471,105 @@ export type FluxProTextToImageInput = { */ output_format?: "jpeg" | "png"; }; +export type FluxProTrainerInput = { + /** + * URL to the training data + */ + data_url: string | Blob | File; + /** + * Determines the finetuning approach based on your concept Default value: `"character"` + */ + mode?: "character" | "product" | "style" | "general"; + /** + * Descriptive note to identify your fine-tune since names are UUIDs. Will be displayed in finetune_details. + */ + finetune_comment: string; + /** + * Defines training duration Default value: `300` + */ + iterations?: number; + /** + * Learning rate for training. Lower values may be needed for certain scenarios. Default is 1e-5 for full and 1e-4 for LoRA. + */ + learning_rate?: number; + /** + * The speed priority will improve training and inference speed Default value: `"quality"` + */ + priority?: "speed" | "quality"; + /** + * Enables/disables automatic image captioning Default value: `true` + */ + captioning?: boolean; + /** + * Unique word/phrase that will be used in the captions, to reference the newly introduced concepts Default value: `"TOK"` + */ + trigger_word?: string; + /** + * Choose between 32 and 16. 
A lora_rank of 16 can increase training efficiency and decrease loading times. Default value: `32` + */ + lora_rank?: number; + /** + * Choose between 'full' for a full finetuning + post hoc extraction of the trained weights into a LoRA or 'lora' for a raw LoRA training Default value: `"full"` + */ + finetune_type?: "full" | "lora"; +}; +export type FluxProTrainerOutput = { + /** + * References your specific model + */ + finetune_id: string; +}; +export type FluxProUltraTextToImageFinetunedInput = { + /** + * The prompt to generate an image from. + */ + prompt: string; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * If set to true, the safety checker will be enabled. Default value: `true` + */ + enable_safety_checker?: boolean; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * The aspect ratio of the generated image. Default value: `"16:9"` + */ + aspect_ratio?: "21:9" | "16:9" | "4:3" | "1:1" | "3:4" | "9:16" | "9:21"; + /** + * Generate less processed, more natural-looking images. + */ + raw?: boolean; + /** + * References your specific model + */ + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; +}; export type FluxProUltraTextToImageInput = { /** * The prompt to generate an image from. @@ -8081,7 +8754,7 @@ export type FluxProV11ReduxOutput = { */ prompt: string; }; -export type FluxProV11UltraInput = { +export type FluxProV11UltraFinetunedInput = { /** * The prompt to generate an image from. */ @@ -8121,8 +8794,82 @@ export type FluxProV11UltraInput = { * Generate less processed, more natural-looking images. */ raw?: boolean; -}; -export type FluxProV11UltraOutput = { + /** + * References your specific model + */ + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; +}; +export type FluxProV11UltraFinetunedOutput = { + /** + * The generated image files info. + */ + images: Array; + /** + * + */ + timings: any; + /** + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + */ + seed: number; + /** + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array; + /** + * The prompt used for generating the image. + */ + prompt: string; +}; +export type FluxProV11UltraInput = { + /** + * The prompt to generate an image from. 
+ */ + prompt: string; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * If set to true, the safety checker will be enabled. Default value: `true` + */ + enable_safety_checker?: boolean; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * The aspect ratio of the generated image. Default value: `"16:9"` + */ + aspect_ratio?: "21:9" | "16:9" | "4:3" | "1:1" | "3:4" | "9:16" | "9:21"; + /** + * Generate less processed, more natural-looking images. + */ + raw?: boolean; +}; +export type FluxProV11UltraOutput = { /** * The generated image files info. */ @@ -8147,14 +8894,268 @@ export type FluxProV11UltraOutput = { }; export type FluxProV11UltraReduxInput = { /** - * The prompt to generate an image from. Default value: `""` + * The prompt to generate an image from. Default value: `""` + */ + prompt?: string; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * If set to true, the safety checker will be enabled. Default value: `true` + */ + enable_safety_checker?: boolean; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * The aspect ratio of the generated image. Default value: `"16:9"` + */ + aspect_ratio?: "21:9" | "16:9" | "4:3" | "1:1" | "3:4" | "9:16" | "9:21"; + /** + * Generate less processed, more natural-looking images. + */ + raw?: boolean; + /** + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string | Blob | File; + /** + * The strength of the image prompt, between 0 and 1. Default value: `0.1` + */ + image_prompt_strength?: number; +}; +export type FluxProV11UltraReduxOutput = { + /** + * The generated image files info. + */ + images: Array; + /** + * + */ + timings: any; + /** + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + */ + seed: number; + /** + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array; + /** + * The prompt used for generating the image. 
+ */ + prompt: string; +}; +export type FluxProV1CannyFinetunedInput = { + /** + * The prompt to generate an image from. + */ + prompt: string; + /** + * The size of the generated image. Default value: `landscape_4_3` + */ + image_size?: + | ImageSize + | "square_hd" + | "square" + | "portrait_4_3" + | "portrait_16_9" + | "landscape_4_3" + | "landscape_16_9"; + /** + * The number of inference steps to perform. Default value: `28` + */ + num_inference_steps?: number; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. Default value: `30` + */ + guidance_scale?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * The control image URL to generate the Canny edge map from. + */ + control_image_url: string | Blob | File; + /** + * References your specific model + */ + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; +}; +export type FluxProV1CannyFinetunedOutput = { + /** + * The generated image files info. + */ + images: Array; + /** + * + */ + timings: any; + /** + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + */ + seed: number; + /** + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array; + /** + * The prompt used for generating the image. + */ + prompt: string; +}; +export type FluxProV1CannyInput = { + /** + * The prompt to generate an image from. + */ + prompt: string; + /** + * The size of the generated image. Default value: `landscape_4_3` + */ + image_size?: + | ImageSize + | "square_hd" + | "square" + | "portrait_4_3" + | "portrait_16_9" + | "landscape_4_3" + | "landscape_16_9"; + /** + * The number of inference steps to perform. Default value: `28` + */ + num_inference_steps?: number; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + */ + seed?: number; + /** + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. Default value: `3.5` + */ + guidance_scale?: number; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. 
+ */ + sync_mode?: boolean; + /** + * The number of images to generate. Default value: `1` + */ + num_images?: number; + /** + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` + */ + safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6"; + /** + * The format of the generated image. Default value: `"jpeg"` + */ + output_format?: "jpeg" | "png"; + /** + * The control image URL to generate the Canny edge map from. + */ + control_image_url: string | Blob | File; +}; +export type FluxProV1CannyOutput = { + /** + * The generated image files info. + */ + images: Array; + /** + * + */ + timings: any; + /** + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + */ + seed: number; + /** + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array; + /** + * The prompt used for generating the image. + */ + prompt: string; +}; +export type FluxProV1DepthFinetunedInput = { + /** + * The prompt to generate an image from. + */ + prompt: string; + /** + * The size of the generated image. Default value: `landscape_4_3` + */ + image_size?: + | ImageSize + | "square_hd" + | "square" + | "portrait_4_3" + | "portrait_16_9" + | "landscape_4_3" + | "landscape_16_9"; + /** + * The number of inference steps to perform. Default value: `28` */ - prompt?: string; + num_inference_steps?: number; /** * The same seed and the same prompt given to the same version of the model * will output the same image every time. */ seed?: number; + /** + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. Default value: `15` + */ + guidance_scale?: number; /** * If set to true, the function will wait for the image to be generated and uploaded * before returning the response. This will increase the latency of the function but @@ -8165,10 +9166,6 @@ export type FluxProV11UltraReduxInput = { * The number of images to generate. Default value: `1` */ num_images?: number; - /** - * If set to true, the safety checker will be enabled. Default value: `true` - */ - enable_safety_checker?: boolean; /** * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. Default value: `"2"` */ @@ -8178,23 +9175,21 @@ export type FluxProV11UltraReduxInput = { */ output_format?: "jpeg" | "png"; /** - * The aspect ratio of the generated image. Default value: `"16:9"` - */ - aspect_ratio?: "21:9" | "16:9" | "4:3" | "1:1" | "3:4" | "9:16" | "9:21"; - /** - * Generate less processed, more natural-looking images. + * The control image URL to generate the depth map from. */ - raw?: boolean; + control_image_url: string | Blob | File; /** - * The image URL to generate an image from. Needs to match the dimensions of the mask. + * References your specific model */ - image_url: string | Blob | File; + finetune_id: string; /** - * The strength of the image prompt, between 0 and 1. Default value: `0.1` + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt */ - image_prompt_strength?: number; + finetune_strength: number; }; -export type FluxProV11UltraReduxOutput = { +export type FluxProV1DepthFinetunedOutput = { /** * The generated image files info. 
*/ @@ -8217,7 +9212,7 @@ export type FluxProV11UltraReduxOutput = { */ prompt: string; }; -export type FluxProV1CannyInput = { +export type FluxProV1DepthInput = { /** * The prompt to generate an image from. */ @@ -8266,11 +9261,11 @@ export type FluxProV1CannyInput = { */ output_format?: "jpeg" | "png"; /** - * The control image URL to generate the Canny edge map from. + * The control image URL to generate the depth map from. */ control_image_url: string | Blob | File; }; -export type FluxProV1CannyOutput = { +export type FluxProV1DepthOutput = { /** * The generated image files info. */ @@ -8293,36 +9288,16 @@ export type FluxProV1CannyOutput = { */ prompt: string; }; -export type FluxProV1DepthInput = { +export type FluxProV1FillFinetunedInput = { /** - * The prompt to generate an image from. + * The prompt to fill the masked part of the image. */ prompt: string; - /** - * The size of the generated image. Default value: `landscape_4_3` - */ - image_size?: - | ImageSize - | "square_hd" - | "square" - | "portrait_4_3" - | "portrait_16_9" - | "landscape_4_3" - | "landscape_16_9"; - /** - * The number of inference steps to perform. Default value: `28` - */ - num_inference_steps?: number; /** * The same seed and the same prompt given to the same version of the model * will output the same image every time. */ seed?: number; - /** - * The CFG (Classifier Free Guidance) scale is a measure of how close you want - * the model to stick to your prompt when looking for a related image to show you. Default value: `3.5` - */ - guidance_scale?: number; /** * If set to true, the function will wait for the image to be generated and uploaded * before returning the response. This will increase the latency of the function but @@ -8342,11 +9317,25 @@ export type FluxProV1DepthInput = { */ output_format?: "jpeg" | "png"; /** - * The control image URL to generate the depth map from. + * The image URL to generate an image from. Needs to match the dimensions of the mask. */ - control_image_url: string | Blob | File; + image_url: string | Blob | File; + /** + * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + */ + mask_url: string | Blob | File; + /** + * References your specific model + */ + finetune_id: string; + /** + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + */ + finetune_strength: number; }; -export type FluxProV1DepthOutput = { +export type FluxProV1FillFinetunedOutput = { /** * The generated image files info. */ @@ -10906,6 +11895,60 @@ export type GuidanceInput = { */ scale?: number; }; +export type HaiperVideoV25FastInput = { + /** + * + */ + prompt: string; + /** + * The duration of the generated video in seconds Default value: `"4"` + */ + duration?: "4" | "6"; + /** + * Whether to use the model's prompt enhancer Default value: `true` + */ + prompt_enhancer?: boolean; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. 
+ */ + seed?: number | null; +}; +export type HaiperVideoV25FastOutput = { + /** + * The generated video + */ + video: File; +}; +export type HaiperVideoV25ImageToVideoFastInput = { + /** + * + */ + prompt: string; + /** + * The duration of the generated video in seconds Default value: `"4"` + */ + duration?: "4" | "6"; + /** + * Whether to use the model's prompt enhancer Default value: `true` + */ + prompt_enhancer?: boolean; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + */ + seed?: number | null; + /** + * URL of the image to use as the first frame + */ + image_url: string | Blob | File; +}; +export type HaiperVideoV25ImageToVideoFastOutput = { + /** + * The generated video + */ + video: File; +}; export type HaiperVideoV2ImageToVideoInput = { /** * @@ -10923,7 +11966,7 @@ export type HaiperVideoV2ImageToVideoInput = { * The same seed and the same prompt given to the same version of the model * will output the same video every time. */ - seed?: number; + seed?: number | null; /** * URL of the image to use as the first frame */ @@ -10952,7 +11995,7 @@ export type HaiperVideoV2Input = { * The same seed and the same prompt given to the same version of the model * will output the same video every time. */ - seed?: number; + seed?: number | null; }; export type HaiperVideoV2Output = { /** @@ -10985,10 +12028,40 @@ export type HunyuanVideoInput = { * The prompt to generate the video from. */ prompt: string; + /** + * The number of inference steps to run. Lower gets faster results, higher gets better results. Default value: `30` + */ + num_inference_steps?: number; /** * The seed to use for generating the video. */ - seed?: number; + seed?: number | null; + /** + * By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units. + */ + pro_mode?: boolean; + /** + * The aspect ratio of the video to generate. Default value: `"16:9"` + */ + aspect_ratio?: "16:9" | "9:16"; + /** + * The resolution of the video to generate. Default value: `"720p"` + */ + resolution?: "480p" | "580p" | "720p"; + /** + * The number of frames to generate. Default value: `"129"` + */ + num_frames?: "129" | "85"; +}; +export type HunyuanVideoLoraInput = { + /** + * The prompt to generate the video from. + */ + prompt: string; + /** + * The seed to use for generating the video. + */ + seed?: number | null; /** * By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units. */ @@ -11006,9 +12079,62 @@ export type HunyuanVideoInput = { */ num_frames?: "129" | "85"; /** - * The URL to the LoRA model weights. Default value: `""` + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. Default value: `` */ - lora_url?: string | Blob | File; + loras?: Array; + /** + * If set to true, the safety checker will be enabled. Default value: `true` + */ + enable_safety_checker?: boolean; +}; +export type HunyuanVideoLoraOutput = { + /** + * + */ + video: File; + /** + * The seed used for generating the video. + */ + seed: number; +}; +export type HunyuanVideoLoraTrainingInput = { + /** + * URL to zip archive with images. Try to use at least 4 images in general the more the better. + * + * In addition to images the archive can contain text files with captions. 
Each text file should have the same name as the image file it corresponds to. + */ + images_data_url: string | Blob | File; + /** + * Number of steps to train the LoRA on. + */ + steps: number; + /** + * The trigger word to use. Default value: `""` + */ + trigger_word?: string; + /** + * Learning rate to use for training. Default value: `0.0001` + */ + learning_rate?: number; + /** + * Whether to generate captions for the images. Default value: `true` + */ + do_caption?: boolean; + /** + * The format of the archive. If not specified, the format will be inferred from the URL. + */ + data_archive_format?: string | null; +}; +export type HunyuanVideoLoraTrainingOutput = { + /** + * URL to the trained diffusers lora weights. + */ + diffusers_lora_file: File; + /** + * URL to the lora configuration file. + */ + config_file: File; }; export type HunyuanVideoOutput = { /** @@ -11036,7 +12162,7 @@ export type Hyper3dRodinInput = { /** * Seed value for randomization, ranging from 0 to 65535. Optional. */ - seed?: number; + seed?: number | null; /** * Format of the geometry file. Possible values: glb, usdz, fbx, obj, stl. Default is glb. Default value: `"glb"` */ @@ -11064,11 +12190,11 @@ export type Hyper3dRodinInput = { /** * An array that specifies the dimensions and scaling factor of the bounding box. Typically, this array contains 3 elements, Length(X-axis), Width(Y-axis) and Height(Z-axis). */ - bbox_condition?: Array; + bbox_condition?: Array | null; /** * Generation add-on features. Default is []. Possible values are HighPack. The HighPack option will provide 4K resolution textures instead of the default 1K, as well as models with high-poly. It will cost triple the billable units. */ - addons?: "HighPack"; + addons?: string | null; }; export type Hyper3dRodinOutput = { /** @@ -12498,6 +13624,15 @@ export type ImageToImageInput = { * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. Default value: `[object Object]` */ sigmas?: SigmasInput; + /** + * The type of prediction to use for the image generation. + * The `epsilon` is the default. Default value: `"epsilon"` + */ + prediction_type?: "v_prediction" | "epsilon"; + /** + * Whether to set the rescale_betas_snr_zero option or not for the sampler + */ + rescale_betas_snr_zero?: boolean; /** * The format of the generated image. Default value: `"png"` */ @@ -14057,6 +15192,15 @@ export type InpaintInput = { * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. Default value: `[object Object]` */ sigmas?: SigmasInput; + /** + * The type of prediction to use for the image generation. + * The `epsilon` is the default. Default value: `"epsilon"` + */ + prediction_type?: "v_prediction" | "epsilon"; + /** + * Whether to set the rescale_betas_snr_zero option or not for the sampler + */ + rescale_betas_snr_zero?: boolean; /** * The format of the generated image. Default value: `"png"` */ @@ -14179,38 +15323,9 @@ export type InpaintTurboInput = { }; export type Input = { /** - * URL of the image to remove background from - */ - image_url: string | Blob | File; - /** - * Model to use for background removal. - * The 'General Use (Light)' model is the original model used in the BiRefNet repository. - * The 'General Use (Heavy)' model is a slower but more accurate model. - * The 'Portrait' model is a model trained specifically for portrait images. 
- * The 'General Use (Light)' model is recommended for most use cases. - * - * The corresponding models are as follows: - * - 'General Use (Light)': BiRefNet-DIS_ep580.pth - * - 'General Use (Heavy)': BiRefNet-massive-epoch_240.pth - * - 'Portrait': BiRefNet-portrait-TR_P3M_10k-epoch_120.pth Default value: `"General Use (Light)"` - */ - model?: "General Use (Light)" | "General Use (Heavy)" | "Portrait"; - /** - * The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images. Default value: `"1024x1024"` - */ - operating_resolution?: "1024x1024" | "2048x2048"; - /** - * The format of the output image Default value: `"png"` - */ - output_format?: "webp" | "png"; - /** - * Whether to output the mask used to remove the background - */ - output_mask?: boolean; - /** - * Whether to refine the foreground using the estimated mask Default value: `true` + * List of tracks to be combined into the final media */ - refine_foreground?: boolean; + tracks: Array; }; export type InsertTextInput = { /** @@ -14377,6 +15492,22 @@ export type IpAdapterFaceIdOutput = { */ seed: number; }; +export type KlingV15KolorsVirtualTryOnInput = { + /** + * Url for the human image. + */ + human_image_url: string | Blob | File; + /** + * Url to the garment image. + */ + garment_image_url: string | Blob | File; +}; +export type KlingV15KolorsVirtualTryOnOutput = { + /** + * The output image. + */ + image: Image; +}; export type KlingVideoV15ProImageToVideoInput = { /** * @@ -16020,7 +17151,16 @@ export type LoraImageToImageInput = { * Defaults to not overriding, in which case the scheduler automatically sets the sigmas based on the `num_inference_steps` parameter. * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. Default value: `[object Object]` */ - sigmas?: SigmasInput; + sigmas?: SigmasInput; + /** + * The type of prediction to use for the image generation. + * The `epsilon` is the default. Default value: `"epsilon"` + */ + prediction_type?: "v_prediction" | "epsilon"; + /** + * Whether to set the rescale_betas_snr_zero option or not for the sampler + */ + rescale_betas_snr_zero?: boolean; /** * The format of the generated image. Default value: `"png"` */ @@ -16221,6 +17361,15 @@ export type LoraInpaintInput = { * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. Default value: `[object Object]` */ sigmas?: SigmasInput; + /** + * The type of prediction to use for the image generation. + * The `epsilon` is the default. Default value: `"epsilon"` + */ + prediction_type?: "v_prediction" | "epsilon"; + /** + * Whether to set the rescale_betas_snr_zero option or not for the sampler + */ + rescale_betas_snr_zero?: boolean; /** * The format of the generated image. Default value: `"png"` */ @@ -16421,6 +17570,15 @@ export type LoraInput = { * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. Default value: `[object Object]` */ sigmas?: SigmasInput; + /** + * The type of prediction to use for the image generation. + * The `epsilon` is the default. Default value: `"epsilon"` + */ + prediction_type?: "v_prediction" | "epsilon"; + /** + * Whether to set the rescale_betas_snr_zero option or not for the sampler + */ + rescale_betas_snr_zero?: boolean; /** * The format of the generated image. 
Default value: `"png"` */ @@ -16664,6 +17822,22 @@ export type MaskInput = { */ image_url: string | Blob | File; }; +export type MetadataInput = { + /** + * URL of the media file (video or audio) to analyze + */ + media_url: string | Blob | File; + /** + * Whether to extract the start and end frames for videos. Note that when true the request will be slower. + */ + extract_frames?: boolean; +}; +export type MetadataOutput = { + /** + * Metadata for the analyzed media file (either Video or Audio) + */ + media: Video | Audio; +}; export type MiDaSInput = { /** * URL of the image to process @@ -16828,6 +18002,26 @@ export type MinimaxVideo01Output = { */ video: File; }; +export type MinimaxVideo01SubjectReferenceInput = { + /** + * + */ + prompt: string; + /** + * URL of the subject reference image to use for consistent subject appearance + */ + subject_reference_image_url: string | Blob | File; + /** + * Whether to use the model's prompt optimizer Default value: `true` + */ + prompt_optimizer?: boolean; +}; +export type MinimaxVideo01SubjectReferenceOutput = { + /** + * The generated video + */ + video: File; +}; export type MLSDInput = { /** * URL of the image to process @@ -16992,6 +18186,30 @@ export type MoondreamBatchedOutput = { */ filenames?: Array; }; +export type MoondreamNextBatchInput = { + /** + * List of image URLs to be processed (maximum 32 images) + */ + images_data_url: string | Blob | File; + /** + * Single prompt to apply to all images + */ + prompt: string; + /** + * Maximum number of tokens to generate Default value: `64` + */ + max_tokens?: number; +}; +export type MoondreamNextBatchOutput = { + /** + * URL to the generated captions JSON file containing filename-caption pairs. + */ + captions_file: File; + /** + * List of generated captions + */ + outputs: Array; +}; export type MoondreamNextDetectionInput = { /** * Image URL to be processed @@ -17297,9 +18515,13 @@ export type OmniZeroOutput = { }; export type Output = { /** - * The generated video with the lip sync. + * URL of the processed video file */ - video: File; + video_url: string | Blob | File; + /** + * URL of the video's thumbnail image + */ + thumbnail_url: string | Blob | File; }; export type PhotomakerInput = { /** @@ -18027,6 +19249,12 @@ export type QueryInput = { */ max_tokens?: number; }; +export type Ray2T2VOutput = { + /** + * The generated video + */ + video: File; +}; export type RealisticVisionImageToImageInput = { /** * The Realistic Vision model to use. @@ -18374,6 +19602,42 @@ export type Recraft20bOutput = { */ images: Array; }; +export type RecraftClarityUpscaleInput = { + /** + * The URL of the image to be upscaled. Must be in PNG format. + */ + image_url: string | Blob | File; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean; +}; +export type RecraftClarityUpscaleOutput = { + /** + * The upscaled image. + */ + image: Image; +}; +export type RecraftCreativeUpscaleInput = { + /** + * The URL of the image to be upscaled. Must be in PNG format. + */ + image_url: string | Blob | File; + /** + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. 
+ */ + sync_mode?: boolean; +}; +export type RecraftCreativeUpscaleOutput = { + /** + * The upscaled image. + */ + image: Image; +}; export type RecraftV3CreateStyleInput = { /** * URL to zip archive with images, use PNG format. Maximum 5 images are allowed. @@ -18891,6 +20155,94 @@ export type RGBAToRGBImageInput = { */ transparent_color: Color; }; +export type Sa2va4bImageInput = { + /** + * Prompt to be used for the chat completion + */ + prompt: string; + /** + * Url for the Input image. + */ + image_url: string | Blob | File; +}; +export type Sa2va4bImageOutput = { + /** + * Generated output + */ + output: string; + /** + * Dictionary of label: mask image + */ + masks: Array; +}; +export type Sa2va4bVideoInput = { + /** + * Prompt to be used for the chat completion + */ + prompt: string; + /** + * The URL of the input video. + */ + video_url: string | Blob | File; + /** + * Number of frames to sample from the video. If not provided, all frames are sampled. + */ + num_frames_to_sample?: number; +}; +export type Sa2va4bVideoOutput = { + /** + * Generated output + */ + output: string; + /** + * Dictionary of label: mask image + */ + masks: Array; +}; +export type Sa2va8bImageInput = { + /** + * Prompt to be used for the chat completion + */ + prompt: string; + /** + * Url for the Input image. + */ + image_url: string | Blob | File; +}; +export type Sa2va8bImageOutput = { + /** + * Generated output + */ + output: string; + /** + * Dictionary of label: mask image + */ + masks: Array; +}; +export type Sa2va8bVideoInput = { + /** + * Prompt to be used for the chat completion + */ + prompt: string; + /** + * The URL of the input video. + */ + video_url: string | Blob | File; + /** + * Number of frames to sample from the video. If not provided, all frames are sampled. + */ + num_frames_to_sample?: number; +}; +export type Sa2va8bVideoOutput = { + /** + * Generated output + */ + output: string; + /** + * Dictionary of label: mask image + */ + masks: Array; +}; export type SadtalkerInput = { /** * URL of the source image @@ -20795,6 +22147,12 @@ export type StyleReferenceOutput = { */ style_id: string; }; +export type SubjectReferenceOutput = { + /** + * The generated video + */ + video: File; +}; export type Switti512Input = { /** * The prompt to generate an image from. @@ -20971,9 +22329,9 @@ export type SwittiOutput = { }; export type SyncLipsyncInput = { /** - * The model to use for lipsyncing Default value: `"lipsync-1.8.0"` + * The model to use for lipsyncing Default value: `"lipsync-1.9.0-beta"` */ - model?: "lipsync-1.8.0" | "lipsync-1.7.1"; + model?: "lipsync-1.8.0" | "lipsync-1.7.1" | "lipsync-1.9.0-beta"; /** * URL of the input video */ @@ -21560,6 +22918,15 @@ export type TextToImageInput = { * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. Default value: `[object Object]` */ sigmas?: SigmasInput; + /** + * The type of prediction to use for the image generation. + * The `epsilon` is the default. Default value: `"epsilon"` + */ + prediction_type?: "v_prediction" | "epsilon"; + /** + * Whether to set the rescale_betas_snr_zero option or not for the sampler + */ + rescale_betas_snr_zero?: boolean; /** * The format of the generated image. Default value: `"png"` */ @@ -21922,6 +23289,53 @@ export type TransparentImageToMaskOutput = { */ image: Image; }; +export type TranspixarInput = { + /** + * The prompt to generate the video from. 
+ */ + prompt: string; + /** + * The negative prompt to generate video from Default value: `""` + */ + negative_prompt?: string; + /** + * The number of inference steps to perform. Default value: `24` + */ + num_inference_steps?: number; + /** + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + */ + seed?: number | null; + /** + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related video to show you. Default value: `7` + */ + guidance_scale?: number; + /** + * The target FPS of the video Default value: `8` + */ + export_fps?: number; +}; +export type TranspixarOutput = { + /** + * The URL to the generated video + */ + videos: Array; + /** + * + */ + timings: any; + /** + * Seed of the generated video. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + */ + seed: number; + /** + * The prompt used for generating the video. + */ + prompt: string; +}; export type TrellisInput = { /** * URL of the input image to convert to 3D @@ -22120,6 +23534,20 @@ export type V3TTSOutput = { */ audio: AudioFile; }; +export type VideoInput = { + /** + * Prompt to be used for the chat completion + */ + prompt: string; + /** + * The URL of the input video. + */ + video_url: string | Blob | File; + /** + * Number of frames to sample from the video. If not provided, all frames are sampled. + */ + num_frames_to_sample?: number; +}; export type VideoOutput = { /** * The generated video @@ -22195,7 +23623,7 @@ export type VideoUpscalerOutput = { }; export type VisionInput = { /** - * Name of the model to use. Premium models are charged at 3x the rate of standard models, they include: anthropic/claude-3.5-sonnet, anthropic/claude-3-5-haiku, google/gemini-pro-1.5, openai/gpt-4o. Default value: `"google/gemini-flash-1.5"` + * Name of the model to use. Premium models are charged at 3x the rate of standard models, they include: openai/gpt-4o, anthropic/claude-3.5-sonnet, meta-llama/llama-3.2-90b-vision-instruct, google/gemini-pro-1.5, anthropic/claude-3-5-haiku. Default value: `"google/gemini-flash-1.5"` */ model?: | "anthropic/claude-3.5-sonnet" @@ -22203,7 +23631,8 @@ export type VisionInput = { | "google/gemini-pro-1.5" | "google/gemini-flash-1.5" | "google/gemini-flash-1.5-8b" - | "openai/gpt-4o"; + | "openai/gpt-4o" + | "meta-llama/llama-3.2-90b-vision-instruct"; /** * Prompt to be used for the image */ @@ -22269,6 +23698,42 @@ export type VTONOutput = { */ has_nsfw_concepts: boolean; }; +export type WaveformInput = { + /** + * URL of the audio file to analyze + */ + media_url: string | Blob | File; + /** + * Controls how many points are sampled per second of audio. Lower values (e.g. 1-2) create a coarser waveform, higher values (e.g. 4-10) create a more detailed one. Default value: `4` + */ + points_per_second?: number; + /** + * Number of decimal places for the waveform values. Higher values provide more precision but increase payload size. Default value: `2` + */ + precision?: number; + /** + * Size of the smoothing window. Higher values create a smoother waveform. Must be an odd number. Default value: `3` + */ + smoothing_window?: number; +}; +export type WaveformOutput = { + /** + * Normalized waveform data as an array of values between -1 and 1. The number of points is determined by audio duration × points_per_second. 
+ */ + waveform: Array; + /** + * Duration of the audio in seconds + */ + duration: number; + /** + * Number of points in the waveform data + */ + points: number; + /** + * Number of decimal places used in the waveform values + */ + precision: number; +}; export type WhisperInput = { /** * URL of the audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav or webm. @@ -22711,10 +24176,18 @@ export type EndpointTypeMap = { input: FluxProV11UltraInput; output: FluxProV11UltraOutput; }; + "fal-ai/flux-pro/v1.1-ultra-finetuned": { + input: FluxProV11UltraFinetunedInput; + output: FluxProV11UltraFinetunedOutput; + }; "fal-ai/ideogram/v2": { input: IdeogramV2Input; output: IdeogramV2Output; }; + "fal-ai/hunyuan-video-lora-training": { + input: HunyuanVideoLoraTrainingInput; + output: HunyuanVideoLoraTrainingOutput; + }; "fal-ai/flux-lora-fast-training": { input: FluxLoraFastTrainingInput; output: FluxLoraFastTrainingOutput; @@ -22723,6 +24196,10 @@ export type EndpointTypeMap = { input: FluxLoraPortraitTrainerInput; output: FluxLoraPortraitTrainerOutput; }; + "fal-ai/flux-pro-trainer": { + input: FluxProTrainerInput; + output: FluxProTrainerOutput; + }; "fal-ai/recraft-v3": { input: RecraftV3Input; output: RecraftV3Output; @@ -22735,10 +24212,18 @@ export type EndpointTypeMap = { input: MinimaxVideo01LiveImageToVideoInput; output: MinimaxVideo01LiveImageToVideoOutput; }; - "fal-ai/haiper-video-v2": { + "fal-ai/minimax/video-01-subject-reference": { + input: MinimaxVideo01SubjectReferenceInput; + output: MinimaxVideo01SubjectReferenceOutput; + }; + "fal-ai/haiper-video/v2": { input: HaiperVideoV2Input; output: HaiperVideoV2Output; }; + "fal-ai/haiper-video/v2.5/fast": { + input: HaiperVideoV25FastInput; + output: HaiperVideoV25FastOutput; + }; "fal-ai/hyper3d/rodin": { input: Hyper3dRodinInput; output: Hyper3dRodinOutput; @@ -22795,14 +24280,26 @@ export type EndpointTypeMap = { input: FluxProV1FillInput; output: FluxProV1FillOutput; }; + "fal-ai/flux-pro/v1/fill-finetuned": { + input: FluxProV1FillFinetunedInput; + output: FluxProV1FillFinetunedOutput; + }; "fal-ai/flux-pro/v1/canny": { input: FluxProV1CannyInput; output: FluxProV1CannyOutput; }; + "fal-ai/flux-pro/v1/canny-finetuned": { + input: FluxProV1CannyFinetunedInput; + output: FluxProV1CannyFinetunedOutput; + }; "fal-ai/flux-pro/v1/depth": { input: FluxProV1DepthInput; output: FluxProV1DepthOutput; }; + "fal-ai/flux-pro/v1/depth-finetuned": { + input: FluxProV1DepthFinetunedInput; + output: FluxProV1DepthFinetunedOutput; + }; "fal-ai/flux-lora-canny": { input: FluxLoraCannyInput; output: FluxLoraCannyOutput; @@ -22979,10 +24476,14 @@ export type EndpointTypeMap = { input: MinimaxVideo01Input; output: MinimaxVideo01Output; }; - "fal-ai/haiper-video-v2/image-to-video": { + "fal-ai/haiper-video/v2/image-to-video": { input: HaiperVideoV2ImageToVideoInput; output: HaiperVideoV2ImageToVideoOutput; }; + "fal-ai/haiper-video/v2.5/image-to-video/fast": { + input: HaiperVideoV25ImageToVideoFastInput; + output: HaiperVideoV25ImageToVideoFastOutput; + }; "fal-ai/mochi-v1": { input: MochiV1Input; output: MochiV1Output; @@ -22991,6 +24492,10 @@ export type EndpointTypeMap = { input: HunyuanVideoInput; output: HunyuanVideoOutput; }; + "fal-ai/hunyuan-video-lora": { + input: HunyuanVideoLoraInput; + output: HunyuanVideoLoraOutput; + }; "fal-ai/video-upscaler": { input: VideoUpscalerInput; output: VideoUpscalerOutput; @@ -23023,6 +24528,10 @@ export type EndpointTypeMap = { input: LumaPhotonFlashInput; output: LumaPhotonFlashOutput; 
}; + "fal-ai/kling/v1-5/kolors-virtual-try-on": { + input: KlingV15KolorsVirtualTryOnInput; + output: KlingV15KolorsVirtualTryOnOutput; + }; "fal-ai/kling-video/v1/standard/text-to-video": { input: KlingVideoV1StandardTextToVideoInput; output: KlingVideoV1StandardTextToVideoOutput; @@ -23059,6 +24568,10 @@ export type EndpointTypeMap = { input: KlingVideoV16ProImageToVideoInput; output: KlingVideoV16ProImageToVideoOutput; }; + "fal-ai/transpixar": { + input: TranspixarInput; + output: TranspixarOutput; + }; "fal-ai/cogvideox-5b": { input: Cogvideox5bInput; output: Cogvideox5bOutput; @@ -23107,6 +24620,18 @@ export type EndpointTypeMap = { input: CreativeUpscalerInput; output: CreativeUpscalerOutput; }; + "fal-ai/ffmpeg-api/compose": { + input: FfmpegApiComposeInput; + output: FfmpegApiComposeOutput; + }; + "fal-ai/ffmpeg-api/metadata": { + input: FfmpegApiMetadataInput; + output: FfmpegApiMetadataOutput; + }; + "fal-ai/ffmpeg-api/waveform": { + input: FfmpegApiWaveformInput; + output: FfmpegApiWaveformOutput; + }; "fal-ai/clarity-upscaler": { input: ClarityUpscalerInput; output: ClarityUpscalerOutput; @@ -23539,6 +25064,22 @@ export type EndpointTypeMap = { input: ImageutilsSamInput; output: ImageutilsSamOutput; }; + "fal-ai/sa2va/8b/image": { + input: Sa2va8bImageInput; + output: Sa2va8bImageOutput; + }; + "fal-ai/sa2va/8b/video": { + input: Sa2va8bVideoInput; + output: Sa2va8bVideoOutput; + }; + "fal-ai/sa2va/4b/image": { + input: Sa2va4bImageInput; + output: Sa2va4bImageOutput; + }; + "fal-ai/sa2va/4b/video": { + input: Sa2va4bVideoInput; + output: Sa2va4bVideoOutput; + }; "fal-ai/mini-cpm": { input: MiniCpmInput; output: MiniCpmOutput; @@ -23627,4 +25168,16 @@ export type EndpointTypeMap = { input: MoondreamNextDetectionInput; output: MoondreamNextDetectionOutput; }; + "fal-ai/moondream-next/batch": { + input: MoondreamNextBatchInput; + output: MoondreamNextBatchOutput; + }; + "fal-ai/recraft-clarity-upscale": { + input: RecraftClarityUpscaleInput; + output: RecraftClarityUpscaleOutput; + }; + "fal-ai/recraft-creative-upscale": { + input: RecraftCreativeUpscaleInput; + output: RecraftCreativeUpscaleOutput; + }; };
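
The hunks above grow `EndpointTypeMap`, the table that ties each endpoint id string to its `Input`/`Output` pair. Below is a minimal sketch of how one of the newly added entries is consumed through the client, assuming the `subscribe` helper on `FalClient` and its `{ data }` result shape; the prompt, URLs, and finetune id are hypothetical placeholders, not values from this patch:

import { createFalClient } from "@fal-ai/client";

// Credentials typically resolve from the FAL_KEY environment variable
// when running on a server.
const fal = createFalClient();

// "fal-ai/flux-pro/v1/canny-finetuned" is a key of EndpointTypeMap, so `input`
// is checked against FluxProV1CannyFinetunedInput and `data` is typed as
// FluxProV1CannyFinetunedOutput.
const { data } = await fal.subscribe("fal-ai/flux-pro/v1/canny-finetuned", {
  input: {
    prompt: "a watercolor fox in a pine forest",        // hypothetical
    control_image_url: "https://example.com/edges.png", // hypothetical
    finetune_id: "my-finetune-id",                      // hypothetical; produced by a trainer run
    finetune_strength: 1.0,
  },
});

console.log(data.seed, data.has_nsfw_concepts, data.images);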
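The new `fal-ai/hunyuan-video-lora-training` and `fal-ai/hunyuan-video-lora` entries pair a trainer with a generator. Long-running jobs are usually better served by the queue client than by `subscribe`; the sketch below assumes `queue.submit`/`queue.result` on `FalClient` and their `request_id`/`{ data }` shapes. Note that the element type of `loras` is elided in this diff (bare `Array`), so the `{ path, scale }` shape used here follows the client's LoraWeight convention and is an assumption:

// Submit the training job; the archive URL and trigger word are hypothetical.
const { request_id } = await fal.queue.submit("fal-ai/hunyuan-video-lora-training", {
  input: {
    images_data_url: "https://example.com/dataset.zip",
    steps: 1000,
    trigger_word: "TOK",
  },
});

// ...after the job completes (polling or webhook handling elided):
const training = await fal.queue.result("fal-ai/hunyuan-video-lora-training", {
  requestId: request_id,
});

// Feed the trained weights back into the generation endpoint.
const { data: gen } = await fal.subscribe("fal-ai/hunyuan-video-lora", {
  input: {
    prompt: "TOK dancing in the rain", // hypothetical
    loras: [{ path: training.data.diffusers_lora_file.url, scale: 1.0 }], // element shape assumed
  },
});

console.log(gen.video, gen.seed);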
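`WaveformInput`/`WaveformOutput` document the contract for the new `fal-ai/ffmpeg-api/waveform` endpoint: a sampling rate in points per second, a decimal precision, and an odd-sized smoothing window in; normalized samples plus bookkeeping out. A short sketch under the same client assumptions as above, with a placeholder media URL:

const { data: wave } = await fal.subscribe("fal-ai/ffmpeg-api/waveform", {
  input: {
    media_url: "https://example.com/audio.mp3", // hypothetical
    points_per_second: 4, // coarser at 1-2, finer at 4-10
    precision: 2,
    smoothing_window: 3,  // must be odd
  },
});

// Per the output type: points is duration × points_per_second,
// and every waveform value is normalized to [-1, 1].
console.log(wave.duration, wave.points, wave.waveform.slice(0, 8));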
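The four `fal-ai/sa2va/*` entries added above all share one shape: a prompt plus an image or video in, generated text plus per-label masks out. A sketch for the 8B video variant, same assumptions as above; the prompt and video URL are hypothetical:

const { data: seg } = await fal.subscribe("fal-ai/sa2va/8b/video", {
  input: {
    prompt: "segment the person in the foreground", // hypothetical
    video_url: "https://example.com/clip.mp4",      // hypothetical
    num_frames_to_sample: 16, // omit to sample every frame
  },
});

console.log(seg.output); // generated text
console.log(seg.masks);  // label-to-mask images (element type is elided as bare Array in this diff)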