Core AI Type Reference

Overview

Core AI uses a comprehensive type system to ensure type safety across all operations. This page documents the essential types for working with messages, models, configurations, and results.

Message Types

Message

Union type representing all message types in a conversation.
type Message =
    | SystemMessage
    | UserMessage
    | AssistantMessage
    | ToolResultMessage;

SystemMessage

System instructions that guide the model’s behavior.
type SystemMessage = {
    role: 'system';
    content: string;
};
Example:
const systemMsg: SystemMessage = {
  role: 'system',
  content: 'You are a helpful assistant.'
};

UserMessage

Messages from the user, supporting text and multimodal content.
type UserMessage = {
    role: 'user';
    content: string | UserContentPart[];
};

type UserContentPart = TextPart | ImagePart | FilePart;
Examples:
// Simple text message
const textMsg: UserMessage = {
  role: 'user',
  content: 'Hello!'
};

// Multimodal message with text and image
const multimodalMsg: UserMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'What is in this image?' },
    {
      type: 'image',
      source: {
        type: 'url',
        url: 'https://example.com/image.jpg'
      }
    }
  ]
};

// Image from base64
const base64Msg: UserMessage = {
  role: 'user',
  content: [
    {
      type: 'image',
      source: {
        type: 'base64',
        mediaType: 'image/png',
        data: 'iVBORw0KGgoAAAANSUhEUgA...'
      }
    }
  ]
};

TextPart

type TextPart = {
    type: 'text';
    text: string;
};

ImagePart

type ImagePart = {
    type: 'image';
    source:
        | { type: 'base64'; mediaType: string; data: string }
        | { type: 'url'; url: string };
};

FilePart

type FilePart = {
    type: 'file';
    data: string;
    mimeType: string;
    filename?: string;
};

AssistantMessage

Messages from the assistant, containing text, reasoning, or tool calls.
type AssistantMessage = {
    role: 'assistant';
    parts: AssistantContentPart[];
};

type AssistantContentPart =
    | AssistantTextPart
    | ReasoningPart
    | ToolCallPart;
Example:
const assistantMsg: AssistantMessage = {
  role: 'assistant',
  parts: [
    { type: 'text', text: 'I can help with that.' },
    {
      type: 'tool-call',
      toolCall: {
        id: 'call_123',
        name: 'get_weather',
        arguments: { location: 'Paris' }
      }
    }
  ]
};

AssistantTextPart

type AssistantTextPart = {
    type: 'text';
    text: string;
};

ReasoningPart

Internal reasoning/thinking from models that support extended reasoning.
type ReasoningPart = {
    type: 'reasoning';
    text: string;
    providerMetadata?: Record<string, unknown>;
};

ToolCallPart

type ToolCallPart = {
    type: 'tool-call';
    toolCall: ToolCall;
};

ToolCall

type ToolCall = {
    id: string;
    name: string;
    arguments: Record<string, unknown>;
};

ToolResultMessage

Results from executing a tool call.
type ToolResultMessage = {
    role: 'tool';
    toolCallId: string;
    content: string;
    isError?: boolean;
};
Example:
const toolResult: ToolResultMessage = {
  role: 'tool',
  toolCallId: 'call_123',
  content: JSON.stringify({ temp: 72, condition: 'sunny' })
};

Tool Types

ToolDefinition

Defines a tool the model can call. The parameters field is a Zod schema (z.ZodType) describing the tool's arguments.
type ToolDefinition = {
    name: string;
    description: string;
    parameters: z.ZodType;
};

ToolSet

type ToolSet = Record<string, ToolDefinition>;

ToolChoice

Controls how the model may use tools: one of the string modes, or an object form that directs the model to call the named tool.
type ToolChoice =
    | 'auto'
    | 'none'
    | 'required'
    | { type: 'tool'; toolName: string };
Examples:
const auto: ToolChoice = 'auto';
const required: ToolChoice = 'required';
const specific: ToolChoice = { type: 'tool', toolName: 'get_weather' };

Model Types

ChatModel

Interface for chat/completion models.
type ChatModel = {
    readonly provider: string;
    readonly modelId: string;
    generate(options: GenerateOptions): Promise<GenerateResult>;
    stream(options: GenerateOptions): Promise<StreamResult>;
    generateObject<TSchema extends z.ZodType>(
        options: GenerateObjectOptions<TSchema>
    ): Promise<GenerateObjectResult<TSchema>>;
    streamObject<TSchema extends z.ZodType>(
        options: StreamObjectOptions<TSchema>
    ): Promise<StreamObjectResult<TSchema>>;
};

EmbeddingModel

Interface for embedding models.
type EmbeddingModel = {
    readonly provider: string;
    readonly modelId: string;
    embed(options: EmbedOptions): Promise<EmbedResult>;
};

ImageModel

Interface for image generation models.
type ImageModel = {
    readonly provider: string;
    readonly modelId: string;
    generate(options: ImageGenerateOptions): Promise<ImageGenerateResult>;
};

Configuration Types

ModelConfig

Model configuration parameters.
type ModelConfig = {
    temperature?: number;      // 0-2, default varies by model
    maxTokens?: number;        // Maximum tokens to generate
    topP?: number;             // 0-1, nucleus sampling
    stopSequences?: string[];  // Stop generation at these strings
    frequencyPenalty?: number; // -2 to 2
    presencePenalty?: number;  // -2 to 2
};

ReasoningConfig

Configuration for extended thinking/reasoning.
type ReasoningConfig = {
    effort: ReasoningEffort;
};

type ReasoningEffort =
    | 'minimal'
    | 'low'
    | 'medium'
    | 'high'
    | 'max';

Generation Options

GenerateOptions

Options accepted by ChatModel.generate() and ChatModel.stream().
type GenerateOptions = {
    messages: Message[];
    reasoning?: ReasoningConfig;
    tools?: ToolSet;
    toolChoice?: ToolChoice;
    config?: ModelConfig;
    providerOptions?: Record<string, unknown>;
    signal?: AbortSignal;
};

GenerateObjectOptions

type GenerateObjectOptions<TSchema extends z.ZodType> = {
    messages: Message[];
    schema: TSchema;
    schemaName?: string;
    schemaDescription?: string;
    reasoning?: ReasoningConfig;
    config?: ModelConfig;
    providerOptions?: Record<string, unknown>;
    signal?: AbortSignal;
};

StreamObjectOptions

type StreamObjectOptions<TSchema extends z.ZodType> =
    GenerateObjectOptions<TSchema>;

Result Types

GenerateResult

type GenerateResult = {
    parts: AssistantContentPart[];
    content: string | null;
    reasoning: string | null;
    toolCalls: ToolCall[];
    finishReason: FinishReason;
    usage: ChatUsage;
};

GenerateObjectResult

type GenerateObjectResult<TSchema extends z.ZodType> = {
    object: z.infer<TSchema>;
    finishReason: FinishReason;
    usage: ChatUsage;
};

FinishReason

type FinishReason =
    | 'stop'            // Natural completion
    | 'length'          // Hit max tokens
    | 'tool-calls'      // Stopped to call tools
    | 'content-filter'  // Content filtered
    | 'unknown';        // Unknown reason

Streaming Types

StreamResult

type StreamResult = AsyncIterable<StreamEvent> & {
    toResponse(): Promise<GenerateResult>;
};

StreamEvent

type StreamEvent =
    | { type: 'reasoning-start' }
    | { type: 'reasoning-delta'; text: string }
    | { type: 'reasoning-end' }
    | { type: 'text-delta'; text: string }
    | { type: 'tool-call-start'; toolCallId: string; toolName: string }
    | { type: 'tool-call-delta'; toolCallId: string; argumentsDelta: string }
    | { type: 'tool-call-end'; toolCall: ToolCall }
    | { type: 'finish'; finishReason: FinishReason; usage: ChatUsage };

StreamObjectResult

type StreamObjectResult<TSchema extends z.ZodType> = AsyncIterable<
    ObjectStreamEvent<TSchema>
> & {
    toResponse(): Promise<GenerateObjectResult<TSchema>>;
};

ObjectStreamEvent

type ObjectStreamEvent<TSchema extends z.ZodType> =
    | { type: 'object-delta'; text: string }
    | { type: 'object'; object: z.infer<TSchema> }
    | { type: 'finish'; finishReason: FinishReason; usage: ChatUsage };

Usage Types

ChatUsage

Token usage reported after chat completion.
type ChatUsage = {
    /** Total input tokens, including cached and cache-write tokens. */
    inputTokens: number;
    /** Total output tokens, including both visible text and reasoning. */
    outputTokens: number;
    /** Breakdown of input token categories. */
    inputTokenDetails: ChatInputTokenDetails;
    /** Breakdown of output token categories. */
    outputTokenDetails: ChatOutputTokenDetails;
};
Important Notes:
  • inputTokens is the total including cached reads and cache writes
  • outputTokens is the total including visible text and reasoning
  • For providers like Anthropic, cache tokens are normalized and included in the total

ChatInputTokenDetails

type ChatInputTokenDetails = {
    /** Input tokens served from cache. Subset of inputTokens. */
    cacheReadTokens: number;
    /** Input tokens written to cache. Subset of inputTokens. */
    cacheWriteTokens: number;
};

ChatOutputTokenDetails

type ChatOutputTokenDetails = {
    /** Tokens consumed by reasoning. Subset of outputTokens. */
    reasoningTokens?: number;
};

EmbeddingUsage

type EmbeddingUsage = {
    /** Number of tokens consumed by embedding input. */
    inputTokens: number;
};

Embedding Types

EmbedOptions

type EmbedOptions = {
    input: string | string[];
    dimensions?: number;
    providerOptions?: Record<string, unknown>;
};

EmbedResult

type EmbedResult = {
    embeddings: number[][];
    usage?: EmbeddingUsage;
};

Image Generation Types

ImageGenerateOptions

type ImageGenerateOptions = {
    prompt: string;
    n?: number;
    size?: string;
    providerOptions?: Record<string, unknown>;
};

ImageGenerateResult

type ImageGenerateResult = {
    images: GeneratedImage[];
};

GeneratedImage

type GeneratedImage = {
    base64?: string;
    url?: string;
    revisedPrompt?: string;
};

Type Usage Examples

Building Type-Safe Conversations

import type { Message } from '@coreai/core';

const conversation: Message[] = [
  {
    role: 'system',
    content: 'You are a helpful assistant.'
  },
  {
    role: 'user',
    content: 'Hello!'
  },
  {
    role: 'assistant',
    parts: [{ type: 'text', text: 'Hi! How can I help?' }]
  },
  {
    role: 'user',
    content: 'What is 2+2?'
  }
];

Type-Safe Tool Handling

import type { ToolCall, ToolResultMessage } from '@coreai/core';

function handleToolCall(toolCall: ToolCall): ToolResultMessage {
  // TypeScript ensures correct structure
  return {
    role: 'tool',
    toolCallId: toolCall.id,
    content: JSON.stringify({ result: 'success' })
  };
}

Working with Usage Data

import type { ChatUsage } from '@coreai/core';

function logUsage(usage: ChatUsage): void {
  console.log('Total input tokens:', usage.inputTokens);
  console.log('Total output tokens:', usage.outputTokens);
  console.log('Cache read tokens:', usage.inputTokenDetails.cacheReadTokens);
  console.log('Cache write tokens:', usage.inputTokenDetails.cacheWriteTokens);
  
  // Check for presence explicitly: a truthy check would skip a legitimate value of 0.
  if (usage.outputTokenDetails.reasoningTokens !== undefined) {
    console.log('Reasoning tokens:', usage.outputTokenDetails.reasoningTokens);
  }
}

Source Location

~/workspace/source/packages/core-ai/src/types.ts