2 changes: 2 additions & 0 deletions apps/cli/src/lib/utils/context-window.ts
@@ -42,6 +42,8 @@ function getModelIdForProvider(config: ProviderSettings): string | undefined {
return config.ollamaModelId
case "lmstudio":
return config.lmStudioModelId
case "atomic-chat":
return config.atomicChatModelId
case "openai":
return config.openAiModelId
case "requesty":
1 change: 1 addition & 0 deletions packages/types/src/global-settings.ts
@@ -255,6 +255,7 @@ export const SECRET_STATE_KEYS = [
"awsSessionToken",
"openAiApiKey",
"ollamaApiKey",
"atomicChatApiKey",
"geminiApiKey",
"openAiNativeApiKey",
"deepSeekApiKey",
13 changes: 12 additions & 1 deletion packages/types/src/provider-settings.ts
@@ -47,7 +47,7 @@ export const isDynamicProvider = (key: string): key is DynamicProvider =>
* Local providers require localhost API calls in order to get the model list.
*/

export const localProviders = ["ollama", "lmstudio"] as const
export const localProviders = ["ollama", "lmstudio", "atomic-chat"] as const

export type LocalProvider = (typeof localProviders)[number]

@@ -274,6 +274,12 @@ const lmStudioSchema = baseProviderSettingsSchema.extend({
lmStudioSpeculativeDecodingEnabled: z.boolean().optional(),
})

const atomicChatSchema = baseProviderSettingsSchema.extend({
atomicChatModelId: z.string().optional(),
atomicChatBaseUrl: z.string().optional(),
atomicChatApiKey: z.string().optional(),
})

const geminiSchema = apiModelIdProviderModelSchema.extend({
geminiApiKey: z.string().optional(),
googleGeminiBaseUrl: z.string().optional(),
@@ -394,6 +400,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
ollamaSchema.merge(z.object({ apiProvider: z.literal("ollama") })),
vsCodeLmSchema.merge(z.object({ apiProvider: z.literal("vscode-lm") })),
lmStudioSchema.merge(z.object({ apiProvider: z.literal("lmstudio") })),
atomicChatSchema.merge(z.object({ apiProvider: z.literal("atomic-chat") })),
geminiSchema.merge(z.object({ apiProvider: z.literal("gemini") })),
geminiCliSchema.merge(z.object({ apiProvider: z.literal("gemini-cli") })),
openAiCodexSchema.merge(z.object({ apiProvider: z.literal("openai-codex") })),
@@ -427,6 +434,7 @@ export const providerSettingsSchema = z.object({
...ollamaSchema.shape,
...vsCodeLmSchema.shape,
...lmStudioSchema.shape,
...atomicChatSchema.shape,
...geminiSchema.shape,
...geminiCliSchema.shape,
...openAiCodexSchema.shape,
@@ -473,6 +481,7 @@ export const modelIdKeys = [
"ollamaModelId",
"lmStudioModelId",
"lmStudioDraftModelId",
"atomicChatModelId",
"requestyModelId",
"unboundModelId",
"litellmModelId",
@@ -504,6 +513,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
"openai-native": "openAiModelId",
ollama: "ollamaModelId",
lmstudio: "lmStudioModelId",
"atomic-chat": "atomicChatModelId",
gemini: "apiModelId",
"gemini-cli": "apiModelId",
mistral: "apiModelId",
@@ -636,4 +646,5 @@ export const MODELS_BY_PROVIDER: Record<
// Local providers; models discovered from localhost endpoints.
lmstudio: { id: "lmstudio", label: "LM Studio", models: [] },
ollama: { id: "ollama", label: "Ollama", models: [] },
"atomic-chat": { id: "atomic-chat", label: "Atomic Chat", models: [] },
}
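With the schema, model-id key lists, and provider map extended, an atomic-chat configuration validates and resolves its model id like any other local provider. A minimal sketch of the validation path, assuming these symbols are re-exported from @roo-code/types and using a hypothetical model id:

```ts
import { providerSettingsSchemaDiscriminated } from "@roo-code/types" // re-export path assumed

// Hedged sketch: validate an atomic-chat config against the new discriminated variant.
const parsed = providerSettingsSchemaDiscriminated.safeParse({
	apiProvider: "atomic-chat",
	atomicChatBaseUrl: "http://127.0.0.1:1337", // default local endpoint used by the handler later in this PR
	atomicChatApiKey: "optional-key",
	atomicChatModelId: "some-local-model", // hypothetical id; real ids are discovered at runtime
})

if (parsed.success) {
	console.log(parsed.data.apiProvider) // "atomic-chat"
}
```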
2 changes: 2 additions & 0 deletions packages/types/src/providers/index.ts
@@ -97,6 +97,8 @@ export function getProviderDefaultModelId(
return "" // Ollama uses dynamic model selection
case "lmstudio":
return "" // LMStudio uses dynamic model selection
case "atomic-chat":
return "" // Atomic Chat uses dynamic model selection
case "vscode-lm":
return vscodeLlmDefaultModelId
case "sambanova":
3 changes: 3 additions & 0 deletions packages/types/src/vscode-extension-host.ts
@@ -39,6 +39,7 @@ export interface ExtensionMessage {
| "openAiModels"
| "ollamaModels"
| "lmStudioModels"
| "atomicChatModels"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "updatePrompt"
@@ -126,6 +127,7 @@ export interface ExtensionMessage {
openAiModels?: string[]
ollamaModels?: ModelRecord
lmStudioModels?: ModelRecord
atomicChatModels?: ModelRecord
vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
mcpServers?: McpServer[]
commits?: GitCommit[]
@@ -402,6 +404,7 @@ export interface WebviewMessage {
| "requestOpenAiModels"
| "requestOllamaModels"
| "requestLmStudioModels"
| "requestAtomicChatModels"
| "requestVsCodeLmModels"
| "openImage"
| "saveImage"
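These message-type additions follow the same request/response pattern as the existing ollamaModels and lmStudioModels messages. A hedged sketch of handling the reply on the webview side; the handler function itself is illustrative, not code from this PR, and the @roo-code/types re-export is assumed:

```ts
import type { ExtensionMessage } from "@roo-code/types" // re-export path assumed

// Illustrative handler: pull Atomic Chat model ids out of the extension host's reply.
function handleExtensionMessage(message: ExtensionMessage): string[] {
	if (message.type === "atomicChatModels" && message.atomicChatModels) {
		// atomicChatModels is a ModelRecord keyed by model id.
		return Object.keys(message.atomicChatModels)
	}
	return []
}
```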
3 changes: 3 additions & 0 deletions src/api/index.ts
@@ -15,6 +15,7 @@ import {
OpenAiHandler,
OpenAiCodexHandler,
LmStudioHandler,
AtomicChatHandler,
GeminiHandler,
OpenAiNativeHandler,
DeepSeekHandler,
@@ -137,6 +138,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new NativeOllamaHandler(options)
case "lmstudio":
return new LmStudioHandler(options)
case "atomic-chat":
return new AtomicChatHandler(options)
case "gemini":
return new GeminiHandler(options)
case "openai-codex":
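With the factory case in place, an atomic-chat configuration routes to the new handler. A hedged sketch; the import paths and model id are illustrative assumptions:

```ts
import type { ProviderSettings } from "@roo-code/types"
import { buildApiHandler } from "../api" // import path assumed; the factory lives in src/api/index.ts

const settings: ProviderSettings = {
	apiProvider: "atomic-chat",
	atomicChatBaseUrl: "http://127.0.0.1:1337",
	atomicChatModelId: "some-local-model", // hypothetical; normally chosen from the discovered list
}

// buildApiHandler dispatches on apiProvider and returns an AtomicChatHandler here.
const handler = buildApiHandler(settings)
```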
207 changes: 207 additions & 0 deletions src/api/providers/atomic-chat.ts
@@ -0,0 +1,207 @@
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"

import type { ApiHandlerOptions } from "../../shared/api"

import { NativeToolCallParser } from "../../core/assistant-message/NativeToolCallParser"
import { TagMatcher } from "../../utils/tag-matcher"

import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"

import { BaseProvider } from "./base-provider"
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
import { getModelsFromCache } from "./fetchers/modelCache"
import { getApiRequestTimeout } from "./utils/timeout-config"
import { handleOpenAIError } from "./utils/openai-error-handler"
import { DEFAULT_HEADERS } from "./constants"

/**
* Atomic Chat — local OpenAI-compatible API (default http://127.0.0.1:1337/v1).
* @see https://github.com/AtomicBot-ai/Atomic-Chat
*/
export class AtomicChatHandler extends BaseProvider implements SingleCompletionHandler {
protected options: ApiHandlerOptions
private client: OpenAI
private readonly providerName = "Atomic Chat"

constructor(options: ApiHandlerOptions) {
super()
this.options = options

const baseRoot = (this.options.atomicChatBaseUrl || "http://127.0.0.1:1337").replace(/\/+$/, "")

Check failure on line 34 in src/api/providers/atomic-chat.ts (GitHub Advanced Security / CodeQL): Polynomial regular expression used on uncontrolled data. This regular expression depends on library input and may run slow on strings with many repetitions of '/'.
const apiKey = this.options.atomicChatApiKey?.trim() || "noop"

this.client = new OpenAI({
baseURL: `${baseRoot}/v1`,
apiKey,
timeout: getApiRequestTimeout(),
defaultHeaders: {
...DEFAULT_HEADERS,
},
})
}

override async *createMessage(
systemPrompt: string,
messages: Anthropic.Messages.MessageParam[],
metadata?: ApiHandlerCreateMessageMetadata,
): ApiStream {
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
...convertToOpenAiMessages(messages),
]

const toContentBlocks = (
blocks: Anthropic.Messages.MessageParam[] | string,
): Anthropic.Messages.ContentBlockParam[] => {
if (typeof blocks === "string") {
return [{ type: "text", text: blocks }]
}

const result: Anthropic.Messages.ContentBlockParam[] = []
for (const msg of blocks) {
if (typeof msg.content === "string") {
result.push({ type: "text", text: msg.content })
} else if (Array.isArray(msg.content)) {
for (const part of msg.content) {
if (part.type === "text") {
result.push({ type: "text", text: part.text })
}
}
}
}
return result
}

let inputTokens = 0
try {
inputTokens = await this.countTokens([{ type: "text", text: systemPrompt }, ...toContentBlocks(messages)])
} catch (err) {
console.error("[AtomicChat] Failed to count input tokens:", err)
inputTokens = 0
}

let assistantText = ""

try {
const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {
model: this.getModel().id,
messages: openAiMessages,
temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
stream: true,
tools: this.convertToolsForOpenAI(metadata?.tools),
tool_choice: metadata?.tool_choice,
parallel_tool_calls: metadata?.parallelToolCalls ?? true,
}

let results
try {
results = await this.client.chat.completions.create(params)
} catch (error) {
throw handleOpenAIError(error, this.providerName)
}

const matcher = new TagMatcher(
"think",
(chunk) =>
({
type: chunk.matched ? "reasoning" : "text",
text: chunk.data,
}) as const,
)

for await (const chunk of results) {
const delta = chunk.choices[0]?.delta
const finishReason = chunk.choices[0]?.finish_reason

if (delta?.content) {
assistantText += delta.content
for (const processedChunk of matcher.update(delta.content)) {
yield processedChunk
}
}

if (delta?.tool_calls) {
for (const toolCall of delta.tool_calls) {
yield {
type: "tool_call_partial",
index: toolCall.index,
id: toolCall.id,
name: toolCall.function?.name,
arguments: toolCall.function?.arguments,
}
}
}

if (finishReason) {
const endEvents = NativeToolCallParser.processFinishReason(finishReason)
for (const event of endEvents) {
yield event
}
}
}

for (const processedChunk of matcher.final()) {
yield processedChunk
}

let outputTokens = 0
try {
outputTokens = await this.countTokens([{ type: "text", text: assistantText }])
} catch (err) {
console.error("[AtomicChat] Failed to count output tokens:", err)
outputTokens = 0
}

yield {
type: "usage",
inputTokens,
outputTokens,
} as const
} catch {
throw new Error(
"Atomic Chat request failed. Ensure the app is running, the local API server is enabled, and the model is loaded with enough context for Roo Code.",
)
}
}

override getModel(): { id: string; info: ModelInfo } {
const models = getModelsFromCache("atomic-chat")
if (models && this.options.atomicChatModelId && models[this.options.atomicChatModelId]) {
return {
id: this.options.atomicChatModelId,
info: models[this.options.atomicChatModelId],
}
}
return {
id: this.options.atomicChatModelId || "",
info: openAiModelInfoSaneDefaults,
}
}

async completePrompt(prompt: string): Promise<string> {
try {
const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {
model: this.getModel().id,
messages: [{ role: "user", content: prompt }],
temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
stream: false,
}

let response
try {
response = await this.client.chat.completions.create(params)
} catch (error) {
throw handleOpenAIError(error, this.providerName)
}
return response.choices[0]?.message.content || ""
} catch {
throw new Error(
"Atomic Chat request failed. Ensure the app is running and the local API server is reachable.",
)
}
}
}
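Used directly, the handler behaves like the LM Studio and Ollama handlers: it talks to a local OpenAI-compatible endpoint and counts tokens itself. A hedged usage sketch; the import path and model id are assumptions:

```ts
import { AtomicChatHandler } from "./atomic-chat" // path relative to src/api/providers, assumed

async function demo(): Promise<string> {
	const handler = new AtomicChatHandler({
		atomicChatBaseUrl: "http://127.0.0.1:1337", // the handler appends /v1 itself
		atomicChatModelId: "some-local-model", // hypothetical id reported by the local server
	})
	// Non-streaming one-shot completion; createMessage() is the streaming chat path.
	return handler.completePrompt("Summarize this file in one sentence.")
}
```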
39 changes: 39 additions & 0 deletions src/api/providers/fetchers/atomic-chat.ts
@@ -0,0 +1,39 @@
import axios from "axios"
import type { ModelInfo, ModelRecord } from "@roo-code/types"
import { openAiModelInfoSaneDefaults } from "@roo-code/types"

/**
* Fetches model IDs from Atomic Chat's OpenAI-compatible API.
* @see https://github.com/AtomicBot-ai/Atomic-Chat
*/
export async function getAtomicChatModels(baseUrl = "http://127.0.0.1:1337", apiKey?: string): Promise<ModelRecord> {
const models: ModelRecord = {}
const root = baseUrl === "" ? "http://127.0.0.1:1337" : baseUrl.replace(/\/+$/, "")

try {
if (!URL.canParse(root)) {
return models
}

const headers: Record<string, string> = {}
if (apiKey?.trim()) {
headers.Authorization = `Bearer ${apiKey.trim()}`
}

const response = await axios.get<{ data?: Array<{ id: string }> }>(`${root}/v1/models`, {
headers,
timeout: 10_000,
})

const list = response.data?.data ?? []
for (const entry of list) {
if (entry?.id) {
models[entry.id] = { ...openAiModelInfoSaneDefaults }
}
}

return models
} catch {
return models
}
}
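A quick usage sketch for the fetcher; only the relative import path is an assumption:

```ts
import { getAtomicChatModels } from "./atomic-chat" // relative to src/api/providers/fetchers

async function listAtomicChatModelIds(): Promise<string[]> {
	// Returns an empty record on any error, so callers need no try/catch here.
	const models = await getAtomicChatModels("http://127.0.0.1:1337")
	// Every discovered id is mapped to openAiModelInfoSaneDefaults.
	return Object.keys(models)
}
```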
4 changes: 4 additions & 0 deletions src/api/providers/fetchers/modelCache.ts
@@ -23,6 +23,7 @@ import { getLiteLLMModels } from "./litellm"
import { GetModelsOptions } from "../../../shared/api"
import { getOllamaModels } from "./ollama"
import { getLMStudioModels } from "./lmstudio"
import { getAtomicChatModels } from "./atomic-chat"
import { getPoeModels } from "./poe"

const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })
@@ -81,6 +82,9 @@ async function fetchModelsFromProvider(options: GetModelsOptions): Promise<Model
case "lmstudio":
models = await getLMStudioModels(options.baseUrl)
break
case "atomic-chat":
models = await getAtomicChatModels(options.baseUrl, options.apiKey)
break
case "vercel-ai-gateway":
models = await getVercelAiGatewayModels()
break
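Once this branch has populated the five-minute memory cache, the handler's getModel() can resolve model metadata synchronously. A small sketch of that lookup, mirroring how AtomicChatHandler.getModel() uses the cache:

```ts
import { getModelsFromCache } from "./modelCache"

// Hedged sketch: look up cached metadata for one Atomic Chat model id.
function resolveAtomicChatModel(modelId: string) {
	const models = getModelsFromCache("atomic-chat")
	// Undefined when the cache is cold or the id is unknown; AtomicChatHandler
	// then falls back to openAiModelInfoSaneDefaults.
	return models?.[modelId]
}
```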