Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion lib/ruby_llm/providers/ollama.rb
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,14 @@ class Ollama < OpenAI
include Ollama::Media
include Ollama::Models

# Ollama exposes two API surfaces:
# - Native API at /api/* (different request/response format)
# - OpenAI-compatible API at /v1/* (same format as OpenAI)
# Since this provider inherits from OpenAI, we use the /v1 endpoint
# so all OpenAI logic (chat, models, schemas) works without changes.
#
# @return [String] the configured Ollama base URL normalized to end
#   in '/v1' with no trailing slash (e.g. 'http://localhost:11434/v1')
def api_base
  # Strip any run of trailing slashes — chomp('/') would only remove one,
  # so 'http://host//' would otherwise become 'http://host//v1'.
  base = @config.ollama_api_base.to_s.sub(%r{/+\z}, '')
  base.end_with?('/v1') ? base : "#{base}/v1"
end

def headers
Expand Down
79 changes: 70 additions & 9 deletions spec/ruby_llm/providers/ollama_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -3,21 +3,82 @@
require 'spec_helper'

# Specs for the Ollama provider: header construction (optional API key)
# and base-URL normalization to the OpenAI-compatible /v1 endpoint.
RSpec.describe RubyLLM::Providers::Ollama do
  subject(:provider) { described_class.new(config) }

  # Stub configuration so these specs need no global RubyLLM setup;
  # each context overrides only the let() it cares about.
  let(:config) do
    instance_double(
      RubyLLM::Configuration,
      request_timeout: 300,
      max_retries: 3,
      retry_interval: 0.1,
      retry_interval_randomness: 0.5,
      retry_backoff_factor: 2,
      http_proxy: nil,
      ollama_api_base: ollama_api_base,
      ollama_api_key: ollama_api_key
    )
  end

  let(:ollama_api_base) { 'http://localhost:11434' }
  let(:ollama_api_key) { nil }

  describe '#headers' do
    context 'when no API key is configured' do
      let(:ollama_api_key) { nil }

      it 'returns empty headers' do
        expect(provider.headers).to eq({})
      end
    end

    context 'when API key is configured' do
      let(:ollama_api_key) { 'test-ollama-key' }

      it 'returns Authorization header' do
        expect(provider.headers).to eq({ 'Authorization' => 'Bearer test-ollama-key' })
      end
    end
  end

  describe '#api_base' do
    context 'when base URL does not include /v1' do
      let(:ollama_api_base) { 'http://localhost:11434' }

      it 'appends /v1 for OpenAI-compatible endpoint' do
        expect(provider.api_base).to eq('http://localhost:11434/v1')
      end
    end

    context 'when base URL already includes /v1' do
      let(:ollama_api_base) { 'http://localhost:11434/v1' }

      it 'does not double-append /v1' do
        expect(provider.api_base).to eq('http://localhost:11434/v1')
      end
    end

    context 'when base URL has a trailing slash' do
      let(:ollama_api_base) { 'http://localhost:11434/' }

      it 'strips trailing slash and appends /v1' do
        expect(provider.api_base).to eq('http://localhost:11434/v1')
      end
    end

    context 'when base URL has /v1/ with trailing slash' do
      let(:ollama_api_base) { 'http://localhost:11434/v1/' }

      it 'normalizes to /v1 without trailing slash' do
        expect(provider.api_base).to eq('http://localhost:11434/v1')
      end
    end

    context 'when using a custom host and port' do
      let(:ollama_api_base) { 'https://my-ollama.com:8080' }

      it 'appends /v1 to the custom base' do
        expect(provider.api_base).to eq('https://my-ollama.com:8080/v1')
      end
    end
  end
end
Loading