Commit 4b5ebe5

feat(ollama): add ollama types and meta
1 parent: a641bad · commit: 4b5ebe5

File tree: 66 files changed (+5372, -53 lines)

Some content is hidden: large commits have part of the diff collapsed by default, so only a subset of the 66 changed files appears below.
Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
import type { ChatRequest } from 'ollama'

interface ModelMeta<TProviderOptions = unknown> {
  name: string
  providerOptions?: TProviderOptions
  supports?: {
    // ReadonlyArray so the `as const` objects below still satisfy this interface
    input?: ReadonlyArray<'text' | 'image' | 'video'>
    output?: ReadonlyArray<'text' | 'image' | 'video'>
    capabilities?: ReadonlyArray<'tools' | 'thinking' | 'vision' | 'embedding'>
  }
  size?: string
  context?: number
}

const ATHENE_V2_LATEST = {
  name: 'athene-v2:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '47gb',
  context: 32_000,
} as const satisfies ModelMeta<any>

const ATHENE_V2_72b = {
  name: 'athene-v2:72b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '47gb',
  context: 32_000,
} as const satisfies ModelMeta<any>

export const ATHENE_MODELS = [
  ATHENE_V2_LATEST.name,
  ATHENE_V2_72b.name,
] as const

const ATHENE_IMAGE_MODELS = [] as const

export const ATHENE_EMBEDDING_MODELS = [] as const

const ATHENE_AUDIO_MODELS = [] as const

const ATHENE_VIDEO_MODELS = [] as const

// export type AtheneChatModels = (typeof ATHENE_MODELS)[number]

// Manual type map for per-model provider options
export type AtheneChatModelProviderOptionsByName = {
  // Both Athene v2 variants support tool calling and take the full ChatRequest
  [ATHENE_V2_LATEST.name]: ChatRequest
  [ATHENE_V2_72b.name]: ChatRequest
}

export type AtheneModelInputModalitiesByName = {
  // Text-only input for both variants
  [ATHENE_V2_LATEST.name]: typeof ATHENE_V2_LATEST.supports.input
  [ATHENE_V2_72b.name]: typeof ATHENE_V2_72b.supports.input
}
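
The manual provider-options map is what lets a chat wrapper be typed per model name. Below is a minimal usage sketch, not part of the commit: the './athene' import path and the chatAthene helper are hypothetical, and it assumes the ollama package's default client and its chat() method.

import ollama from 'ollama'
import type { AtheneChatModelProviderOptionsByName } from './athene' // hypothetical path for the file above

type AtheneModel = keyof AtheneChatModelProviderOptionsByName
type AtheneOptions = Omit<AtheneChatModelProviderOptionsByName[AtheneModel], 'model'>

// Fills in the model name and picks the non-streaming overload of ollama.chat().
async function chatAthene(model: AtheneModel, request: AtheneOptions) {
  return ollama.chat({ ...request, model, stream: false })
}

const reply = await chatAthene('athene-v2:latest', {
  messages: [{ role: 'user', content: 'Hello' }],
})
console.log(reply.message.content)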
Lines changed: 73 additions & 0 deletions
@@ -0,0 +1,73 @@
import type { ChatRequest } from 'ollama'

interface ModelMeta<TProviderOptions = unknown> {
  name: string
  providerOptions?: TProviderOptions
  supports?: {
    // ReadonlyArray so the `as const` objects below still satisfy this interface
    input?: ReadonlyArray<'text' | 'image' | 'video'>
    output?: ReadonlyArray<'text' | 'image' | 'video'>
    capabilities?: ReadonlyArray<'tools' | 'thinking' | 'vision' | 'embedding'>
  }
  size?: string
  context?: number
}

const AYA_LATEST = {
  name: 'aya:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '4.8gb',
  context: 8_000,
} as const satisfies ModelMeta<any>

const AYA_8b = {
  name: 'aya:8b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '4.8gb',
  context: 8_000,
} as const satisfies ModelMeta<any>

const AYA_35b = {
  name: 'aya:35b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '20gb',
  context: 8_000,
} as const satisfies ModelMeta<any>

export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const

const AYA_IMAGE_MODELS = [] as const

export const AYA_EMBEDDING_MODELS = [] as const

const AYA_AUDIO_MODELS = [] as const

const AYA_VIDEO_MODELS = [] as const

// export type AyaChatModels = (typeof AYA_MODELS)[number]

// Manual type map for per-model provider options
export type AyaChatModelProviderOptionsByName = {
  // Every Aya variant takes the full ChatRequest as provider options
  [AYA_LATEST.name]: ChatRequest
  [AYA_8b.name]: ChatRequest
  [AYA_35b.name]: ChatRequest
}

export type AyaModelInputModalitiesByName = {
  // Text-only input for every variant
  [AYA_LATEST.name]: typeof AYA_LATEST.supports.input
  [AYA_8b.name]: typeof AYA_8b.supports.input
  [AYA_35b.name]: typeof AYA_35b.supports.input
}
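
Because the modalities map stores each model's supported inputs as literal tuple types, modality checks can be expressed at compile time. A small type-level sketch, assuming the file above is importable from a hypothetical './aya' path:

import type { AyaModelInputModalitiesByName } from './aya' // hypothetical path

// Resolves to true only when a model's declared input modalities include 'image'.
type AcceptsImages<TModel extends keyof AyaModelInputModalitiesByName> =
  'image' extends AyaModelInputModalitiesByName[TModel][number] ? true : false

// Every Aya variant declares text-only input, so this resolves to false.
type AyaLatestAcceptsImages = AcceptsImages<'aya:latest'>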
Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
import type { ChatRequest } from 'ollama'

interface ModelMeta<TProviderOptions = unknown> {
  name: string
  providerOptions?: TProviderOptions
  supports?: {
    // ReadonlyArray so the `as const` objects below still satisfy this interface
    input?: ReadonlyArray<'text' | 'image' | 'video'>
    output?: ReadonlyArray<'text' | 'image' | 'video'>
    capabilities?: ReadonlyArray<'tools' | 'thinking' | 'vision' | 'embedding'>
  }
  size?: string
  context?: number
}

const CODEGEMMA_LATEST = {
  name: 'codegemma:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '5gb',
  context: 8_000,
} as const satisfies ModelMeta<any>

const CODEGEMMA_2b = {
  name: 'codegemma:2b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '1.65gb',
  context: 8_000,
} as const satisfies ModelMeta<any>

const CODEGEMMA_7b = {
  name: 'codegemma:7b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '5gb',
  context: 8_000,
} as const satisfies ModelMeta<any>

export const CODEGEMMA_MODELS = [
  CODEGEMMA_LATEST.name,
  CODEGEMMA_2b.name,
  CODEGEMMA_7b.name,
] as const

const CODEGEMMA_IMAGE_MODELS = [] as const

export const CODEGEMMA_EMBEDDING_MODELS = [] as const

const CODEGEMMA_AUDIO_MODELS = [] as const

const CODEGEMMA_VIDEO_MODELS = [] as const

// export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number]

// Manual type map for per-model provider options
export type CodegemmaChatModelProviderOptionsByName = {
  // Every CodeGemma variant takes the full ChatRequest as provider options
  [CODEGEMMA_LATEST.name]: ChatRequest
  [CODEGEMMA_2b.name]: ChatRequest
  [CODEGEMMA_7b.name]: ChatRequest
}

export type CodegemmaModelInputModalitiesByName = {
  // Text-only input for every variant
  [CODEGEMMA_LATEST.name]: typeof CODEGEMMA_LATEST.supports.input
  [CODEGEMMA_2b.name]: typeof CODEGEMMA_2b.supports.input
  [CODEGEMMA_7b.name]: typeof CODEGEMMA_7b.supports.input
}
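
Each file keeps the union-type export commented out; written out, that pattern turns the as const name array into a literal union that is convenient for narrowing. A sketch for CodeGemma, assuming a hypothetical './codegemma' import path for the file above:

import { CODEGEMMA_MODELS } from './codegemma' // hypothetical path

// The commented-out pattern from the file above, spelled out:
// 'codegemma:latest' | 'codegemma:2b' | 'codegemma:7b'
type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number]

// Runtime guard that narrows an arbitrary string to that union.
function isCodegemmaModel(name: string): name is CodegemmaChatModels {
  return (CODEGEMMA_MODELS as readonly string[]).includes(name)
}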
Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
import type { ChatRequest } from 'ollama'

interface ModelMeta<TProviderOptions = unknown> {
  name: string
  providerOptions?: TProviderOptions
  supports?: {
    // ReadonlyArray so the `as const` objects below still satisfy this interface
    input?: ReadonlyArray<'text' | 'image' | 'video'>
    output?: ReadonlyArray<'text' | 'image' | 'video'>
    capabilities?: ReadonlyArray<'tools' | 'thinking' | 'vision' | 'embedding'>
  }
  size?: string
  context?: number
}

const CODELLAMA_LATEST = {
  name: 'codellama:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '3.8gb',
  context: 16_000,
} as const satisfies ModelMeta<any>

const CODELLAMA_7b = {
  name: 'codellama:7b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '3.8gb',
  context: 16_000,
} as const satisfies ModelMeta<any>

const CODELLAMA_13b = {
  name: 'codellama:13b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '7.4gb',
  context: 16_000,
} as const satisfies ModelMeta<any>

const CODELLAMA_34b = {
  name: 'codellama:34b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '19gb',
  context: 16_000,
} as const satisfies ModelMeta<any>

const CODELLAMA_70b = {
  name: 'codellama:70b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '39gb',
  context: 2_000,
} as const satisfies ModelMeta<any>

export const CODELLAMA_MODELS = [
  CODELLAMA_LATEST.name,
  CODELLAMA_7b.name,
  CODELLAMA_13b.name,
  CODELLAMA_34b.name,
  CODELLAMA_70b.name,
] as const

const CODELLAMA_IMAGE_MODELS = [] as const

export const CODELLAMA_EMBEDDING_MODELS = [] as const

const CODELLAMA_AUDIO_MODELS = [] as const

const CODELLAMA_VIDEO_MODELS = [] as const

// export type CodellamaChatModels = (typeof CODELLAMA_MODELS)[number]

// Manual type map for per-model provider options
export type CodellamaChatModelProviderOptionsByName = {
  // Every Code Llama variant takes the full ChatRequest as provider options
  [CODELLAMA_LATEST.name]: ChatRequest
  [CODELLAMA_7b.name]: ChatRequest
  [CODELLAMA_13b.name]: ChatRequest
  [CODELLAMA_34b.name]: ChatRequest
  [CODELLAMA_70b.name]: ChatRequest
}

export type CodellamaModelInputModalitiesByName = {
  // Text-only input for every variant
  [CODELLAMA_LATEST.name]: typeof CODELLAMA_LATEST.supports.input
  [CODELLAMA_7b.name]: typeof CODELLAMA_7b.supports.input
  [CODELLAMA_13b.name]: typeof CODELLAMA_13b.supports.input
  [CODELLAMA_34b.name]: typeof CODELLAMA_34b.supports.input
  [CODELLAMA_70b.name]: typeof CODELLAMA_70b.supports.input
}
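
Every metadata object uses the same "as const satisfies" pattern: as const keeps the literal name, size, and context values, while satisfies still checks the object's shape against ModelMeta without widening it. A standalone sketch of that pattern with a concrete provider-options type in place of any; the interface is repeated here because the files above do not export it, and EXAMPLE_META is purely illustrative.

import type { ChatRequest } from 'ollama'

// Mirrors the (unexported) interface declared in each model file above.
interface ModelMeta<TProviderOptions = unknown> {
  name: string
  providerOptions?: TProviderOptions
  supports?: {
    input?: ReadonlyArray<'text' | 'image' | 'video'>
    output?: ReadonlyArray<'text' | 'image' | 'video'>
    capabilities?: ReadonlyArray<'tools' | 'thinking' | 'vision' | 'embedding'>
  }
  size?: string
  context?: number
}

// `as const` keeps 'codellama:7b' and 16_000 as literal types; `satisfies` rejects
// misspelled keys (e.g. `contxt`) without widening the constant to ModelMeta.
const EXAMPLE_META = {
  name: 'codellama:7b',
  supports: { input: ['text'], output: ['text'], capabilities: [] },
  size: '3.8gb',
  context: 16_000,
} as const satisfies ModelMeta<ChatRequest>

// The literal survives: this is the type 'codellama:7b', not string.
type ExampleName = typeof EXAMPLE_META.name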
Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
import type { ChatRequest } from 'ollama'

interface ModelMeta<TProviderOptions = unknown> {
  name: string
  providerOptions?: TProviderOptions
  supports?: {
    // ReadonlyArray so the `as const` objects below still satisfy this interface
    input?: ReadonlyArray<'text' | 'image' | 'video'>
    output?: ReadonlyArray<'text' | 'image' | 'video'>
    capabilities?: ReadonlyArray<'tools' | 'thinking' | 'vision' | 'embedding'>
  }
  size?: string
  context?: number
}

const COMMAND_R_PLUS_LATEST = {
  name: 'command-r-plus:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '59gb',
  context: 128_000,
} as const satisfies ModelMeta<any>

const COMMAND_R_PLUS_104b = {
  name: 'command-r-plus:104b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '59gb',
  context: 128_000,
} as const satisfies ModelMeta<any>

export const COMMAND_R_PLUS_MODELS = [
  COMMAND_R_PLUS_LATEST.name,
  COMMAND_R_PLUS_104b.name,
] as const

const COMMAND_R_PLUS_IMAGE_MODELS = [] as const

export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const

const COMMAND_R_PLUS_AUDIO_MODELS = [] as const

const COMMAND_R_PLUS_VIDEO_MODELS = [] as const

// export type CommandRPlusChatModels = (typeof COMMAND_R_PLUS_MODELS)[number]

// Manual type map for per-model provider options
export type CommandRPlusChatModelProviderOptionsByName = {
  // Both Command R+ variants support tool calling and take the full ChatRequest
  [COMMAND_R_PLUS_LATEST.name]: ChatRequest
  [COMMAND_R_PLUS_104b.name]: ChatRequest
}

export type CommandRPlusModelInputModalitiesByName = {
  // Text-only input for both variants
  [COMMAND_R_PLUS_LATEST.name]: typeof COMMAND_R_PLUS_LATEST.supports.input
  [COMMAND_R_PLUS_104b.name]: typeof COMMAND_R_PLUS_104b.supports.input
}
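
All five families follow the same export convention (X_MODELS, X_EMBEDDING_MODELS, XChatModelProviderOptionsByName, XModelInputModalitiesByName), so they compose into a single registry. A hedged sketch of that aggregation; the import paths and the OLLAMA_CHAT_MODELS / OllamaChatModel names are assumptions and do not appear in this commit.

// Hypothetical paths -- the commit does not show where these files live.
import { ATHENE_MODELS, type AtheneChatModelProviderOptionsByName } from './athene'
import { AYA_MODELS, type AyaChatModelProviderOptionsByName } from './aya'
import { CODEGEMMA_MODELS } from './codegemma'
import { CODELLAMA_MODELS } from './codellama'
import { COMMAND_R_PLUS_MODELS } from './command-r-plus'

// One flat, readonly list of every chat model name across the families above.
export const OLLAMA_CHAT_MODELS = [
  ...ATHENE_MODELS,
  ...AYA_MODELS,
  ...CODEGEMMA_MODELS,
  ...CODELLAMA_MODELS,
  ...COMMAND_R_PLUS_MODELS,
] as const

export type OllamaChatModel = (typeof OLLAMA_CHAT_MODELS)[number]

// Intersecting the per-family maps yields one name-keyed provider-options lookup;
// the remaining families would be intersected the same way.
export type OllamaChatModelProviderOptionsByName =
  AtheneChatModelProviderOptionsByName & AyaChatModelProviderOptionsByName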
