Commit 078734b

fetch all models
1 parent 6b7aed6 commit 078734b

File tree

1 file changed (+56, -19 lines)

src/routes/api/models/+server.ts

Lines changed: 56 additions & 19 deletions
@@ -57,14 +57,14 @@ interface ApiQueryParams {
 	pipeline_tag?: "text-generation" | "image-text-to-text";
 	filter: string;
 	inference_provider: string;
-	limit: number;
+	limit?: number;
+	skip?: number;
 	expand: string[];
 }
 
 const queryParams: ApiQueryParams = {
 	filter: "conversational",
 	inference_provider: "all",
-	limit: 100,
 	expand: ["inferenceProviderMapping", "config", "library_name", "pipeline_tag", "tags", "mask_token", "trendingScore"],
 };
 
@@ -75,7 +75,7 @@ function buildApiUrl(params: ApiQueryParams): string {
 
 	// Add simple params
 	Object.entries(params).forEach(([key, value]) => {
-		if (!Array.isArray(value)) {
+		if (!Array.isArray(value) && value !== undefined) {
 			url.searchParams.append(key, String(value));
 		}
 	});
@@ -88,6 +88,44 @@ function buildApiUrl(params: ApiQueryParams): string {
 	return url.toString();
 }
 
+async function fetchAllModelsWithPagination(
+	pipeline_tag: "text-generation" | "image-text-to-text",
+	fetch: typeof globalThis.fetch,
+): Promise<Model[]> {
+	const allModels: Model[] = [];
+	let skip = 0;
+	const batchSize = 1000;
+
+	while (true) {
+		const url = buildApiUrl({
+			...queryParams,
+			pipeline_tag,
+			limit: batchSize,
+			skip,
+		});
+
+		const response = await fetch(url, requestInit);
+
+		if (!response.ok) {
+			break;
+		}
+
+		const models: Model[] = await response.json();
+
+		if (models.length === 0) {
+			break; // No more models to fetch
+		}
+
+		allModels.push(...models);
+		skip += batchSize;
+
+		// Optional: Add a small delay to be respectful to the API
+		await new Promise(resolve => setTimeout(resolve, 100));
+	}
+
+	return allModels;
+}
+
 export type ApiModelsResponse = {
 	models: Model[];
 };
@@ -134,31 +172,30 @@ export const GET: RequestHandler = async ({ fetch }) => {
 	let imgText2TextModels: Model[] = [];
 
 	// Make the needed API calls in parallel
-	const apiPromises: Promise<Response | void>[] = [];
+	const apiPromises: Promise<void>[] = [];
 	if (needTextGenFetch) {
-		const url = buildApiUrl({ ...queryParams, pipeline_tag: "text-generation" });
 		apiPromises.push(
-			fetch(url, requestInit).then(async response => {
-				if (!response.ok) {
-					console.error(`Error fetching text-generation models`, response.status, response.statusText);
+			fetchAllModelsWithPagination("text-generation", fetch)
+				.then(models => {
+					textGenModels = models;
+				})
+				.catch(error => {
+					console.error(`Error fetching text-generation models:`, error);
 					newFailedApiCalls.textGeneration = true;
-				} else {
-					textGenModels = await response.json();
-				}
-			}),
+				}),
 		);
 	}
 
 	if (needImgTextFetch) {
 		apiPromises.push(
-			fetch(buildApiUrl({ ...queryParams, pipeline_tag: "image-text-to-text" }), requestInit).then(async response => {
-				if (!response.ok) {
-					console.error(`Error fetching image-text-to-text models`, response.status, response.statusText);
+			fetchAllModelsWithPagination("image-text-to-text", fetch)
+				.then(models => {
+					imgText2TextModels = models;
+				})
+				.catch(error => {
+					console.error(`Error fetching image-text-to-text models:`, error);
 					newFailedApiCalls.imageTextToText = true;
-				} else {
-					imgText2TextModels = await response.json();
-				}
-			}),
+				}),
 		);
 	}
 
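For reference, here is a minimal standalone sketch of the limit/skip pagination loop this commit introduces, stripped of the route-specific pieces. The names fetchAllPages and buildPageUrl are hypothetical stand-ins (in +server.ts this role is played by buildApiUrl with queryParams and requestInit); the sketch only assumes, as the diff does, that the target API honors limit and skip query parameters and returns a JSON array per page.

// Generic offset-pagination sketch, mirroring fetchAllModelsWithPagination above.
// buildPageUrl is hypothetical; supply whatever URL builder your API needs.
async function fetchAllPages<T>(
	buildPageUrl: (limit: number, skip: number) => string,
	fetchFn: typeof globalThis.fetch = globalThis.fetch,
	batchSize = 1000,
): Promise<T[]> {
	const results: T[] = [];
	let skip = 0;

	for (;;) {
		const response = await fetchFn(buildPageUrl(batchSize, skip));
		if (!response.ok) break; // stop on the first failed page, as the route does

		const page: T[] = await response.json();
		if (page.length === 0) break; // an empty page means there is nothing left

		results.push(...page);
		skip += batchSize;

		// brief pause between pages, mirroring the 100 ms delay in the commit
		await new Promise(resolve => setTimeout(resolve, 100));
	}

	return results;
}

One consequence of stopping only on an empty page is that a full final page always costs one extra request; checking page.length < batchSize instead would end the loop a round-trip earlier, at the cost of assuming the API never returns short pages mid-stream.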