Skip to content

Commit 80b4a87

Browse files
committed
removed redundant tests and format
1 parent 59e50a7 commit 80b4a87

File tree

2 files changed

+1
-112
lines changed

2 files changed

+1
-112
lines changed

packages/inference/src/providers/baseten.ts

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -14,9 +14,7 @@
1414
*
1515
* Thanks!
1616
*/
17-
import {
18-
BaseConversationalTask,
19-
} from "./providerHelper.js";
17+
import { BaseConversationalTask } from "./providerHelper.js";
2018

2119
const BASETEN_API_BASE_URL = "https://inference.baseten.co";
2220

packages/inference/test/InferenceClient.spec.ts

Lines changed: 0 additions & 109 deletions
Original file line number | Diff line number | Diff line change
@@ -2357,34 +2357,6 @@ describe.skip("InferenceClient", () => {
23572357
status: "live",
23582358
task: "conversational",
23592359
},
2360-
"Qwen/Qwen3-Coder-480B-A35B-Instruct": {
2361-
provider: "baseten",
2362-
hfModelId: "Qwen/Qwen3-Coder-480B-A35B-Instruct",
2363-
providerId: "Qwen/Qwen3-Coder-480B-A35B-Instruct",
2364-
status: "live",
2365-
task: "conversational",
2366-
},
2367-
"moonshotai/Kimi-K2-Instruct-0905": {
2368-
provider: "baseten",
2369-
hfModelId: "moonshotai/Kimi-K2-Instruct-0905",
2370-
providerId: "moonshotai/Kimi-K2-Instruct-0905",
2371-
status: "live",
2372-
task: "conversational",
2373-
},
2374-
"deepseek-ai/DeepSeek-V3.1": {
2375-
provider: "baseten",
2376-
hfModelId: "deepseek-ai/DeepSeek-V3.1",
2377-
providerId: "deepseek-ai/DeepSeek-V3.1",
2378-
status: "live",
2379-
task: "conversational",
2380-
},
2381-
"deepseek-ai/DeepSeek-V3-0324": {
2382-
provider: "baseten",
2383-
hfModelId: "deepseek-ai/DeepSeek-V3-0324",
2384-
providerId: "deepseek-ai/DeepSeek-V3-0324",
2385-
status: "live",
2386-
task: "conversational",
2387-
},
23882360
};
23892361

23902362
it("chatCompletion - Qwen3 235B Instruct", async () => {
@@ -2402,63 +2374,6 @@ describe.skip("InferenceClient", () => {
24022374
}
24032375
});
24042376

2405-
it("chatCompletion - Qwen3 Coder 480B", async () => {
2406-
const res = await client.chatCompletion({
2407-
model: "Qwen/Qwen3-Coder-480B-A35B-Instruct",
2408-
provider: "baseten",
2409-
messages: [{ role: "user", content: "Write a simple Python function to add two numbers" }],
2410-
max_tokens: 100,
2411-
});
2412-
if (res.choices && res.choices.length > 0) {
2413-
const completion = res.choices[0].message?.content;
2414-
expect(completion).toBeDefined();
2415-
expect(typeof completion).toBe("string");
2416-
expect(completion).toMatch(/def.*add/i);
2417-
}
2418-
});
2419-
2420-
it("chatCompletion - Kimi K2 Instruct", async () => {
2421-
const res = await client.chatCompletion({
2422-
model: "moonshotai/Kimi-K2-Instruct-0905",
2423-
provider: "baseten",
2424-
messages: [{ role: "user", content: "What is the capital of Japan?" }],
2425-
temperature: 0.1,
2426-
});
2427-
if (res.choices && res.choices.length > 0) {
2428-
const completion = res.choices[0].message?.content;
2429-
expect(completion).toBeDefined();
2430-
expect(typeof completion).toBe("string");
2431-
expect(completion).toMatch(/tokyo/i);
2432-
}
2433-
});
2434-
2435-
it("chatCompletion - DeepSeek V3.1", async () => {
2436-
const res = await client.chatCompletion({
2437-
model: "deepseek-ai/DeepSeek-V3.1",
2438-
provider: "baseten",
2439-
messages: [{ role: "user", content: "Complete this sentence with words, one plus one is equal " }],
2440-
});
2441-
if (res.choices && res.choices.length > 0) {
2442-
const completion = res.choices[0].message?.content;
2443-
expect(completion).toContain("two");
2444-
}
2445-
});
2446-
2447-
it("chatCompletion - DeepSeek V3", async () => {
2448-
const res = await client.chatCompletion({
2449-
model: "deepseek-ai/DeepSeek-V3-0324",
2450-
provider: "baseten",
2451-
messages: [{ role: "user", content: "What is 2 * 3?" }],
2452-
temperature: 0.1,
2453-
});
2454-
if (res.choices && res.choices.length > 0) {
2455-
const completion = res.choices[0].message?.content;
2456-
expect(completion).toBeDefined();
2457-
expect(typeof completion).toBe("string");
2458-
expect(completion).toMatch(/(six|6)/i);
2459-
}
2460-
});
2461-
24622377
it("chatCompletion stream - Qwen3 235B", async () => {
24632378
const stream = client.chatCompletionStream({
24642379
model: "Qwen/Qwen3-235B-A22B-Instruct-2507",
@@ -2483,30 +2398,6 @@ describe.skip("InferenceClient", () => {
24832398
expect(fullResponse.length).toBeGreaterThan(0);
24842399
expect(fullResponse).toMatch(/1.*2.*3/);
24852400
});
2486-
2487-
it("chatCompletion stream - DeepSeek V3.1", async () => {
2488-
const stream = client.chatCompletionStream({
2489-
model: "deepseek-ai/DeepSeek-V3.1",
2490-
provider: "baseten",
2491-
messages: [{ role: "user", content: "Complete the equation 1 + 1 = , just the answer" }],
2492-
stream: true,
2493-
}) as AsyncGenerator<ChatCompletionStreamOutput>;
2494-
2495-
let fullResponse = "";
2496-
for await (const chunk of stream) {
2497-
if (chunk.choices && chunk.choices.length > 0) {
2498-
const content = chunk.choices[0].delta?.content;
2499-
if (content) {
2500-
fullResponse += content;
2501-
}
2502-
}
2503-
}
2504-
2505-
// Verify we got a meaningful response
2506-
expect(fullResponse).toBeTruthy();
2507-
expect(fullResponse.length).toBeGreaterThan(0);
2508-
expect(fullResponse).toMatch(/(two|2)/i);
2509-
});
25102401
},
25112402
TIMEOUT
25122403
);

0 commit comments

Comments (0)