Commit 81bbfa1

fixes
1 parent dac4331 · commit 81bbfa1

20 files changed: +513 -831 lines changed

src/commands/run.ts

Lines changed: 1 addition & 2 deletions
@@ -141,7 +141,6 @@ export class Run extends Command {

     const options: CrackedAgentOptions = {
       ...config,
-      model: this.modelManager.getCurrentModel(),
       options: this.parseOptions(config.options || ""),
       provider: config.provider as LLMProviderType,
     };
@@ -152,7 +151,7 @@ export class Run extends Command {
     }

     console.log(
-      `Using ${options.provider} provider and model: ${options.model}`,
+      `Using ${options.provider} provider and model: ${this.modelManager.getCurrentModel()}`,
     );

     const agent = container.resolve(CrackedAgent);
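
With `model` removed from the options object, the active model is read from `ModelManager` at the moment it is needed rather than cached in `options`. A minimal TypeScript sketch of the idea (the class shape here is assumed for illustration, not copied from the repo):

// Hypothetical stand-in for the real ModelManager singleton.
class ModelManagerSketch {
  // Illustrative default; the real manager reads this from config.
  private currentModel = "anthropic/claude-3.5-sonnet:beta";

  getCurrentModel(): string {
    return this.currentModel;
  }

  setCurrentModel(model: string): void {
    this.currentModel = model;
  }
}

const modelManager = new ModelManagerSketch();
modelManager.setCurrentModel("openai/gpt-4o-2024-11-20");

// Reading at call time always reflects the latest model, unlike a value
// copied into an options object once at startup.
console.log(`Using model: ${modelManager.getCurrentModel()}`);

This keeps a single source of truth when other services swap models mid-run.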

src/constants/modelScaling.ts

Lines changed: 0 additions & 64 deletions
This file was deleted.

src/jest/jestSetupFilesAfterEnv.ts

Lines changed: 38 additions & 79 deletions
@@ -1,84 +1,5 @@
 import "reflect-metadata";

-// Mock modelScaling module
-jest.mock("@constants/modelScaling", () => ({
-  __esModule: true,
-  autoScaleAvailableModels: [
-    {
-      id: "qwen/qwen-2.5-coder-32b-instruct",
-      description: "Cheap, fast, slightly better than GPT4o-mini",
-      maxWriteTries: 2,
-      maxGlobalTries: 5,
-    },
-    {
-      id: "anthropic/claude-3.5-sonnet:beta",
-      description: "Scaled model for retry attempts",
-      maxWriteTries: 3,
-      maxGlobalTries: 10,
-    },
-    {
-      id: "openai/gpt-4o-2024-11-20",
-      description: "Scaled model for retry attempts",
-      maxWriteTries: 5,
-      maxGlobalTries: 15,
-    },
-    {
-      id: "openai/o1-mini",
-      description: "Final model for complex cases (currently inactive)",
-      maxWriteTries: 2,
-      maxGlobalTries: 20,
-    },
-  ],
-  getModelForTryCount: (
-    tryCount: string | null,
-    globalTries: number,
-  ): string => {
-    const models = [
-      {
-        id: "qwen/qwen-2.5-coder-32b-instruct",
-        maxWriteTries: 2,
-        maxGlobalTries: 5,
-      },
-      {
-        id: "anthropic/claude-3.5-sonnet:beta",
-        maxWriteTries: 3,
-        maxGlobalTries: 10,
-      },
-      {
-        id: "openai/gpt-4o-2024-11-20",
-        maxWriteTries: 5,
-        maxGlobalTries: 15,
-      },
-      {
-        id: "openai/o1-mini",
-        maxWriteTries: 2,
-        maxGlobalTries: 20,
-      },
-    ];
-
-    if (!tryCount) return models[0].id;
-
-    const tries = parseInt(tryCount, 10);
-
-    for (let i = 0; i < models.length; i++) {
-      const previousTriesSum = models
-        .slice(0, i)
-        .reduce((sum, model) => sum + model.maxWriteTries, 0);
-
-      if (
-        tries >= previousTriesSum + models[i].maxWriteTries ||
-        globalTries >= models[i].maxGlobalTries
-      ) {
-        continue;
-      }
-
-      return models[i].id;
-    }
-
-    return models[models.length - 1].id;
-  },
-}));
-
 // Mock chalk with a default export that matches how it's used
 jest.mock("chalk", () => ({
   __esModule: true,
@@ -102,3 +23,41 @@ jest.mock("chalk", () => ({
     strikethrough: jest.fn((text) => text),
   },
 }));
+
+// Global mock for ModelInfo
+jest.mock("@services/LLM/ModelInfo", () => {
+  const originalModule = jest.requireActual("@services/LLM/ModelInfo");
+
+  return {
+    ...originalModule,
+    ModelInfo: jest.fn().mockImplementation(() => ({
+      initialize: jest.fn().mockResolvedValue(undefined),
+      ensureInitialized: jest.fn().mockResolvedValue(undefined),
+      getCurrentModel: jest.fn().mockReturnValue("gpt-4"),
+      getCurrentModelInfo: jest.fn().mockReturnValue({
+        id: "gpt-4",
+        context_length: 8192,
+        top_provider: {
+          max_completion_tokens: 4096,
+        },
+      }),
+      setCurrentModel: jest.fn().mockResolvedValue(undefined),
+      getModelInfo: jest.fn().mockResolvedValue({
+        id: "gpt-4",
+        context_length: 8192,
+        top_provider: {
+          max_completion_tokens: 4096,
+        },
+      }),
+      isModelAvailable: jest.fn().mockResolvedValue(true),
+      getAllModels: jest.fn().mockResolvedValue(["gpt-4"]),
+      getCurrentModelContextLength: jest.fn().mockResolvedValue(8192),
+      getModelContextLength: jest.fn().mockResolvedValue(8192),
+      getCurrentModelMaxCompletionTokens: jest.fn().mockResolvedValue(4096),
+      getModelMaxCompletionTokens: jest.fn().mockResolvedValue(4096),
+      logCurrentModelUsage: jest.fn().mockResolvedValue(undefined),
+    })),
+  };
+});
+
+process.env.IS_UNIT_TEST = "true";
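
Because this mock lives in the global Jest setup file, every suite that constructs `ModelInfo` receives the stub without a per-file `jest.mock` call. A hedged example of a test relying on it (the suite itself is illustrative, not part of this commit):

import { ModelInfo } from "@services/LLM/ModelInfo";

describe("ModelInfo global mock", () => {
  it("returns the stubbed model data", async () => {
    // The mocked constructor ignores dependencies, so cast for the sketch.
    const modelInfo = new (ModelInfo as any)();

    expect(modelInfo.getCurrentModel()).toBe("gpt-4");
    await expect(modelInfo.getModelContextLength("gpt-4")).resolves.toBe(8192);
    await expect(modelInfo.isModelAvailable("gpt-4")).resolves.toBe(true);
  });
});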

src/services/ConfigService.ts

Lines changed: 8 additions & 10 deletions
@@ -24,16 +24,14 @@ const configSchema = z.object({
     .string()
     .optional()
     .default("anthropic/claude-3.5-sonnet:beta"),
-  autoScaleAvailableModels: z
-    .array(
-      z.object({
-        id: z.string(),
-        description: z.string(),
-        maxWriteTries: z.number(),
-        maxGlobalTries: z.number(),
-      }),
-    )
-    .optional(),
+  autoScaleAvailableModels: z.array(
+    z.object({
+      id: z.string(),
+      description: z.string(),
+      maxWriteTries: z.number(),
+      maxGlobalTries: z.number(),
+    }),
+  ),
   runAllTestsCmd: z.string().optional(),
   runOneTestCmd: z.string().optional(),
   runTypeCheckCmd: z.string().optional(),
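
Dropping `.optional()` makes `autoScaleAvailableModels` a required field: `configSchema.parse()` now throws a `ZodError` when it is missing. A sketch of a config object that satisfies the updated schema (entry values borrowed from the model list deleted elsewhere in this commit):

// Hypothetical user config; autoScaleAvailableModels must now be present.
const config = {
  autoScaleAvailableModels: [
    {
      id: "qwen/qwen-2.5-coder-32b-instruct",
      description: "Cheap, fast, slightly better than GPT4o-mini",
      maxWriteTries: 2,
      maxGlobalTries: 5,
    },
    {
      id: "anthropic/claude-3.5-sonnet:beta",
      description: "Scaled model for retry attempts",
      maxWriteTries: 3,
      maxGlobalTries: 10,
    },
  ],
  // The command fields remain optional and may be omitted.
};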

src/services/CrackedAgent.ts

Lines changed: 10 additions & 16 deletions
@@ -1,20 +1,19 @@
+import { DEFAULT_INSTRUCTIONS } from "@constants/defaultInstructions";
 import { FileReader } from "@services/FileManagement/FileReader";
 import { ActionsParser } from "@services/LLM/actions/ActionsParser";
 import { ILLMProvider } from "@services/LLM/ILLMProvider";
 import { LLMContextCreator } from "@services/LLM/LLMContextCreator";
 import { LLMProvider, LLMProviderType } from "@services/LLM/LLMProvider";
-import { ModelScaler } from "@services/LLM/ModelScaler";
 import { DebugLogger } from "@services/logging/DebugLogger";
 import { StreamHandler } from "@services/streaming/StreamHandler";
-import { HtmlEntityDecoder } from "@services/text/HTMLEntityDecoder";
 import { autoInjectable, singleton } from "tsyringe";
-import { DEFAULT_INSTRUCTIONS } from "@constants/defaultInstructions";
+import { ModelManager } from "./LLM/ModelManager";
+import { PhaseManager } from "./LLM/PhaseManager";

 export interface CrackedAgentOptions {
   root?: string;
   instructionsPath?: string;
   instructions?: string;
-  model: string;
   provider?: LLMProviderType;
   stream?: boolean;
   debug?: boolean;
@@ -41,16 +40,18 @@ export class CrackedAgent {
     private debugLogger: DebugLogger,
     private actionsParser: ActionsParser,
     private streamHandler: StreamHandler,
-    private htmlEntityDecoder: HtmlEntityDecoder,
-    private modelScaler: ModelScaler,
+    private phaseManager: PhaseManager,
+    private modelManager: ModelManager,
   ) {}

   async execute(
     message: string,
     options: CrackedAgentOptions,
   ): Promise<ExecutionResult> {
+    this.phaseManager.initializePhaseConfigs();
+
     const finalOptions = await this.setupExecution(options);
-    this.currentModel = finalOptions.model;
+    this.currentModel = this.modelManager.getCurrentModel();

     const formattedMessage = await this.contextCreator.create(
       message,
@@ -108,14 +109,7 @@ export class CrackedAgent {
       this.clearConversationHistory();
     }

-    // Always explicitly set auto-scaler state with the model
-    // If autoScaler is not specified, it defaults to false and uses the provided model
-    this.modelScaler.setAutoScaler(
-      finalOptions.autoScaler || false,
-      finalOptions.model,
-    );
-
-    await this.validateModel(finalOptions.model);
+    await this.validateModel(this.modelManager.getCurrentModel());
     await this.setupInstructions(finalOptions);

     return finalOptions;
@@ -286,4 +280,4 @@ export class CrackedAgent {
     this.llm.clearConversationContext();
     this.isFirstInteraction = true;
   }
-}
+}
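
Callers consequently stop passing a model. A rough sketch of the updated call site, assuming the tsyringe container wiring seen in run.ts (the option values here are illustrative):

const agent = container.resolve(CrackedAgent);

// No `model` field anymore; ModelManager decides which model is active.
const result = await agent.execute("implement the feature", {
  provider: "open-router" as LLMProviderType, // hypothetical provider value
  stream: true,
});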

src/services/LLM/LLMContextCreator.ts

Lines changed: 3 additions & 1 deletion
@@ -96,7 +96,7 @@ Run Type Check: ${runTypeCheckCmd}`;
     const phaseConfig = this.phaseManager.getCurrentPhaseConfig();

     const envDetails = config.includeAllFilesOnEnvToContext
-      ? `\n${context.environmentDetails}`
+      ? context.environmentDetails
       : "";

     const promptArgs: IPhasePromptArgs = {
@@ -119,6 +119,8 @@ ${context.message}
 - Break tasks into prioritized steps.
 - Use available actions sequentially.

+${envDetails ? `\n${envDetails}` : ""}
+${context.projectInfo ? `\n${context.projectInfo}` : ""}

 ## Instructions
 ${phaseConfig.generatePrompt(promptArgs)}
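
Both added lines use the same guard-inside-template pattern: a section is interpolated only when non-empty and carries its own leading newline, so empty sections contribute nothing at all. A standalone sketch with hypothetical values:

const envDetails = "# Environment\nnode 20, jest 29";
const projectInfo = ""; // empty sections vanish entirely

const prompt = `- Break tasks into prioritized steps.
- Use available actions sequentially.
${envDetails ? `\n${envDetails}` : ""}${projectInfo ? `\n${projectInfo}` : ""}`;

// projectInfo adds nothing here, not even a blank line.
console.log(prompt);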

src/services/LLM/ModelInfo.ts

Lines changed: 27 additions & 17 deletions
@@ -47,25 +47,25 @@ export class ModelInfo {
       return; // Already tracking this model
     }

-    const modelInfo = this.modelInfoMap.get(modelId);
-    if (modelInfo) {
-      this.currentModel = modelId;
-      this.currentModelInfo = modelInfo;
-      this.debugLogger.log("ModelInfo", "Current model info", {
-        model: modelId,
-        contextLength: modelInfo.context_length,
-        maxCompletionTokens: modelInfo.top_provider.max_completion_tokens,
-      });
-    } else {
-      const availableModels = Array.from(this.modelInfoMap.keys());
-      this.debugLogger.log("ModelInfo", "Model not found in available models", {
-        modelId,
-        availableModels,
-      });
+    if (!(await this.isModelAvailable(modelId))) {
       throw new Error(
-        `Invalid model: ${modelId}. Available models: ${availableModels.join(", ")}`,
+        `Invalid model: ${modelId}. Available models: ${Array.from(this.modelInfoMap.keys()).join(", ")}`,
       );
     }
+
+    const modelInfo = this.modelInfoMap.get(modelId)!; // Safe to use ! here since we checked availability
+    this.currentModel = modelId;
+    this.currentModelInfo = modelInfo;
+
+    if (!modelInfo) {
+      return;
+    }
+
+    this.debugLogger.log("ModelInfo", "Current model info", {
+      model: modelId,
+      contextLength: modelInfo.context_length,
+      maxCompletionTokens: modelInfo.top_provider.max_completion_tokens,
+    });
   }

   getCurrentModel(): string | null {
@@ -99,7 +99,17 @@ export class ModelInfo {

   async isModelAvailable(modelId: string): Promise<boolean> {
     await this.ensureInitialized();
-    return this.modelInfoMap.has(modelId);
+    const available = this.modelInfoMap.has(modelId);
+
+    if (!available) {
+      const availableModels = Array.from(this.modelInfoMap.keys());
+      this.debugLogger.log("ModelInfo", "Model not found in available models", {
+        modelId,
+        availableModels,
+      });
+    }
+
+    return available;
   }

   async getModelMaxCompletionTokens(modelId: string): Promise<number> {
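
The reworked `setCurrentModel` funnels validation through `isModelAvailable`, so an unknown id both throws and leaves a debug log listing the alternatives. A rough usage sketch (model ids are illustrative; resolution via tsyringe as elsewhere in the repo):

const modelInfo = container.resolve(ModelInfo);

await modelInfo.setCurrentModel("anthropic/claude-3.5-sonnet:beta"); // ok

try {
  await modelInfo.setCurrentModel("no-such-model");
} catch (err) {
  // "Invalid model: no-such-model. Available models: ..."
  console.error((err as Error).message);
}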

src/services/LLM/ModelManager.ts

Lines changed: 2 additions & 3 deletions
@@ -1,4 +1,3 @@
-import { getModelForTryCount } from "@constants/modelScaling";
 import { DebugLogger } from "@services/logging/DebugLogger";
 import { singleton } from "tsyringe";
 import { ConfigService } from "../ConfigService";
@@ -15,9 +14,9 @@ export class ModelManager {
     private messageContextManager: MessageContextManager,
     private configService: ConfigService,
   ) {
-    // Initialize with discovery phase model from config or fallback to default
+    // Initialize with discovery phase model from config
     const config = this.configService.getConfig();
-    this.currentModel = config.discoveryModel || getModelForTryCount(null, 0);
+    this.currentModel = config.discoveryModel;
     this.modelInfo.setCurrentModel(this.currentModel);
     this.debugLogger.log("Model", "Initialized model manager", {
       model: this.currentModel,
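
Removing the fallback relies on the config layer guaranteeing a value: the ConfigService hunk above shows a `.default("anthropic/claude-3.5-sonnet:beta")` on a model field just before `autoScaleAvailableModels` (the field name sits outside the excerpt, plausibly `discoveryModel`). A sketch of that zod guarantee under this assumption:

import { z } from "zod";

// Assumed field name; the default value is taken from the diff.
const schema = z.object({
  discoveryModel: z
    .string()
    .optional()
    .default("anthropic/claude-3.5-sonnet:beta"),
});

// Even an empty input parses to a populated discoveryModel,
// which is why the `|| getModelForTryCount(null, 0)` fallback was redundant.
const parsed = schema.parse({});
console.log(parsed.discoveryModel); // "anthropic/claude-3.5-sonnet:beta"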
