-
-
Notifications
You must be signed in to change notification settings - Fork 0
Claude/fix portal rollout timeout e t hy i #532
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
64ef1e5
027b040
d68a2d3
a81c940
caa67e7
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -12,9 +12,9 @@ data: | |||||
| CLEAN_CLAIMS_TOPIC: "claims.clean" | ||||||
| FLAGGED_CLAIMS_TOPIC: "claims.flagged" | ||||||
| REJECTED_CLAIMS_TOPIC: "claims.rejected" | ||||||
| COSMOS_DATABASE: "CloudHealthOffice" | ||||||
| COSMOS_RULES_CONTAINER: "ScrubRules" | ||||||
| COSMOS_AUDIT_CONTAINER: "ScrubAudit" | ||||||
| MONGODB_DATABASE: "CloudHealthOffice" | ||||||
|
||||||
| MONGODB_DATABASE: "CloudHealthOffice" | |
| MONGODB_DATABASE: "cloudhealthoffice" |
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -33,10 +33,10 @@ | |||||
| "author": "Cloud Health Office Team", | ||||||
| "license": "Apache-2.0", | ||||||
| "dependencies": { | ||||||
| "@azure/cosmos": "^4.2.0", | ||||||
| "@azure/identity": "^4.13.0", | ||||||
| "@azure/keyvault-secrets": "^4.9.0", | ||||||
| "@azure/storage-blob": "^12.30.0", | ||||||
| "mongodb": "^6.12.0", | ||||||
|
||||||
| "mongodb": "^6.12.0", | |
| "@azure/cosmos": "^4.0.0", |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -8,12 +8,12 @@ | |
| * - Configurable validation rule engine | ||
| * - Standard and custom rule support | ||
| * - Kafka integration for claim routing | ||
| * - Cosmos DB for rule storage and audit | ||
| * - MongoDB for rule storage and audit | ||
| * - First-pass rate metrics tracking | ||
| */ | ||
|
|
||
| import { Kafka, Producer, Consumer, logLevel } from 'kafkajs'; | ||
| import { CosmosClient, Container, Database } from '@azure/cosmos'; | ||
| import { MongoClient, Db, Collection } from 'mongodb'; | ||
| import { BlobServiceClient, ContainerClient } from '@azure/storage-blob'; | ||
| import { DefaultAzureCredential } from '@azure/identity'; | ||
| import { v4 as uuidv4 } from 'uuid'; | ||
|
|
@@ -43,10 +43,10 @@ export class ClaimsScrubberService { | |
| private kafka: Kafka | null = null; | ||
| private producer: Producer | null = null; | ||
| private consumer: Consumer | null = null; | ||
| private cosmosClient: CosmosClient | null = null; | ||
| private database: Database | null = null; | ||
| private rulesContainer: Container | null = null; | ||
| private auditContainer: Container | null = null; | ||
| private mongoClient: MongoClient | null = null; | ||
| private database: Db | null = null; | ||
| private rulesCollection: Collection | null = null; | ||
| private auditCollection: Collection | null = null; | ||
| private blobServiceClient: BlobServiceClient | null = null; | ||
| private archiveContainer: ContainerClient | null = null; | ||
| private startTime: number; | ||
|
|
@@ -104,14 +104,12 @@ export class ClaimsScrubberService { | |
| }); | ||
| } | ||
|
|
||
| // Initialize Cosmos DB | ||
| this.cosmosClient = new CosmosClient({ | ||
| endpoint: this.config.cosmosDb.endpoint, | ||
| aadCredentials: credential, | ||
| }); | ||
| this.database = this.cosmosClient.database(this.config.cosmosDb.databaseName); | ||
| this.rulesContainer = this.database.container(this.config.cosmosDb.rulesContainerName); | ||
| this.auditContainer = this.database.container(this.config.cosmosDb.auditContainerName); | ||
| // Initialize MongoDB | ||
| this.mongoClient = new MongoClient(this.config.mongoDb.connectionString); | ||
| await this.mongoClient.connect(); | ||
| this.database = this.mongoClient.db(this.config.mongoDb.databaseName); | ||
| this.rulesCollection = this.database.collection(this.config.mongoDb.rulesCollectionName); | ||
| this.auditCollection = this.database.collection(this.config.mongoDb.auditCollectionName); | ||
|
|
||
| // Initialize Blob Storage | ||
| if (this.config.storage.connectionString) { | ||
|
|
@@ -127,29 +125,26 @@ export class ClaimsScrubberService { | |
| this.archiveContainer = this.blobServiceClient.getContainerClient(this.config.storage.containerName); | ||
| } | ||
|
|
||
| // Load custom rules from Cosmos DB | ||
| // Load custom rules from MongoDB | ||
| await this.loadCustomRules(); | ||
| } | ||
|
|
||
| /** | ||
| * Load custom rules from Cosmos DB | ||
| * Load custom rules from MongoDB | ||
| */ | ||
| private async loadCustomRules(): Promise<void> { | ||
| if (!this.rulesContainer) return; | ||
| if (!this.rulesCollection) return; | ||
|
|
||
| try { | ||
| const query = { | ||
| query: 'SELECT * FROM c WHERE c.type = @type AND c.enabled = true', | ||
| parameters: [{ name: '@type', value: 'custom' }], | ||
| }; | ||
|
|
||
| const { resources } = await this.rulesContainer.items.query<CustomRule>(query).fetchAll(); | ||
| const resources = await this.rulesCollection | ||
| .find<CustomRule>({ type: 'custom', enabled: true }) | ||
| .toArray(); | ||
|
|
||
| for (const rule of resources) { | ||
| this.ruleEngine.addCustomRule(rule); | ||
| } | ||
|
|
||
| console.log(`Loaded ${resources.length} custom rules from Cosmos DB`); | ||
| console.log(`Loaded ${resources.length} custom rules from MongoDB`); | ||
| } catch (error) { | ||
| console.error('Failed to load custom rules:', error); | ||
| } | ||
|
|
@@ -376,18 +371,17 @@ export class ClaimsScrubberService { | |
| } | ||
|
|
||
| /** | ||
| * Audit the validation to Cosmos DB | ||
| * Audit the validation to MongoDB | ||
| */ | ||
| private async auditValidation( | ||
| claim: X12_837_Claim, | ||
| result: ClaimValidationResult, | ||
| correlationId: string | ||
| ): Promise<void> { | ||
| if (!this.auditContainer) return; | ||
| if (!this.auditCollection) return; | ||
|
|
||
| try { | ||
| const auditRecord = { | ||
| id: uuidv4(), | ||
| claimId: claim.claimId, | ||
| claimType: claim.claimType, | ||
| patientControlNumber: claim.claimHeader.patientControlNumber, | ||
|
|
@@ -405,11 +399,11 @@ export class ClaimsScrubberService { | |
| .map(r => r.editCode), | ||
| validationTimeMs: result.totalValidationTimeMs, | ||
| correlationId, | ||
| timestamp: new Date().toISOString(), | ||
| ttl: 90 * 24 * 60 * 60, // 90 days TTL | ||
| timestamp: new Date(), | ||
| expireAt: new Date(Date.now() + 90 * 24 * 60 * 60 * 1000), // 90 days TTL | ||
| }; | ||
|
|
||
| await this.auditContainer.items.create(auditRecord); | ||
| await this.auditCollection.insertOne(auditRecord); | ||
|
Comment on lines +402 to +406
|
||
| } catch (error) { | ||
| console.error('Failed to audit claim', { claimId: claim.claimId }, error); | ||
| } | ||
|
|
@@ -419,9 +413,9 @@ export class ClaimsScrubberService { | |
| * Add a custom validation rule | ||
| */ | ||
| async addCustomRule(rule: CustomRule): Promise<void> { | ||
| // Save to Cosmos DB | ||
| if (this.rulesContainer) { | ||
| await this.rulesContainer.items.create(rule); | ||
| // Save to MongoDB | ||
| if (this.rulesCollection) { | ||
| await this.rulesCollection.insertOne(rule); | ||
| } | ||
|
|
||
| // Add to in-memory rule engine | ||
|
|
@@ -448,7 +442,7 @@ export class ClaimsScrubberService { | |
| async getHealth(): Promise<HealthStatus> { | ||
| const checks: HealthStatus['checks'] = { | ||
| kafka: await this.checkKafkaHealth(), | ||
| cosmosDb: await this.checkCosmosDbHealth(), | ||
| mongoDb: await this.checkMongoDbHealth(), | ||
| storage: await this.checkStorageHealth(), | ||
| ruleEngine: this.checkRuleEngineHealth(), | ||
| }; | ||
|
|
@@ -515,19 +509,19 @@ export class ClaimsScrubberService { | |
| } | ||
|
|
||
| /** | ||
| * Check Cosmos DB health | ||
| * Check MongoDB health | ||
| */ | ||
| private async checkCosmosDbHealth(): Promise<ComponentHealth> { | ||
| private async checkMongoDbHealth(): Promise<ComponentHealth> { | ||
| const start = Date.now(); | ||
| try { | ||
| if (!this.database) { | ||
| return { | ||
| status: 'unhealthy', | ||
| lastCheck: new Date().toISOString(), | ||
| error: 'Cosmos DB client not initialized', | ||
| error: 'MongoDB client not initialized', | ||
| }; | ||
| } | ||
| await this.database.read(); | ||
| await this.database.command({ ping: 1 }); | ||
| return { | ||
| status: 'healthy', | ||
| latencyMs: Date.now() - start, | ||
|
|
@@ -629,6 +623,7 @@ export class ClaimsScrubberService { | |
| async close(): Promise<void> { | ||
| if (this.consumer) await this.consumer.disconnect(); | ||
| if (this.producer) await this.producer.disconnect(); | ||
| if (this.mongoClient) await this.mongoClient.close(); | ||
| } | ||
| } | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -49,11 +49,11 @@ function getConfig(): ClaimsScrubberConfig { | |||||
| containerName: process.env.CLAIMS_CONTAINER || 'claims-archive', | ||||||
| archivePathPattern: '{claimType}/{status}/{yyyy}/{MM}/{dd}', | ||||||
| }, | ||||||
| cosmosDb: { | ||||||
| endpoint: process.env.COSMOS_ENDPOINT || '', | ||||||
| databaseName: process.env.COSMOS_DATABASE || 'claims-scrubbing', | ||||||
| rulesContainerName: process.env.COSMOS_RULES_CONTAINER || 'validation-rules', | ||||||
| auditContainerName: process.env.COSMOS_AUDIT_CONTAINER || 'validation-audit', | ||||||
| mongoDb: { | ||||||
| connectionString: process.env.MONGODB_CONNECTION_STRING || '', | ||||||
| databaseName: process.env.MONGODB_DATABASE || 'CloudHealthOffice', | ||||||
|
||||||
| databaseName: process.env.MONGODB_DATABASE || 'CloudHealthOffice', | |
| databaseName: process.env.MONGODB_DATABASE || 'cloudhealthoffice', |
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -5,8 +5,7 @@ metadata: | |||||
| namespace: cloudhealthoffice | ||||||
| data: | ||||||
| ASPNETCORE_ENVIRONMENT: "Production" | ||||||
| CosmosDb__DatabaseName: "CloudHealthOffice" | ||||||
| CosmosDb__ContainerName: "Encounters" | ||||||
| MongoDb__DatabaseName: "CloudHealthOffice" | ||||||
|
||||||
| MongoDb__DatabaseName: "CloudHealthOffice" | |
| MongoDb__DatabaseName: "cloudhealthoffice" |
Copilot
AI
Mar 21, 2026
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This reduces the Deployment replica count from 3 to 1 (and HPA minReplicas to 1). If this manifest is used for production, a single replica reduces availability during node drain/rollouts and can cause brief downtime. If the goal is to avoid rollout timeouts due to capacity, consider keeping minReplicas >= 2 and adjusting resources/rollout strategy instead, or document that this manifest is for dev/staging.
Copilot
AI
Mar 21, 2026
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Lowering HPA minReplicas to 1 reduces baseline availability and can introduce downtime during maintenance/rollouts. If this is intended as a capacity/rollout-timeout mitigation, consider adding a note about environment scope (dev/staging) or keeping minReplicas >= 2 for production HA.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -90,7 +90,7 @@ metadata: | |
| name: reference-data-service | ||
| namespace: cloudhealthoffice | ||
| spec: | ||
| replicas: 2 | ||
| replicas: 1 | ||
| selector: | ||
|
Comment on lines 92 to 94
|
||
| matchLabels: | ||
| app: reference-data-service | ||
|
|
@@ -155,7 +155,7 @@ spec: | |
| apiVersion: apps/v1 | ||
| kind: Deployment | ||
| name: reference-data-service | ||
| minReplicas: 2 | ||
| minReplicas: 1 | ||
| maxReplicas: 10 | ||
| metrics: | ||
| - type: Resource | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Reducing the portal HPA `minReplicas` from 2 to 1 removes redundancy; a single replica means any rollout or node disruption causes full portal downtime. If this change is intended to mitigate rollout timeouts due to cluster capacity, consider keeping `minReplicas` >= 2 for production and addressing the scheduling issue via resource requests, cluster sizing, or rollout strategy instead — or document that this manifest is intended for smaller non-prod clusters.