
Commit cf98b9d

update migrate script
1 parent 482bb03 commit cf98b9d

File tree

1 file changed, +15 −12 lines changed


sitio/migrate-to-postgres.ts

Lines changed: 15 additions & 12 deletions
@@ -23,6 +23,7 @@ import fs from "fs";
 
 const SQLITE_DB_PATH = "../sqld-prod/iku.db/dbs/default/data";
 const POSTGRES_URL = process.env.DATABASE_URL || "postgresql://localhost:5432/milei_test";
+const BATCH_SIZE = process.env.POSTGRES_BATCH_SIZE ? parseInt(process.env.POSTGRES_BATCH_SIZE) : 1000;
 
 interface SqliteData {
   likedTweets: any[];
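
A note on the new constant: `parseInt` here has no radix and no `NaN` guard, so a malformed `POSTGRES_BATCH_SIZE` (say, `abc`) yields `NaN`; in that case `slice(0, NaN)` is an empty array and `i += NaN` ends each batch loop after one pass. A minimal defensive sketch; the `parseBatchSize` helper is hypothetical, not part of this commit:

```ts
// Hypothetical hardening (not in this commit): fall back to the default
// instead of letting NaN or a non-positive value reach the batch loops.
function parseBatchSize(raw: string | undefined, fallback = 1000): number {
  if (!raw) return fallback;
  const n = Number.parseInt(raw, 10);
  return Number.isInteger(n) && n > 0 ? n : fallback;
}

const BATCH_SIZE = parseBatchSize(process.env.POSTGRES_BATCH_SIZE);
```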
@@ -119,9 +120,10 @@ async function migrateToPostgres(data: SqliteData) {
   console.log(`\nConnecting to PostgreSQL: ${POSTGRES_URL}`);
 
   const client = postgres(POSTGRES_URL, {
-    max: 1,
-    idle_timeout: 20,
-    connect_timeout: 10,
+    max: 5,
+    idle_timeout: 60,
+    connect_timeout: 30,
+    prepare: false, // Disable prepared statements for better remote performance
   });
 
   const db = drizzle(client, { schema });
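
For readers not steeped in postgres.js defaults: `max` is the pool size and the two timeouts are in seconds. An annotated restatement of the new options (the comments are my gloss, assuming postgres.js semantics; the migration awaits each insert sequentially, so `max: 5` buys headroom rather than parallelism, and `prepare: false` is also the setting transaction-mode poolers such as PgBouncer require, which the commit's comment does not claim):

```ts
const client = postgres(POSTGRES_URL, {
  max: 5,              // pool up to 5 connections (previously 1)
  idle_timeout: 60,    // close connections idle for 60 s (previously 20 s)
  connect_timeout: 30, // wait up to 30 s for the initial handshake (previously 10 s)
  prepare: false,      // no server-side prepared statements; friendlier to
                       // remote links and transaction-mode poolers
});
```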
@@ -153,8 +155,8 @@ async function migrateToPostgres(data: SqliteData) {
     totalTweetsSeen: scrap.total_tweets_seen
   }));
 
-  // Insert in batches of 1000 to avoid stack overflow
-  const batchSize = 1000;
+  // Insert in batches to avoid stack overflow and optimize for network
+  const batchSize = BATCH_SIZE;
   for (let i = 0; i < scrapsData.length; i += batchSize) {
     const batch = scrapsData.slice(i, i + batchSize);
     await db.insert(schema.scraps).values(batch);
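
This three-line pattern repeats verbatim in the hunks below for historic liked tweets, liked tweets, retweets, and tweets, changing only the source array and target table. A hypothetical `insertInBatches` helper, sketched here and not part of the commit, would collapse the five copies:

```ts
// Hypothetical helper (not in this commit): chunked inserts so no single
// statement carries the whole table. Loosely typed; a stricter version
// would take drizzle's PgTable type for `table`.
async function insertInBatches(
  db: { insert: (table: any) => { values: (rows: any[]) => PromiseLike<unknown> } },
  table: any,
  rows: any[],
  batchSize = BATCH_SIZE,
): Promise<void> {
  for (let i = 0; i < rows.length; i += batchSize) {
    await db.insert(table).values(rows.slice(i, i + batchSize));
  }
}

// e.g. await insertInBatches(db, schema.scraps, scrapsData);
```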
@@ -174,7 +176,7 @@ async function migrateToPostgres(data: SqliteData) {
   }));
 
   // Insert in batches
-  const batchSize = 1000;
+  const batchSize = BATCH_SIZE;
   for (let i = 0; i < historicData.length; i += batchSize) {
     const batch = historicData.slice(i, i + batchSize);
     await db.insert(schema.historicLikedTweets).values(batch);
@@ -195,7 +197,7 @@ async function migrateToPostgres(data: SqliteData) {
   }));
 
   // Insert in batches
-  const batchSize = 1000;
+  const batchSize = BATCH_SIZE;
   for (let i = 0; i < likedData.length; i += batchSize) {
     const batch = likedData.slice(i, i + batchSize);
     await db.insert(schema.likedTweets).values(batch);
@@ -219,7 +221,7 @@ async function migrateToPostgres(data: SqliteData) {
   }));
 
   // Insert in batches
-  const batchSize = 1000;
+  const batchSize = BATCH_SIZE;
   for (let i = 0; i < retweetsData.length; i += batchSize) {
     const batch = retweetsData.slice(i, i + batchSize);
     await db.insert(schema.retweets).values(batch);
@@ -238,7 +240,7 @@ async function migrateToPostgres(data: SqliteData) {
   }));
 
   // Insert in batches
-  const batchSize = 1000;
+  const batchSize = BATCH_SIZE;
  for (let i = 0; i < tweetsData.length; i += batchSize) {
     const batch = tweetsData.slice(i, i + batchSize);
     await db.insert(schema.tweets).values(batch);
@@ -283,9 +285,10 @@ async function verifyMigration() {
   console.log("\nVerifying migration...");
 
   const client = postgres(POSTGRES_URL, {
-    max: 1,
-    idle_timeout: 20,
-    connect_timeout: 10,
+    max: 5,
+    idle_timeout: 60,
+    connect_timeout: 30,
+    prepare: false, // Disable prepared statements for better remote performance
   });
 
   const db = drizzle(client, { schema });
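
Both `migrateToPostgres` and `verifyMigration` now carry identical option objects. A small factory, hypothetical and not part of this commit, would keep the two call sites from drifting apart on the next tuning pass (it reuses the file's existing `postgres` import and `POSTGRES_URL` constant):

```ts
// Hypothetical refactor (not in this commit): one source of truth for the
// connection options shared by migration and verification.
function makeClient() {
  return postgres(POSTGRES_URL, {
    max: 5,
    idle_timeout: 60,
    connect_timeout: 30,
    prepare: false, // disable prepared statements for better remote performance
  });
}

// In both functions:
// const client = makeClient();
// const db = drizzle(client, { schema });
```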
