Commit bffe481

make storage weak (#31)
Parent: bcc184f

File tree: 1 file changed, +8 -7 lines

core/src/main/java/com/segment/analytics/kotlin/core/SegmentDestination.kt

Lines changed: 8 additions & 7 deletions
```diff
@@ -13,6 +13,7 @@ import kotlinx.serialization.json.jsonObject
 import kotlinx.serialization.json.jsonPrimitive
 import java.io.File
 import java.io.FileInputStream
+import java.lang.ref.WeakReference
 import java.util.concurrent.Executors
 import java.util.concurrent.ScheduledExecutorService
 import java.util.concurrent.TimeUnit
@@ -34,7 +35,7 @@ class SegmentDestination(
 
     override val key: String = "Segment.io"
     internal val httpClient: HTTPClient = HTTPClient(apiKey)
-    internal lateinit var storage: Storage
+    internal lateinit var storage: WeakReference<Storage>
     lateinit var flushScheduler: ScheduledExecutorService
     internal val eventCount = AtomicInteger(0)
 
@@ -73,7 +74,7 @@ class SegmentDestination(
         val stringVal = Json.encodeToString(jsonVal)
         analytics.log("$key running $stringVal")
         try {
-            storage.write(Storage.Constants.Events, stringVal)
+            storage.get()?.write(Storage.Constants.Events, stringVal)
             if (eventCount.incrementAndGet() >= flushCount) {
                 flush()
             }
@@ -84,7 +85,7 @@ class SegmentDestination(
 
     override fun setup(analytics: Analytics) {
         super.setup(analytics)
-        storage = analytics.storage
+        storage = WeakReference(analytics.storage)
 
         // register timer for flush interval
         flushScheduler = Executors.newScheduledThreadPool(1)
@@ -93,7 +94,7 @@ class SegmentDestination(
 
         // If we have events in queue flush them
         val eventFilePaths =
-            parseFilePaths(storage.read(Storage.Constants.Events))
+            parseFilePaths(storage.get()?.read(Storage.Constants.Events))
         if (eventFilePaths.isNotEmpty()) {
             initialDelay = 0
         }
@@ -127,7 +128,7 @@ class SegmentDestination(
             return
         }
         analytics.log("$key performing flush")
-        val fileUrls = parseFilePaths(storage.read(Storage.Constants.Events))
+        val fileUrls = parseFilePaths(storage.get()?.read(Storage.Constants.Events))
         if (fileUrls.isEmpty()) {
             analytics.log("No events to upload")
             return
@@ -152,7 +153,7 @@ class SegmentDestination(
                     connection.close()
                 }
                 // Cleanup uploaded payloads
-                storage.removeFile(fileUrl)
+                storage.get()?.removeFile(fileUrl)
                 analytics.log("$key uploaded $fileUrl")
             } catch (e: HTTPException) {
                 analytics.log("$key exception while uploading, ${e.message}")
@@ -162,7 +163,7 @@ class SegmentDestination(
                         message = "Payloads were rejected by server. Marked for removal.",
                         type = LogType.ERROR
                     )
-                    storage.removeFile(fileUrl)
+                    storage.get()?.removeFile(fileUrl)
                 } else {
                     analytics.log(
                         message = "Error while uploading payloads",
```
