Commit 4e22fa1

RachelTucker authored and rpmoore committed

Adding optional logger interface to client so logs can be easily captured (#82)

* Adding optional logger interface to client so logs can be captured by programs using the SDK
* Adding null logger and removing unused receiver from simple logger

1 parent 864c055 · commit 4e22fa1

File tree

10 files changed: +125 -42 lines changed

ds3/ds3Client.go

Lines changed: 20 additions & 4 deletions

@@ -3,6 +3,7 @@ package ds3
 import (
 	"github.com/SpectraLogic/ds3_go_sdk/ds3/networking"
 	"net/url"
+	"github.com/SpectraLogic/ds3_go_sdk/sdk_log"
 )
 
 const (
@@ -18,11 +19,15 @@ type Client struct {
 	sendNetwork    networking.Network
 	clientPolicy   *ClientPolicy
 	connectionInfo *networking.ConnectionInfo
+
+	// Logger where all messages will be logged to
+	sdk_log.Logger
 }
 
 type ClientBuilder struct {
 	connectionInfo *networking.ConnectionInfo
-	clientPolicy *ClientPolicy
+	clientPolicy   *ClientPolicy
+	logger         sdk_log.Logger
 }
 
 type ClientPolicy struct {
@@ -35,13 +40,14 @@ const DEFAULT_MAX_REDIRECTS = 5
 
 func NewClientBuilder(endpoint *url.URL, creds *networking.Credentials) *ClientBuilder {
 	return &ClientBuilder{
-		&networking.ConnectionInfo{
+		connectionInfo: &networking.ConnectionInfo{
 			Endpoint:    endpoint,
 			Credentials: creds,
 			Proxy:       nil},
-		&ClientPolicy{
+		clientPolicy: &ClientPolicy{
 			maxRetries:  DEFAULT_MAX_RETRIES,
-			maxRedirect: DEFAULT_MAX_REDIRECTS}}
+			maxRedirect: DEFAULT_MAX_REDIRECTS},
+	}
 }
 
 func (clientBuilder *ClientBuilder) WithProxy(proxy *url.URL) *ClientBuilder {
@@ -59,10 +65,20 @@ func (clientBuilder *ClientBuilder) WithNetworkRetryCount(count int) *ClientBuil
 	return clientBuilder
 }
 
+func (clientBuilder *ClientBuilder) WithLogger(logger sdk_log.Logger) *ClientBuilder {
+	clientBuilder.logger = logger
+	return clientBuilder
+}
+
 func (clientBuilder *ClientBuilder) BuildClient() *Client {
+	if clientBuilder.logger == nil {
+		clientBuilder.logger = sdk_log.NewSimpleLogger()
+	}
+
 	return &Client{
 		sendNetwork:    networking.NewSendNetwork(clientBuilder.connectionInfo),
 		clientPolicy:   clientBuilder.clientPolicy,
 		connectionInfo: clientBuilder.connectionInfo,
+		Logger:         clientBuilder.logger,
 	}
 }
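To put the new builder option in context, here is a minimal sketch of client construction with an explicit logger. The endpoint and credential values are placeholders, and the Credentials field names are assumptions about the networking package rather than part of this commit; omitting WithLogger gives the same sdk_log.NewSimpleLogger() fallback that BuildClient applies above.

package main

import (
	"log"
	"net/url"

	"github.com/SpectraLogic/ds3_go_sdk/ds3"
	"github.com/SpectraLogic/ds3_go_sdk/ds3/networking"
	"github.com/SpectraLogic/ds3_go_sdk/sdk_log"
)

func main() {
	// Placeholder endpoint and credentials for illustration only.
	endpoint, err := url.Parse("https://bp.example.com")
	if err != nil {
		log.Fatal(err)
	}
	creds := &networking.Credentials{AccessId: "access-id", Key: "secret-key"} // field names assumed

	// Any value satisfying sdk_log.Logger can be passed to WithLogger;
	// SimpleLogger is what the builder would fall back to anyway.
	client := ds3.NewClientBuilder(endpoint, creds).
		WithLogger(sdk_log.NewSimpleLogger()).
		BuildClient()

	// Because Client embeds sdk_log.Logger, the logging methods are promoted.
	client.Infof("client ready against %s", endpoint.Host)
}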

helpers/consumer_test.go

Lines changed: 4 additions & 7 deletions

@@ -3,19 +3,16 @@ package helpers
 import (
 	"testing"
 	"sync"
-	"fmt"
 	"github.com/SpectraLogic/ds3_go_sdk/ds3_utils/ds3Testing"
 )
 
-func testTransferBuilder(i int, waitGroup *sync.WaitGroup, resultCount *int, resultMux *sync.Mutex) TransferOperation {
+func testTransferBuilder(t *testing.T, i int, resultCount *int, resultMux *sync.Mutex) TransferOperation {
 	return func() {
-		//defer waitGroup.Done()
-
 		resultMux.Lock()
 		*resultCount++
 		resultMux.Unlock()
 
-		fmt.Printf("Transfer Op: '%d'\n", i)
+		t.Logf("Transfer Op: '%d'\n", i)
 	}
 }
 
@@ -31,9 +28,9 @@ func TestProducerConsumerModel(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		wg.Add(1)
 
-		var transferOf = testTransferBuilder(i, &wg, &resultCount, &resultMux)
+		var transferOf = testTransferBuilder(t, i, &resultCount, &resultMux)
 
-		fmt.Printf("Producer: '%d'\n", i)
+		t.Logf("Producer: '%d'\n", i)
 
 		*queue <- transferOf
 	}

helpers/getProducer.go

Lines changed: 17 additions & 14 deletions

@@ -4,10 +4,10 @@ import (
 	ds3Models "github.com/SpectraLogic/ds3_go_sdk/ds3/models"
 	"github.com/SpectraLogic/ds3_go_sdk/ds3"
 	"sync"
-	"log"
 	"io"
 	"github.com/SpectraLogic/ds3_go_sdk/helpers/ranges"
 	helperModels "github.com/SpectraLogic/ds3_go_sdk/helpers/models"
+	"github.com/SpectraLogic/ds3_go_sdk/sdk_log"
 )
 
 type getProducer struct {
@@ -21,6 +21,7 @@ type getProducer struct {
 	processedBlobTracker blobTracker
 	deferredBlobQueue    BlobDescriptionQueue // queue of blobs whose channels are not yet ready for transfer
 	rangeFinder          ranges.BlobRangeFinder
+	sdk_log.Logger
 }
 
 func newGetProducer(jobMasterObjectList *ds3Models.MasterObjectList, getObjects *[]helperModels.GetObject, queue *chan TransferOperation, strategy *ReadTransferStrategy, client *ds3.Client, waitGroup *sync.WaitGroup) *getProducer {
@@ -35,6 +36,7 @@ func newGetProducer(jobMasterObjectList *ds3Models.MasterObjectList, getObjects
 		processedBlobTracker: newProcessedBlobTracker(),
 		deferredBlobQueue:    NewBlobDescriptionQueue(),
 		rangeFinder:          ranges.NewBlobRangeFinder(getObjects),
+		Logger:               client.Logger, //use the same logger as the client
 	}
 }
 
@@ -55,11 +57,11 @@ func toReadObjectMap(getObjects *[]helperModels.GetObject) map[string]helperMode
 
 // Processes all the blobs in a chunk that are ready for transfer from BP
 func (producer *getProducer) processChunk(curChunk *ds3Models.Objects, bucketName string, jobId string, aggErr *ds3Models.AggregateError) {
-	log.Printf("DEBUG begin chunk processing %s", curChunk.ChunkId)
+	producer.Debugf("begin chunk processing %s", curChunk.ChunkId)
 
 	// transfer blobs that are ready, and queue those that are waiting for channel
 	for _, curObj := range curChunk.Objects {
-		log.Printf("DEBUG queuing object in waiting to be processed %s offset=%d length=%d", *curObj.Name, curObj.Offset, curObj.Length)
+		producer.Debugf("queuing object in waiting to be processed %s offset=%d length=%d", *curObj.Name, curObj.Offset, curObj.Length)
 		blob := helperModels.NewBlobDescription(*curObj.Name, curObj.Offset, curObj.Length)
 		producer.queueBlobForTransfer(&blob, bucketName, jobId, aggErr)
 	}
@@ -78,7 +80,7 @@ func (producer *getProducer) transferOperationBuilder(info getObjectInfo, aggErr
 	return func() {
 		blobRanges := producer.rangeFinder.GetRanges(info.blob.Name(), info.blob.Offset(), info.blob.Length())
 
-		log.Printf("TRANSFER: objectName='%s' offset=%d ranges=%v", info.blob.Name(), info.blob.Offset(), blobRanges)
+		producer.Debugf("transferring objectName='%s' offset=%d ranges=%v", info.blob.Name(), info.blob.Offset(), blobRanges)
 
 		getObjRequest := ds3Models.NewGetObjectRequest(info.bucketName, info.blob.Name()).
 			WithOffset(info.blob.Offset()).
@@ -91,22 +93,22 @@ func (producer *getProducer) transferOperationBuilder(info getObjectInfo, aggErr
 		getObjResponse, err := producer.client.GetObject(getObjRequest)
 		if err != nil {
 			aggErr.Append(err)
-			log.Printf("ERROR during retrieval of %s: %s", info.blob.Name(), err.Error())
+			producer.Errorf("unable to retrieve object '%s' at offset %d: %s", info.blob.Name(), info.blob.Offset(), err.Error())
 			return
 		}
 
 		if len(blobRanges) == 0 {
 			writer, err := info.channelBuilder.GetChannel(info.blob.Offset())
 			if err != nil {
 				aggErr.Append(err)
-				log.Printf("ERROR when copying content for object '%s' at offset '%d': %s", info.blob.Name(), info.blob.Offset(), err.Error())
+				producer.Errorf("unable to read contents of object '%s' at offset '%d': %s", info.blob.Name(), info.blob.Offset(), err.Error())
 				return
 			}
 			defer info.channelBuilder.OnDone(writer)
 			_, err = io.Copy(writer, getObjResponse.Content) //copy all content from response reader to destination writer
 			if err != nil {
 				aggErr.Append(err)
-				log.Printf("ERROR when copying content of object '%s' at offset '%d' from source to destination: %s", info.blob.Name(), info.blob.Offset(), err.Error())
+				producer.Errorf("unable to copy content of object '%s' at offset '%d' from source to destination: %s", info.blob.Name(), info.blob.Offset(), err.Error())
 			}
 			return
 		}
@@ -116,7 +118,7 @@ func (producer *getProducer) transferOperationBuilder(info getObjectInfo, aggErr
 			err := writeRangeToDestination(info.channelBuilder, r, getObjResponse.Content)
 			if err != nil {
 				aggErr.Append(err)
-				log.Printf("ERROR when writing to destination channel for object '%s' with range '%v': %s", info.blob.Name(), r, err.Error())
+				producer.Errorf("unable to write to destination channel for object '%s' with range '%v': %s", info.blob.Name(), r, err.Error())
 			}
 		}
 	}
@@ -145,12 +147,12 @@ func (producer *getProducer) queueBlobForTransfer(blob *helperModels.BlobDescrip
 	curReadObj := producer.readObjectMap[blob.Name()]
 
 	if !curReadObj.ChannelBuilder.IsChannelAvailable(blob.Offset()) {
-		log.Printf("DEBUG channel is NOT available for getting blob %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
+		producer.Debugf("channel is not currently available for getting blob '%s' offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
 		producer.deferredBlobQueue.Push(blob)
 		return
 	}
 
-	log.Printf("DEBUG channel is available for getting blob %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
+	producer.Debugf("channel is available for getting blob '%s' offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
 
 	// Create transfer operation
 	objInfo := getObjectInfo{
@@ -178,11 +180,11 @@ func (producer *getProducer) processWaitingBlobs(bucketName string, jobId string
 	for i := 0; i < waitingBlobs; i++ {
 		//attempt transfer
 		curBlob, err := producer.deferredBlobQueue.Pop()
-		log.Printf("DEBUG attempting to process %s offset=%d length=%d", curBlob.Name(), curBlob.Offset(), curBlob.Length())
+		producer.Debugf("attempting to process '%s' offset=%d length=%d", curBlob.Name(), curBlob.Offset(), curBlob.Length())
 		if err != nil {
 			//should not be possible to get here
 			aggErr.Append(err)
-			log.Printf("ERROR when attempting blob transfer: %s", err.Error())
+			producer.Errorf("failure during blob transfer '%s' at offset %d: %s", curBlob.Name(), curBlob.Offset(), err.Error())
 		}
 		producer.queueBlobForTransfer(curBlob, bucketName, jobId, aggErr)
 	}
@@ -197,7 +199,7 @@ func (producer *getProducer) run(aggErr *ds3Models.AggregateError) {
 
 	// determine number of blobs to be processed
 	var totalBlobCount int64 = producer.totalBlobCount()
-	log.Printf("DEBUG totalBlobs=%d processedBlobs=%d", totalBlobCount, producer.processedBlobTracker.NumberOfProcessedBlobs())
+	producer.Debugf("job status totalBlobs=%d processedBlobs=%d", totalBlobCount, producer.processedBlobTracker.NumberOfProcessedBlobs())
 
 	// process all chunks and make sure all blobs are queued for transfer
 	for producer.processedBlobTracker.NumberOfProcessedBlobs() < totalBlobCount || producer.deferredBlobQueue.Size() > 0 {
@@ -208,7 +210,8 @@ func (producer *getProducer) run(aggErr *ds3Models.AggregateError) {
 		chunksReadyResponse, err := producer.client.GetJobChunksReadyForClientProcessingSpectraS3(chunksReady)
 		if err != nil {
 			aggErr.Append(err)
-			log.Fatal(err)
+			producer.Errorf("unrecoverable error: %v", err)
+			return
 		}
 
 		// Check to see if any chunks can be processed
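One detail worth calling out in this diff: the producer holds no named logger field; it embeds sdk_log.Logger, so the interface's methods are promoted and calls like producer.Debugf(...) resolve to the embedded logger. A self-contained sketch of that Go embedding pattern follows; every name in it is illustrative rather than taken from the SDK.

package main

import "fmt"

// Logger mirrors the shape of the sdk_log.Logger interface.
type Logger interface {
	Debugf(format string, args ...interface{})
}

// printLogger is a trivial implementation for demonstration.
type printLogger struct{}

func (printLogger) Debugf(format string, args ...interface{}) {
	fmt.Printf("DEBUG "+format+"\n", args...)
}

// producer embeds the interface, so Debugf is promoted onto producer itself.
type producer struct {
	Logger
}

func main() {
	p := producer{Logger: printLogger{}}
	p.Debugf("begin chunk processing %s", "chunk-1") // resolves to the embedded logger
}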

helpers/getTransfernator.go

Lines changed: 1 addition & 1 deletion

@@ -75,7 +75,7 @@ func (transceiver *getTransceiver) transfer() (string, error) {
 	// init queue, producer and consumer
 	var waitGroup sync.WaitGroup
 
-	queue := newOperationQueue(transceiver.Strategy.BlobStrategy.maxWaitingTransfers())
+	queue := newOperationQueue(transceiver.Strategy.BlobStrategy.maxWaitingTransfers(), transceiver.Client.Logger)
 	producer := newGetProducer(&bulkGetResponse.MasterObjectList, transceiver.ReadObjects, &queue, transceiver.Strategy, transceiver.Client, &waitGroup)
 	consumer := newConsumer(&queue, &waitGroup, transceiver.Strategy.BlobStrategy.maxConcurrentTransfers())
helpers/operationQueue.go

Lines changed: 6 additions & 4 deletions

@@ -1,20 +1,22 @@
 package helpers
 
-import "log"
+import (
+	"github.com/SpectraLogic/ds3_go_sdk/sdk_log"
+)
 
 type TransferOperation func() // transfer operation that sends/gets stuff from BP
 
 const MinQueueSize uint = 1
 const MaxQueueSize uint = 100
 
-func newOperationQueue(size uint) chan TransferOperation {
+func newOperationQueue(size uint, logger sdk_log.Logger) chan TransferOperation {
 	var queue chan TransferOperation
 
 	if size > MaxQueueSize {
-		log.Printf("WARNING Invalid operation queue size: specified value '%d' which exceeds the maximum, defaulting to '%d'\n", size, MaxQueueSize)
+		logger.Warningf("invalid operation queue size: specified value '%d' which exceeds the maximum, defaulting to '%d'", size, MaxQueueSize)
 		queue = make(chan TransferOperation, MaxQueueSize)
 	} else if size < MinQueueSize {
-		log.Printf("WARNING Invalid operation queue size: specified value '%d' which is below the minimum, defaulting to '%d'\n", size, MinQueueSize)
+		logger.Warningf("invalid operation queue size: specified value '%d' which is below the minimum, defaulting to '%d'", size, MinQueueSize)
 		queue = make(chan TransferOperation, MinQueueSize)
 	} else {
 		queue = make(chan TransferOperation, size)
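Since Go's cap() reports a buffered channel's capacity, the clamping above is easy to verify. A sketch of a small test inside package helpers (the test itself is hypothetical, not part of this commit):

package helpers

import (
	"testing"

	"github.com/SpectraLogic/ds3_go_sdk/sdk_log"
)

func TestOperationQueueSizeClamping(t *testing.T) {
	logger := sdk_log.NewSimpleLogger()

	// A size above MaxQueueSize is clamped down to MaxQueueSize (and a warning is logged).
	if q := newOperationQueue(MaxQueueSize+1, logger); uint(cap(q)) != MaxQueueSize {
		t.Errorf("expected capacity %d, got %d", MaxQueueSize, cap(q))
	}

	// A size below MinQueueSize is clamped up to MinQueueSize.
	if q := newOperationQueue(0, logger); uint(cap(q)) != MinQueueSize {
		t.Errorf("expected capacity %d, got %d", MinQueueSize, cap(q))
	}
}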

helpers/putProducer.go

Lines changed: 16 additions & 11 deletions

@@ -1,11 +1,11 @@
 package helpers
 
 import (
-	"log"
 	ds3Models "github.com/SpectraLogic/ds3_go_sdk/ds3/models"
 	helperModels "github.com/SpectraLogic/ds3_go_sdk/helpers/models"
 	"github.com/SpectraLogic/ds3_go_sdk/ds3"
 	"sync"
+	"github.com/SpectraLogic/ds3_go_sdk/sdk_log"
 )
 
 type putProducer struct {
@@ -18,6 +18,7 @@ type putProducer struct {
 	writeObjectMap       map[string]helperModels.PutObject
 	processedBlobTracker blobTracker
 	deferredBlobQueue    BlobDescriptionQueue // queue of blobs whose channels are not yet ready for transfer
+	sdk_log.Logger
 }
 
 func newPutProducer(jobMasterObjectList *ds3Models.MasterObjectList, putObjects *[]helperModels.PutObject, queue *chan TransferOperation, strategy *WriteTransferStrategy, client *ds3.Client, waitGroup *sync.WaitGroup) *putProducer {
@@ -31,6 +32,7 @@ func newPutProducer(jobMasterObjectList *ds3Models.MasterObjectList, putObjects
 		writeObjectMap:       toWriteObjectMap(putObjects),
 		deferredBlobQueue:    NewBlobDescriptionQueue(),
 		processedBlobTracker: newProcessedBlobTracker(),
+		Logger:               client.Logger, // use the same logger as the client
 	}
 }
 
@@ -63,7 +65,7 @@ func (producer *putProducer) transferOperationBuilder(info putObjectInfo, aggErr
 	reader, err := info.channelBuilder.GetChannel(info.blob.Offset())
 	if err != nil {
 		aggErr.Append(err)
-		log.Printf("ERROR could not get reader for object with name='%s' offset=%d length=%d", info.blob.Name(), info.blob.Offset(), info.blob.Length())
+		producer.Errorf("could not get reader for object with name='%s' offset=%d length=%d", info.blob.Name(), info.blob.Offset(), info.blob.Length())
 		return
 	}
 	defer info.channelBuilder.OnDone(reader)
@@ -78,7 +80,7 @@ func (producer *putProducer) transferOperationBuilder(info putObjectInfo, aggErr
 	_, err = producer.client.PutObject(putObjRequest)
 	if err != nil {
 		aggErr.Append(err)
-		log.Printf("ERROR during transfer of %s: %s\n", info.blob.Name(), err.Error())
+		producer.Errorf("problem during transfer of %s: %s", info.blob.Name(), err.Error())
 	}
 }
@@ -111,11 +113,11 @@ func (producer *putProducer) metadataFrom(info putObjectInfo) map[string]string
 // Processes all the blobs in a chunk and attempts to add them to the transfer queue.
 // If a blob is not ready for transfer, then it is added to the waiting to be transferred queue.
 func (producer *putProducer) processChunk(curChunk *ds3Models.Objects, bucketName string, jobId string, aggErr *ds3Models.AggregateError) {
-	log.Printf("DEBUG begin chunk processing %s", curChunk.ChunkId)
+	producer.Debugf("begin chunk processing %s", curChunk.ChunkId)
 
 	// transfer blobs that are ready, and queue those that are waiting for channel
 	for _, curObj := range curChunk.Objects {
-		log.Printf("DEBUG queuing object in waiting to be processed %s offset=%d length=%d", *curObj.Name, curObj.Offset, curObj.Length)
+		producer.Debugf("queuing object in waiting to be processed %s offset=%d length=%d", *curObj.Name, curObj.Offset, curObj.Length)
 		blob := helperModels.NewBlobDescription(*curObj.Name, curObj.Offset, curObj.Length)
 		producer.queueBlobForTransfer(&blob, bucketName, jobId, aggErr)
 	}
@@ -129,11 +131,12 @@ func (producer *putProducer) processWaitingBlobs(bucketName string, jobId string
 	for i := 0; i < waitingBlobs; i++ {
 		//attempt transfer
 		curBlob, err := producer.deferredBlobQueue.Pop()
-		log.Printf("DEBUG attempting to process %s offset=%d length=%d", curBlob.Name(), curBlob.Offset(), curBlob.Length())
 		if err != nil {
 			aggErr.Append(err)
-			log.Printf("ERROR when attempting blob transfer: %s", err.Error())
+			producer.Errorf("problem when getting next blob to be transferred: %s", err.Error())
+			continue
 		}
+		producer.Debugf("attempting to process %s offset=%d length=%d", curBlob.Name(), curBlob.Offset(), curBlob.Length())
 		producer.queueBlobForTransfer(curBlob, bucketName, jobId, aggErr)
 	}
 }
@@ -148,13 +151,13 @@ func (producer *putProducer) queueBlobForTransfer(blob *helperModels.BlobDescrip
 	curWriteObj := producer.writeObjectMap[blob.Name()]
 
 	if !curWriteObj.ChannelBuilder.IsChannelAvailable(blob.Offset()) {
-		log.Printf("DEBUG channel is NOT available for blob %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
+		producer.Debugf("channel is not currently available for blob %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
 		// Not ready to be transferred
 		producer.deferredBlobQueue.Push(blob)
 		return
 	}
 
-	log.Printf("DEBUG channel is available for blob %s offset=%d length=%d", curWriteObj.PutObject.Name, blob.Offset(), blob.Length())
+	producer.Debugf("channel is available for blob %s offset=%d length=%d", curWriteObj.PutObject.Name, blob.Offset(), blob.Length())
 	// Blob ready to be transferred
 
 	// Create transfer operation
@@ -184,7 +187,7 @@ func (producer *putProducer) run(aggErr *ds3Models.AggregateError) {
 
 	// determine number of blobs to be processed
 	var totalBlobCount int64 = producer.totalBlobCount()
-	log.Printf("DEBUG totalBlobs=%d processedBlobs=%d", totalBlobCount, producer.processedBlobTracker.NumberOfProcessedBlobs())
+	producer.Debugf("job status totalBlobs=%d processedBlobs=%d", totalBlobCount, producer.processedBlobTracker.NumberOfProcessedBlobs())
 
 	// process all chunks and make sure all blobs are queued for transfer
 	for producer.processedBlobTracker.NumberOfProcessedBlobs() < totalBlobCount || producer.deferredBlobQueue.Size() > 0 {
@@ -194,7 +197,9 @@ func (producer *putProducer) run(aggErr *ds3Models.AggregateError) {
 		chunksReady := ds3Models.NewGetJobChunksReadyForClientProcessingSpectraS3Request(producer.JobMasterObjectList.JobId)
 		chunksReadyResponse, err := producer.client.GetJobChunksReadyForClientProcessingSpectraS3(chunksReady)
 		if err != nil {
-			log.Fatal(err)
+			aggErr.Append(err)
+			producer.Errorf("unrecoverable error: %v", err)
+			return
 		}
 
 		// Check to see if any chunks can be processed

helpers/putTransceiver.go

Lines changed: 1 addition & 1 deletion

@@ -73,7 +73,7 @@ func (transceiver *putTransceiver) transfer() (string, error) {
 	// init queue, producer and consumer
 	var waitGroup sync.WaitGroup
 
-	queue := newOperationQueue(transceiver.Strategy.BlobStrategy.maxWaitingTransfers())
+	queue := newOperationQueue(transceiver.Strategy.BlobStrategy.maxWaitingTransfers(), transceiver.Client.Logger)
 	producer := newPutProducer(&bulkPutResponse.MasterObjectList, transceiver.WriteObjects, &queue, transceiver.Strategy, transceiver.Client, &waitGroup)
 	consumer := newConsumer(&queue, &waitGroup, transceiver.Strategy.BlobStrategy.maxConcurrentTransfers())
sdk_log/logger.go

Lines changed: 8 additions & 0 deletions

@@ -0,0 +1,8 @@
+package sdk_log
+
+type Logger interface {
+	Infof(format string, args ...interface{})
+	Debugf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+}
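Because the interface is just four formatted-print methods, routing the SDK's logs into an existing logging setup only takes a small adapter. A minimal sketch of a hypothetical adapter over the standard library's log package (not part of this commit):

package main

import (
	"log"
	"os"
)

// stdLogger adapts a standard-library *log.Logger to the sdk_log.Logger shape.
// The type and its level prefixes are illustrative, not from the SDK.
type stdLogger struct {
	out *log.Logger
}

func (l *stdLogger) Infof(format string, args ...interface{}) {
	l.out.Printf("INFO "+format, args...)
}

func (l *stdLogger) Debugf(format string, args ...interface{}) {
	l.out.Printf("DEBUG "+format, args...)
}

func (l *stdLogger) Warningf(format string, args ...interface{}) {
	l.out.Printf("WARNING "+format, args...)
}

func (l *stdLogger) Errorf(format string, args ...interface{}) {
	l.out.Printf("ERROR "+format, args...)
}

func main() {
	logger := &stdLogger{out: log.New(os.Stderr, "", log.LstdFlags)}
	logger.Infof("messages now flow through the standard %s package", "log")
}

A value like this could then be handed to the client through the WithLogger builder method shown above.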
