@@ -26,11 +26,13 @@ import scala.collection.mutable
 import scala.util.{Failure, Success}
 import scala.util.control.NonFatal
 
+import com.palantir.logsafe.{SafeArg, UnsafeArg}
+
 import org.apache.spark._
 import org.apache.spark.TaskState.TaskState
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.deploy.worker.WorkerWatcher
-import org.apache.spark.internal.Logging
+import org.apache.spark.internal.{Logging, SafeLogging}
 import org.apache.spark.rpc._
 import org.apache.spark.scheduler.{ExecutorLossReason, TaskDescription}
 import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
@@ -45,7 +47,7 @@ private[spark] class CoarseGrainedExecutorBackend(
     cores: Int,
     userClassPath: Seq[URL],
     env: SparkEnv)
-  extends ThreadSafeRpcEndpoint with ExecutorBackend with Logging {
+  extends ThreadSafeRpcEndpoint with ExecutorBackend with SafeLogging {
 
   private[this] val stopping = new AtomicBoolean(false)
   var executor: Executor = null
@@ -56,7 +58,7 @@ private[spark] class CoarseGrainedExecutorBackend(
   private[this] val ser: SerializerInstance = env.closureSerializer.newInstance()
 
   override def onStart() {
-    logInfo("Connecting to driver: " + driverUrl)
+    safeLogInfo("Connecting to driver", UnsafeArg.of("driverUrl", driverUrl))
     rpcEnv.asyncSetupEndpointRefByURI(driverUrl).flatMap { ref =>
       // This is a very fast action so we can use "ThreadUtils.sameThread"
       driver = Some(ref)
@@ -78,7 +80,7 @@ private[spark] class CoarseGrainedExecutorBackend(
 
   override def receive: PartialFunction[Any, Unit] = {
     case RegisteredExecutor =>
-      logInfo("Successfully registered with driver")
+      safeLogInfo("Successfully registered with driver")
       try {
         executor = new Executor(executorId, hostname, env, userClassPath, isLocal = false)
       } catch {
@@ -94,7 +96,7 @@ private[spark] class CoarseGrainedExecutorBackend(
         exitExecutor(1, "Received LaunchTask command but executor was null")
       } else {
         val taskDesc = TaskDescription.decode(data.value)
-        logInfo("Got assigned task " + taskDesc.taskId)
+        safeLogInfo("Got assigned task", SafeArg.of("taskId", taskDesc.taskId))
         executor.launchTask(this, taskDesc)
       }
 
@@ -107,7 +109,7 @@ private[spark] class CoarseGrainedExecutorBackend(
 
     case StopExecutor =>
       stopping.set(true)
-      logInfo("Driver commanded a shutdown")
+      safeLogInfo("Driver commanded a shutdown")
      // Cannot shutdown here because an ack may need to be sent back to the caller. So send
      // a message to self to actually do the shutdown.
      self.send(Shutdown)
@@ -125,26 +127,29 @@ private[spark] class CoarseGrainedExecutorBackend(
       }.start()
 
     case UpdateDelegationTokens(tokenBytes) =>
-      logInfo(s"Received tokens of ${tokenBytes.length} bytes")
+      safeLogInfo("Received tokens", UnsafeArg.of("tokenBytesLength", tokenBytes.length))
       SparkHadoopUtil.get.addDelegationTokens(tokenBytes, env.conf)
   }
 
   override def onDisconnected(remoteAddress: RpcAddress): Unit = {
     if (stopping.get()) {
-      logInfo(s"Driver from $remoteAddress disconnected during shutdown")
+      safeLogInfo("Driver disconnected during shutdown",
+        UnsafeArg.of("remoteAddress", remoteAddress))
     } else if (driver.exists(_.address == remoteAddress)) {
       exitExecutor(1, s"Driver $remoteAddress disassociated! Shutting down.", null,
         notifyDriver = false)
     } else {
-      logWarning(s"An unknown ($remoteAddress) driver disconnected.")
+      safeLogWarning("An unknown driver disconnected.",
+        UnsafeArg.of("remoteAddress", remoteAddress))
     }
   }
 
   override def statusUpdate(taskId: Long, state: TaskState, data: ByteBuffer) {
     val msg = StatusUpdate(executorId, taskId, state, data)
     driver match {
       case Some(driverRef) => driverRef.send(msg)
-      case None => logWarning(s"Drop $msg because has not yet connected to driver")
+      case None => safeLogWarning("Drop message because has not yet connected to driver",
+        UnsafeArg.of("msg", msg))
     }
   }
 
@@ -157,11 +162,11 @@ private[spark] class CoarseGrainedExecutorBackend(
       reason: String,
       throwable: Throwable = null,
       notifyDriver: Boolean = true) = {
-    val message = "Executor self-exiting due to : " + reason
+    val message = "Executor self-exiting"
     if (throwable != null) {
-      logError(message, throwable)
+      safeLogError(message, throwable, UnsafeArg.of("reason", reason))
     } else {
-      logError(message)
+      safeLogError(message, UnsafeArg.of("reason", reason))
     }
 
     if (notifyDriver && driver.nonEmpty) {
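
The diff replaces interpolated log messages with a constant message plus named arguments: SafeArg for values considered safe to emit verbatim (such as a task id) and UnsafeArg for values that may carry sensitive data (such as the driver URL, remote addresses, or raw messages). As a rough illustration only, here is a minimal sketch of what a safeLogInfo-style helper could look like, assuming the com.palantir.logsafe Arg API and slf4j; the actual org.apache.spark.internal.SafeLogging trait referenced above is not part of this diff and may differ.

import com.palantir.logsafe.{Arg, SafeArg, UnsafeArg}
import org.slf4j.LoggerFactory

trait SafeLoggingSketch {
  // Hypothetical stand-in for the SafeLogging trait used above; illustration only.
  private lazy val log = LoggerFactory.getLogger(getClass)

  // Appends each named argument after the constant message.
  def safeLogInfo(message: String, args: Arg[_]*): Unit =
    log.info(message + args.map(a => s" (${a.getName}=${a.getValue})").mkString)
}

Example calls mirroring the pattern in the diff:

  safeLogInfo("Got assigned task", SafeArg.of("taskId", 42L))
  safeLogInfo("Connecting to driver", UnsafeArg.of("driverUrl", "spark://driver:7077"))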
0 commit comments