#!/bin/bash
##########################################################################
##########################################################################
#################### nvOC 3.2 - Community Release ####################
########## Based on the original nvOC v0019-1.4 by fullzero ##########
##########################################################################
##########################################################################
# Based on the original code by Maxximus007
#
# Changelog:
# v=0001 : leenoox
#          Set higher process and disk I/O priorities - temp control is an essential service
#          Auto-detection of the number of GPUs
#          Only set, adjust and display available GPUs; dynamic variable creation
#          Numeric check of the return values from nvidia-smi (looking for numbers only, anything else = error)
#          Reboot if a GPU error is detected and the watchdog didn't react for 60 seconds (acts as a backup watchdog)
#          Added a query delay to the nvidia API (0.5 sec between GPUs, no burst spamming, prevents overload), helps reduce stale shares
#          New (improved) display output including colors and bold text
#          Fixed the log file handling (bug fix: the previous implementation was not limiting the file size)
#          Removed the repetitive GPUFanControlState setting; it only needs to be set once, not every cycle (prevents API overload)
#          Workaround for some 1050s reporting "Unknown" or "ERR" when power.draw is queried from nvidia-smi
# v=0002 : Stubo: Added a secondary fix for 1050s reporting "[Not Supported]" or "[Unknown Error]" when power.draw is
#          queried from nvidia-smi (a.k.a. the power.draw bleed issue)
# v=0003 : Papampi: Telegram alerts
#
# v=0004 : Papampi:
#          MAXIMAL_FAN_SPEED
# v=0005 : LukePicci
#          Relocate nvOC to an arbitrary install directory
# v=0006 : papampi (suggestion by abdeldev)
#          Use sysrq reboot to prevent freeze/hang
# v=0007 : LuKePicci
#          Reduce calls to nvidia-smi and nvidia-settings
# v=0008 : Spiral
#          Add a loop to shut down the rig if SHUTDOWN_TEMP is exceeded
# v=0009 : brightskye
#          Add support for disabled_gpu
# v=0010 : LuKePicci
#          Parallel tempcontrol threads
# v=0011 : papampi
#          Tempcontrol cycle
# v=0012 : papampi
#          Allowed fan diff
source ${NVOC}/1bash
source ${NVOC}/helpers/coin_algo_mapping
source ${NVOC}/helpers/disabled_gpu.sh
nvOC_temp_Dev="0012"
nvOC_temp_ver="$nvOC_Ver.$nvOC_temp_Dev" # Do not edit this
export DISPLAY=:0
echo "Temp Control for nvOC $nvOC_Ver"
echo "Version: $nvOC_temp_ver"
echo ""
# Set higher process and disk I/O priorities because we are an essential service
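# (renice -15 = higher CPU scheduling priority for this PID; ionice -c2 -n0 = best-effort I/O class at its highest level)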
{ sudo renice -n -15 -p $$ && sudo ionice -c2 -n0 -p $$; } >/dev/null 2>&1
sleep 1
NVD="nvidia-settings"
SMI="nvidia-smi"
if [[ $TEMPCONTROL_USE_COLOR == YES ]]; then
N='\e[0m' # Normal
B='\e[1m' # Bold
R='\e[31m' # Red
G='\e[32m' # Green
C='\e[36m' # Cyan
Y='\e[33m' # Yellow
else
N=""
B=""
R=""
G=""
C=""
Y=""
fi
# Log file handling (check existence, limit size or create; show the last 10 lines if not empty)
LOG_FILE="${NVOC}/6_autotemplog"
if [ -e "$LOG_FILE" ]; then
LASTLOG=$(tail -n 100 "$LOG_FILE") # Limit the log file: keep only the last 100 lines
echo "$LASTLOG" > $LOG_FILE
if [[ $(wc -l <$LOG_FILE) -gt 1 ]]; then
echo -e "${B}LOG FILE:${N} (Showing the last 10 recorded entries)${R}"
tail -n 10 "$LOG_FILE"
echo -e "${N}"
echo ""
else
echo -e "${B}LOG FILE${N} is empty."
echo ""
echo ""
fi
else
touch $LOG_FILE # if log file does not exist, create one
echo -e "New ${B}LOG FILE${N} created."
echo ""
echo ""
fi
chmod a+w "${LOG_FILE}" # allow clearing by www
WD_LOG_FILE="${NVOC}/5_watchdoglog"
# Display version info
echo ""
for i in {16..21} ; do
echo -en "\e[48;5;${i}m "
done
echo -en "${B}TEMP_CONTROL $nvOC_temp_ver - nvOC $nvOC_Ver - Community Release${N}"
for i in {21..16} ; do
echo -en "\e[48;5;${i}m \e[0m"
done
echo ""
echo ""
sleep 1
# Determine the number of available GPUs
GPUS=$(${SMI} -i 0 --query-gpu=count --format=csv,noheader,nounits)
echo -e "Detected: ${B}$GPUS${N} GPUs"
echo ""
count=0
# Dynamic variable creation - assign variables for the available GPUs only
# Set variables for Temp and Power Limit; Enable fan control; Display info
while (( count < GPUS ))
do
if [[ -n ${DISABLED_GPU_ARRAY[$count]} ]]; then
# skip if disabled
echo -e "${B}GPU $count:${N} ${R}${B}Disabled${N}"
(( count++ ))
continue
fi
if [[ $POWERLIMIT_MODE == GPU_SPECIFIC ]]
then
POWER_LIMIT[$count]=$(( INDIVIDUAL_POWERLIMIT_$count ))
elif [[ $POWERLIMIT_MODE == GLOBAL_with_GPU_OFFSET || $POWERLIMIT_MODE == ALGO_SPECIFIC_with_GPU_OFFSET ]]
then
POWER_LIMIT[$count]=$(( ${ALGO}_POWERLIMIT_WATTS + INDIVIDUAL_POWERLIMIT_$count ))
elif [[ $POWERLIMIT_MODE == ALGO_SPECIFIC ]]
then
POWER_LIMIT[$count]=$(( ${ALGO}_POWERLIMIT_WATTS ))
elif [[ $POWERLIMIT_MODE == GLOBAL ]]
then
POWER_LIMIT[$count]=$_POWERLIMIT_WATTS
fi
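# Note: $(( INDIVIDUAL_POWERLIMIT_$count )) works because arithmetic expansion substitutes
# $count first, then dereferences the resulting name, e.g. count=2 reads $INDIVIDUAL_POWERLIMIT_2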
if [[ $INDIVIDUAL_TARGET_TEMPS == YES ]]
then
TARGET_TEMP[$count]=$(( TARGET_TEMP_$count ))
elif [[ $INDIVIDUAL_TARGET_TEMPS == NO ]]
then
TARGET_TEMP[$count]=$TARGET_TEMP
fi
# Info - display assigned values per GPU
echo -e "${B}GPU $count:${N} POWER LIMIT: ${B}${POWER_LIMIT[$count]}${N}, TARGET TEMP: ${B}${TARGET_TEMP[$count]}${N}"
__CTL_ENABLE="${__CTL_ENABLE} -a [gpu:${count}]/GPUFanControlState=1"
(( count++ ))
done
# Enable fan control
sudo ${NVD} $__CTL_ENABLE >/dev/null 2>&1
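# (GPUFanControlState for all GPUs is enabled in one batched nvidia-settings call, and only
# once at startup - setting it every cycle would needlessly hammer the driver API)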
sleep 0.1
FAN_ADJUST=$__FAN_ADJUST
# If the user sets the minimal fan speed too low in 1bash, override it and set it to 30%
if (( MINIMAL_FAN_SPEED < 30 ))
then
MINIMAL_FAN_SPEED=30
fi
echo ""
# Info - display the Global settings
echo -e "${B}GLOBAL${N} FAN_ADJUST (%): ${B}$FAN_ADJUST${N}"
echo -e "${B}GLOBAL${N} POWER_ADJUST (W): ${B}$POWER_ADJUST${N}"
echo -e "${B}GLOBAL${N} ALLOWED_TEMP_DIFF (C): ${B}$ALLOWED_TEMP_DIFF${N}"
echo -e "${B}GLOBAL${N} RESTORE_POWER_LIMIT (%): ${B}$RESTORE_POWER_LIMIT${N}"
echo -e "${B}GLOBAL${N} MINIMAL_FAN_SPEED (%): ${B}$MINIMAL_FAN_SPEED${N}"
echo ""
# Setting persistence mode on will keep settings between sessions.
# fullzero, Papampi, Stubo..., we need to decide if we're going to use Persistence Mode.
# I have been using it for a while and had no problems with it.
# If we decide to use it, please move this to the top of 3main:
# sudo nvidia-smi -pm 1
# How often should TEMP_CONTROL check and adjust the fans
# Allowed value between 15 and 30 seconds (IMO, 20 seconds works well)
LOOP_TIMER_SLEEP=$TEMP_CONTROL_CYCLE
if (( LOOP_TIMER_SLEEP < 15 ))
then
LOOP_TIMER_SLEEP=15
elif (( LOOP_TIMER_SLEEP > 30 ))
then
LOOP_TIMER_SLEEP=30
fi
# Calculate the main timer based on the number of GPUs, because we add a
# 0.5 second delay between every GPU check so that we don't overload the nvidia API
# (previously the API was spammed, especially on systems with 13+ GPUs, causing a
# slight delay for the miner; this has reduced stale shares for me)
LOOP_TIMER=$(echo "$LOOP_TIMER_SLEEP - ( $GPUS * 0.5 )" | bc )
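# Worked example (hypothetical values): TEMP_CONTROL_CYCLE=20 with 13 GPUs gives
# LOOP_TIMER = 20 - (13 * 0.5) = 13.5 seconds of sleep at the end of each cycle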
# When the API returns an error message due to a frozen/hung GPU, the original temp control script
# broke with an error because it expected a numeric value but received text, leaving
# the system without temp control and with the potential to damage GPUs.
# Add a numeric check of the values returned by nvidia-smi to prevent such an occurrence
numtest='^[0-9.]+$'
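# e.g. "65" and "120.00" match the pattern; "[Unknown Error]" and "ERR" do not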
# Time in seconds before we reboot, should we detect an error and the watchdog didn't react
ERR_TIMER=60
ERR_TIMER_BRK=$ERR_TIMER
# Allowed fan diff fail-safe and protection
if [ -z "${ALLOWED_FAN_DIFF+x}" ]; then
echo "ALLOWED_FAN_DIFF is not set at all, setting it to 0"
ALLOWED_FAN_DIFF=0
elif [[ $ALLOWED_FAN_DIFF == "" ]]; then
echo "ALLOWED_FAN_DIFF is empty, setting it to 0"
ALLOWED_FAN_DIFF=0
elif (( ALLOWED_FAN_DIFF < 0 )); then
echo "ALLOWED_FAN_DIFF is negative, setting it to 0"
ALLOWED_FAN_DIFF=0
elif (( ALLOWED_FAN_DIFF > __FAN_ADJUST / 2 )); then
echo "ALLOWED_FAN_DIFF is greater than FAN_ADJUST/2, setting it to $(( __FAN_ADJUST / 2 ))"
ALLOWED_FAN_DIFF=$(( __FAN_ADJUST / 2 ))
fi
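# Example (hypothetical values): with __FAN_ADJUST=10, ALLOWED_FAN_DIFF is capped at 10 / 2 = 5,
# so fan-speed changes of 5% or less are skipped by the hysteresis check in the main loop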
# The Main Loop
while true
do
SMI_VARS="CURRENT_TEMP CURRENT_FAN PWRLIMIT POWERDRAW GPU_NAME"
SMI_QUERY_OUTPUT=$(${SMI} --query-gpu=temperature.gpu,fan.speed,power.limit,power.draw,name --format=csv,noheader,nounits)
NVD_SETTINGS=""
GPU=0
while (( GPU < GPUS ))
do
if [[ -n ${DISABLED_GPU_ARRAY[$GPU]} ]]; then
# skip if disabled
(( GPU++ ))
continue
fi
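# Pick line GPU+1 from the batched query output and split its comma-separated fields into
# CURRENT_TEMP, CURRENT_FAN, PWRLIMIT, POWERDRAW and GPU_NAME, e.g. (hypothetical)
# "65, 80, 120.00, 95.32, GeForce GTX 1070" -> CURRENT_TEMP=65 ... GPU_NAME="GeForce GTX 1070"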
{ IFS=', ' read $SMI_VARS; } < <(echo "$SMI_QUERY_OUTPUT" | sed "$((GPU+1))q;d")
# Shut down the rig if SHUTDOWN_TEMP is reached
if [[ $HOT_SHUTDOWN == YES ]] && (( CURRENT_TEMP > SHUTDOWN_TEMP ))
then
if [[ $TELEGRAM_ALERTS == YES || $TELEGRAM_MESSAGES == YES ]]
then
/usr/bin/curl -m 5 -s -X POST --output /dev/null https://api.telegram.org/bot$TELEGRAM_APIKEY/sendMessage -d "text=GPU$GPU exceeded ${SHUTDOWN_TEMP}C. System shutting down." -d chat_id=$TELEGRAM_CHATID &
fi
sleep 2
sudo poweroff -f
fi
# Numeric check to avoid script breakage should nvidia-smi return an error; also acts as a backup watchdog
if ! [[ $CURRENT_TEMP =~ $numtest && $CURRENT_FAN =~ $numtest && $PWRLIMIT =~ $numtest && ( $GPU_NAME =~ 1050 || $POWERDRAW =~ $numtest ) ]]
then
SMI_TESTS="_CURRENT_TEMP _CURRENT_FAN _PWRLIMIT"
SMI_TEST_QUERY="temperature.gpu,fan.speed,power.limit"
# Workaround for 1050's reporting "[Not Supported]" or "[Unknown Error]" when power.draw is queried from nvidia-smi
if [[ ! $GPU_NAME =~ 1050 ]]
then
SMI_TESTS="$SMI_TESTS _POWERDRAW"
SMI_TEST_QUERY="$SMI_TEST_QUERY,power.draw"
fi
echo -e "${Y}${B}WARNING: $(date) - tempcontrol@[GPU$GPU] Problem detected: nvidia-smi failed to query current status. Watchdog has $ERR_TIMER seconds to react, otherwise we will reboot!${N}" | tee -a "$LOG_FILE"
(
echo "========= SMI Query output with errors @[GPU$GPU]:"
echo "$SMI_QUERY_OUTPUT"
echo "========= End of SMI query output"
) >> "$LOG_FILE"
if [[ $TELEGRAM_ALERTS == YES ]]
then
bash "${NVOC}/telegram" &
fi
(
sleep $(( GPU * 3 ))
# Non-numeric value - problem detected! Give the watchdog 60 seconds to react; if it doesn't, assume it froze and reboot ourselves (backup watchdog function)
while (( ERR_TIMER > 0 ))
do
sleep 15
{ IFS=', ' read ${SMI_TESTS}; } < <(${SMI} -i $GPU --query-gpu=${SMI_TEST_QUERY} --format=csv,noheader,nounits)
if ! [[ $_CURRENT_TEMP =~ $numtest && $_CURRENT_FAN =~ $numtest && $_PWRLIMIT =~ $numtest && ( $GPU_NAME =~ 1050 || $_POWERDRAW =~ $numtest ) ]]
then
ERR_TIMER=$((ERR_TIMER - 15))
echo -e "${Y}${B}WARNING: $(date) - tempcontrol@[GPU$GPU] Problem persists: watchdog has $ERR_TIMER seconds to react, otherwise we will reboot!${N}" | tee -a "$LOG_FILE"
else
ERR_TIMER=$ERR_TIMER_BRK
echo -e "${G}${B}INFO: $(date) - tempcontrol@[GPU$GPU] Problem solved.${N}" | tee -a "$LOG_FILE" "$WD_LOG_FILE"
break
fi
if (( ERR_TIMER <= 0 ))
then
echo -e "${R}${B}ERROR: $(date) - tempcontrol@[GPU$GPU] Problem persists: the GPU is in an unknown state. System is now rebooting to correct the problem.${N}" | tee -a "$LOG_FILE" "$WD_LOG_FILE"
if [[ $TELEGRAM_ALERTS == YES ]]
then
bash "${NVOC}/telegram" &
fi
sleep 3
if [[ $SYSRQ_REBOOT == YES ]]
then
sudo bash ${NVOC}/sysrq_reboot.sh
else
sudo reboot -f
fi
fi
done
) &
(( GPU++ ))
continue # fast-forward to the next GPU in the tempcontrol loop
# NOTE: if the nvidia-smi query failed, we are likely getting errors for more GPUs
fi
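# Strip the fractional part of the reported power limit, e.g. "120.00" -> "120"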
POWERLIMIT=${PWRLIMIT%%.*}
TEMP_DIFF=$(( CURRENT_TEMP - TARGET_TEMP[GPU] ))
NEW_FAN_SPEED=$CURRENT_FAN
HIGH_TEMP_DIFF=$(( ALLOWED_TEMP_DIFF * 2 ))
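# Tiered ramp (hypothetical example): with ALLOWED_TEMP_DIFF=3, HIGH_TEMP_DIFF=6, so a diff
# of 1-3 adds 1x FAN_ADJUST, 4-6 adds 2x, and 7+ adds 3x in the branch below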
echo -e "${B}GPU $GPU${N}, Target temp: ${B}${TARGET_TEMP[${GPU}]}${N}, Current: ${B}$CURRENT_TEMP${N}, Diff: ${B}$TEMP_DIFF${N}, Fan: ${B}$CURRENT_FAN${N}, Power: ${B}$POWERDRAW${N}"
if (( TEMP_DIFF > 0 ))
then
# Things are getting hot, speed up fans
# Scale the adjustment with how far we are over target: up to 3x FAN_ADJUST
FAN_ADJUST_CALCULATED=$FAN_ADJUST
if (( TEMP_DIFF > ALLOWED_TEMP_DIFF ))
then
(( FAN_ADJUST_CALCULATED += FAN_ADJUST ))
fi
if (( TEMP_DIFF > HIGH_TEMP_DIFF ))
then
(( FAN_ADJUST_CALCULATED += FAN_ADJUST ))
fi
NEW_FAN_SPEED=$(( CURRENT_FAN + FAN_ADJUST_CALCULATED ))
if (( NEW_FAN_SPEED > MAXIMAL_FAN_SPEED ))
then
NEW_FAN_SPEED=$MAXIMAL_FAN_SPEED
# Fan speed reached MAXIMAL_FAN_SPEED, we have to drop the power limit
NEW_POWER_LIMIT=$((POWERLIMIT - POWER_ADJUST))
if [[ $WARN_PL_DROPS == YES ]]
then
echo -e "${B}WARNING: GPU $GPU${N}, ${R}$(date) - Adjusting Power Limit for ${B}GPU$GPU${N}${R}. Old Limit: ${B}$POWERLIMIT${N}${R} New Limit: ${B}$NEW_POWER_LIMIT${N}${R} Fan speed: ${B}$NEW_FAN_SPEED${N}" | tee -a "$LOG_FILE"
if [[ $TELEGRAM_ALERTS == "YES" ]]
then
bash "${NVOC}/telegram" &
fi
else
echo -e "${B}INFO: GPU $GPU${N}, ${R}$(date) - Adjusting Power Limit for ${B}GPU$GPU${N}${R}. Old Limit: ${B}$POWERLIMIT${N}${R} New Limit: ${B}$NEW_POWER_LIMIT${N}${R} Fan speed: ${B}$NEW_FAN_SPEED${N}"
fi
echo ""
sudo ${SMI} -i $GPU -pl ${NEW_POWER_LIMIT}
fi
elif (( TEMP_DIFF < - ALLOWED_TEMP_DIFF ))
then
# Current temp is lower than target, so we can relax fan speed, and restore original power limit if applicable
# Restore fan speed faster the further we are below target. This could be made far more advanced.
if (( TEMP_DIFF < - HIGH_TEMP_DIFF ))
then
FAN_ADJUST_CALCULATED=$(( FAN_ADJUST * 2 ))
else
FAN_ADJUST_CALCULATED=$FAN_ADJUST
fi
NEW_FAN_SPEED=$(( CURRENT_FAN - FAN_ADJUST_CALCULATED ))
# Set to minimal fan speed if calculated is below
if (( NEW_FAN_SPEED < MINIMAL_FAN_SPEED ))
then
NEW_FAN_SPEED=$MINIMAL_FAN_SPEED
fi
# Restore the original power limit once the fan speed has relaxed below the RESTORE_POWER_LIMIT threshold
if (( ${POWER_LIMIT[${GPU}]} != POWERLIMIT ))
then
if (( NEW_FAN_SPEED < RESTORE_POWER_LIMIT ))
then
NEW_POWER_LIMIT=${POWER_LIMIT[${GPU}]}
echo -e "${B}GPU$GPU${N}${C}$(date) - Restoring Power Limit for ${N}${B}GPU$GPU${N}. ${C}Old limit: ${N}${B}$POWERLIMIT${N}${C} New limit: ${N}${B}$NEW_POWER_LIMIT${N}${C} Fan speed: ${N}${B}$NEW_FAN_SPEED${N}"
echo ""
sudo ${SMI} -i $GPU -pl ${NEW_POWER_LIMIT}
fi
fi
fi
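# Hysteresis: only queue a fan change when it differs from the current speed by more than
# ALLOWED_FAN_DIFF, e.g. (hypothetical) CURRENT_FAN=60, ALLOWED_FAN_DIFF=3 leaves 57-63 untouched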
if (( NEW_FAN_SPEED > CURRENT_FAN + ALLOWED_FAN_DIFF )) || (( NEW_FAN_SPEED < CURRENT_FAN - ALLOWED_FAN_DIFF ))
then
echo -e "${B}GPU $GPU${N}, ${C}$(date) - Adjusting fan from: ${N}${B}$CURRENT_FAN${N} ${C}to: ${N}${B}$NEW_FAN_SPEED${N} ${C}Temp: ${N}${B}$CURRENT_TEMP${N}"
echo ""
NVD_SETTINGS="${NVD_SETTINGS} -a [fan:${GPU}]/GPUTargetFanSpeed=${NEW_FAN_SPEED}"
fi
(( GPU++ ))
done
# Apply adjustments
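# (batched: one nvidia-settings call per cycle instead of one per GPU, reducing driver round-trips)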
if [[ $NVD_SETTINGS != "" ]]
then
sudo ${NVD} ${NVD_SETTINGS} >/dev/null 2>&1
fi
echo "$(date) - All good, will check again in $LOOP_TIMER seconds"
echo ""
echo ""
echo ""
sleep $LOOP_TIMER
done