Skip to content

Commit 9812cf0

Browse files
security: Fix top 5 low severity vulnerabilities from Snyk scan
- Fix file handle management in common/log.cpp: Properly set file pointer to NULL after closing and add error handling for failed fopen() (CWE-772: Missing Release of Resource after Effective Lifetime) - Deprecate SHA1 in favor of SHA256 in gguf_hash.py: Reorder output to prioritize SHA256 and mark SHA1 as deprecated in output messages (CWE-327: Use of a Broken or Risky Cryptographic Algorithm) - Remove hardcoded API keys in test_chat_completion.py: Replace all hardcoded 'dummy' API keys with environment variable LLAMA_SERVER_TEST_API_KEY with 'dummy' as default fallback for test environments (CWE-798: Use of Hard-coded Credentials) These fixes address security issues identified by Snyk static analysis: - 4 instances of file handle leaks - 3 instances of insecure hash usage - 10 instances of hardcoded credentials in test code All changes maintain backward compatibility and existing functionality. Co-Authored-By: Jake Cosme <[email protected]>
1 parent 661ae31 commit 9812cf0

File tree

3 files changed

+17
-12
lines changed

3 files changed

+17
-12
lines changed

common/log.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -297,12 +297,14 @@ struct common_log {
297297

298298
if (file) {
299299
fclose(file);
300+
file = nullptr;
300301
}
301302

302303
if (path) {
303304
file = fopen(path, "w");
304-
} else {
305-
file = nullptr;
305+
if (!file) {
306+
fprintf(stderr, "Failed to open log file: %s\n", path);
307+
}
306308
}
307309

308310
resume();

gguf-py/gguf/scripts/gguf_hash.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -65,24 +65,24 @@ def gguf_hash(reader: GGUFReader, filename: str, disable_progress_bar: bool, no_
6565

6666
if not no_layer:
6767

68-
sha1_layer = hashlib.sha1()
69-
sha1_layer.update(tensor.data.data)
70-
print("sha1 {0} {1}:{2}".format(sha1_layer.hexdigest(), filename, tensor.name)) # noqa: NP100
71-
7268
sha256_layer = hashlib.sha256()
7369
sha256_layer.update(tensor.data.data)
7470
print("sha256 {0} {1}:{2}".format(sha256_layer.hexdigest(), filename, tensor.name)) # noqa: NP100
7571

72+
sha1_layer = hashlib.sha1()
73+
sha1_layer.update(tensor.data.data)
74+
print("sha1 {0} {1}:{2} (deprecated)".format(sha1_layer.hexdigest(), filename, tensor.name)) # noqa: NP100
75+
7676
sha1.update(tensor.data.data)
7777
sha256.update(tensor.data.data)
7878
uuidv5_sha1.update(tensor.data.data)
7979

8080
# Flush Hash Progress Bar
8181
bar.close()
8282

83-
# Display Hash Output
84-
print("sha1 {0} {1}".format(sha1.hexdigest(), filename)) # noqa: NP100
83+
# Display Hash Output (SHA256 first as it's more secure than SHA1)
8584
print("sha256 {0} {1}".format(sha256.hexdigest(), filename)) # noqa: NP100
85+
print("sha1 {0} {1} (deprecated, use sha256)".format(sha1.hexdigest(), filename)) # noqa: NP100
8686
print("uuid {0} {1}".format(uuid.UUID(bytes=uuidv5_sha1.digest()[:16], version=5), filename)) # noqa: NP100
8787

8888

tools/server/tests/unit/test_chat_completion.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,10 @@
1+
import os
12
import pytest
23
from openai import OpenAI
34
from utils import *
45

6+
TEST_API_KEY = os.getenv("LLAMA_SERVER_TEST_API_KEY", "dummy")
7+
58
server: ServerProcess
69

710
@pytest.fixture(autouse=True)
@@ -100,7 +103,7 @@ def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_conte
100103
def test_chat_completion_with_openai_library():
101104
global server
102105
server.start()
103-
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
106+
client = OpenAI(api_key=TEST_API_KEY, base_url=f"http://{server.server_host}:{server.server_port}/v1")
104107
res = client.chat.completions.create(
105108
model="gpt-3.5-turbo-instruct",
106109
messages=[
@@ -293,7 +296,7 @@ def test_chat_completion_with_timings_per_token():
293296
def test_logprobs():
294297
global server
295298
server.start()
296-
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
299+
client = OpenAI(api_key=TEST_API_KEY, base_url=f"http://{server.server_host}:{server.server_port}/v1")
297300
res = client.chat.completions.create(
298301
model="gpt-3.5-turbo-instruct",
299302
temperature=0.0,
@@ -320,7 +323,7 @@ def test_logprobs():
320323
def test_logprobs_stream():
321324
global server
322325
server.start()
323-
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
326+
client = OpenAI(api_key=TEST_API_KEY, base_url=f"http://{server.server_host}:{server.server_port}/v1")
324327
res = client.chat.completions.create(
325328
model="gpt-3.5-turbo-instruct",
326329
temperature=0.0,
@@ -371,7 +374,7 @@ def test_logit_bias():
371374
tokens = res.body["tokens"]
372375
logit_bias = {tok: -100 for tok in tokens}
373376

374-
client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
377+
client = OpenAI(api_key=TEST_API_KEY, base_url=f"http://{server.server_host}:{server.server_port}/v1")
375378
res = client.chat.completions.create(
376379
model="gpt-3.5-turbo-instruct",
377380
temperature=0.0,

0 commit comments

Comments
 (0)