Commit 8e3108f

Establish basic compiler warnings, and fix a few style issues (#3039)
Signed-off-by: Jared Van Bortel <[email protected]>
1 parent: 3165e1d

12 files changed (+71 / -20 lines)

common/common.cmake

Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+function(gpt4all_add_warning_options target)
+    if (MSVC)
+        return()
+    endif()
+    target_compile_options("${target}" PRIVATE
+        # base options
+        -Wall
+        -Wextra
+        # extra options
+        -Wcast-align
+        -Wextra-semi
+        -Wformat=2
+        -Wmissing-include-dirs
+        -Wnull-dereference
+        -Wstrict-overflow=2
+        -Wvla
+        # errors
+        -Werror=format-security
+        -Werror=init-self
+        -Werror=pointer-arith
+        -Werror=undef
+        # disabled warnings
+        -Wno-sign-compare
+        -Wno-unused-parameter
+        -Wno-unused-function
+        -Wno-unused-variable
+        )
+    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+        target_compile_options("${target}" PRIVATE
+            -Wduplicated-branches
+            -Wduplicated-cond
+            -Wlogical-op
+            -Wno-reorder
+            -Wno-null-dereference
+            )
+    elseif (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
+        target_compile_options("${target}" PRIVATE
+            -Wunreachable-code-break
+            -Wunreachable-code-return
+            -Werror=pointer-integer-compare
+            -Wno-reorder-ctor
+            )
+    endif()
+endfunction()
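
As a rough illustration (not part of this commit), the snippet below shows the kind of C++ the new base flags object to. It builds as a GNU extension without the options, but once gpt4all_add_warning_options is applied to a target, the VLA draws a -Wvla warning and the void* arithmetic is promoted to a hard error by -Werror=pointer-arith.

    // Hypothetical example only -- not project code.
    #include <cstddef>

    void fill_region(void *base, std::size_t n)
    {
        char scratch[n];        // -Wvla: variable-length array
        (void)scratch;
        void *next = base + 1;  // -Wpointer-arith; -Werror=pointer-arith makes this an error
        (void)next;
    }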

gpt4all-backend/CMakeLists.txt

Lines changed: 5 additions & 2 deletions
@@ -1,4 +1,7 @@
 cmake_minimum_required(VERSION 3.23) # for FILE_SET
+
+include(../common/common.cmake)
+
 set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
@@ -94,8 +97,6 @@ if (LLMODEL_ROCM)
     list(APPEND BUILD_VARIANTS rocm rocm-avxonly)
 endif()
 
-set(CMAKE_VERBOSE_MAKEFILE ON)
-
 # Go through each build variant
 foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Determine flags
@@ -151,6 +152,7 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Add each individual implementations
     add_library(llamamodel-mainline-${BUILD_VARIANT} SHARED
         src/llamamodel.cpp src/llmodel_shared.cpp)
+    gpt4all_add_warning_options(llamamodel-mainline-${BUILD_VARIANT})
     target_compile_definitions(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
         LLAMA_VERSIONS=>=3 LLAMA_DATE=999999)
     target_include_directories(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
@@ -169,6 +171,7 @@ add_library(llmodel
     src/llmodel_c.cpp
     src/llmodel_shared.cpp
 )
+gpt4all_add_warning_options(llmodel)
 target_sources(llmodel PUBLIC
     FILE_SET public_headers TYPE HEADERS BASE_DIRS include
     FILES include/gpt4all-backend/llmodel.h

gpt4all-backend/include/gpt4all-backend/llmodel.h

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ class LLModel {
     virtual bool supportsEmbedding() const = 0;
     virtual bool supportsCompletion() const = 0;
     virtual bool loadModel(const std::string &modelPath, int n_ctx, int ngl) = 0;
-    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; };
+    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isEmbeddingModel(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isModelLoaded() const = 0;
     virtual size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) = 0;
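
The dropped trailing semicolon above is exactly what -Wextra-semi, enabled in common/common.cmake, reports; the same stray-semicolon cleanup recurs in chat.h, chatllm.cpp, and chatmodel.h below. A minimal standalone sketch (not project code):

    // With -Wextra-semi, GCC and Clang warn about the redundant ';'
    // after an in-class member function definition.
    struct Model {
        bool isBlacklisted() const { return false; };  // warning: extra ';'
        bool isLoaded() const { return true; }         // fine
    };

    int main() { return Model{}.isLoaded() ? 0 : 1; }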

gpt4all-backend/src/llmodel_shared.cpp

Lines changed: 1 addition & 1 deletion
@@ -260,7 +260,7 @@ void LLModel::generateResponse(std::function<bool(int32_t, const std::string&)>
         cachedTokens.push_back(new_tok.value());
         cachedResponse += new_piece;
 
-        auto accept = [this, &promptCtx, &cachedTokens, &new_tok, allowContextShift]() -> bool {
+        auto accept = [this, &promptCtx, &new_tok, allowContextShift]() -> bool {
            // Shift context if out of space
            if (promptCtx.n_past >= promptCtx.n_ctx) {
                (void)allowContextShift;
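
The accept lambda no longer captures cachedTokens, which its body never uses. Clang can diagnose that pattern with -Wunused-lambda-capture; a small sketch with made-up names, not project code:

    // The &history capture is never used inside the lambda; Clang reports it
    // under -Wunused-lambda-capture.
    #include <vector>

    int firstOrZero(const std::vector<int> &values, std::vector<int> &history)
    {
        auto peek = [&values, &history]() -> int {  // warning: unused capture 'history'
            return values.empty() ? 0 : values.front();
        };
        return peek();
    }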

gpt4all-chat/CMakeLists.txt

Lines changed: 3 additions & 0 deletions
@@ -1,5 +1,7 @@
 cmake_minimum_required(VERSION 3.25) # for try_compile SOURCE_FROM_VAR
 
+include(../common/common.cmake)
+
 set(APP_VERSION_MAJOR 3)
 set(APP_VERSION_MINOR 4)
 set(APP_VERSION_PATCH 1)
@@ -157,6 +159,7 @@ qt_add_executable(chat
     src/xlsxtomd.cpp src/xlsxtomd.h
     ${CHAT_EXE_RESOURCES}
 )
+gpt4all_add_warning_options(chat)
 
 qt_add_qml_module(chat
     URI gpt4all

gpt4all-chat/src/chat.h

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ class Chat : public QObject
     Q_PROPERTY(ResponseState responseState READ responseState NOTIFY responseStateChanged)
     Q_PROPERTY(QList<QString> collectionList READ collectionList NOTIFY collectionListChanged)
     Q_PROPERTY(QString modelLoadingError READ modelLoadingError NOTIFY modelLoadingErrorChanged)
-    Q_PROPERTY(QString tokenSpeed READ tokenSpeed NOTIFY tokenSpeedChanged);
+    Q_PROPERTY(QString tokenSpeed READ tokenSpeed NOTIFY tokenSpeedChanged)
     Q_PROPERTY(QString deviceBackend READ deviceBackend NOTIFY loadedModelInfoChanged)
     Q_PROPERTY(QString device READ device NOTIFY loadedModelInfoChanged)
     Q_PROPERTY(QString fallbackReason READ fallbackReason NOTIFY loadedModelInfoChanged)

gpt4all-chat/src/chatllm.cpp

Lines changed: 1 addition & 1 deletion
@@ -585,7 +585,7 @@ bool ChatLLM::loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelLoadPro
 
     modelLoadProps.insert("$duration", modelLoadTimer.elapsed() / 1000.);
     return true;
-};
+}
 
 bool ChatLLM::isModelLoaded() const
 {

gpt4all-chat/src/chatmodel.h

Lines changed: 2 additions & 2 deletions
@@ -65,8 +65,8 @@ struct ChatItem
     Q_PROPERTY(bool thumbsDownState MEMBER thumbsDownState)
     Q_PROPERTY(QList<ResultInfo> sources MEMBER sources)
     Q_PROPERTY(QList<ResultInfo> consolidatedSources MEMBER consolidatedSources)
-    Q_PROPERTY(QList<PromptAttachment> promptAttachments MEMBER promptAttachments);
-    Q_PROPERTY(QString promptPlusAttachments READ promptPlusAttachments);
+    Q_PROPERTY(QList<PromptAttachment> promptAttachments MEMBER promptAttachments)
+    Q_PROPERTY(QString promptPlusAttachments READ promptPlusAttachments)
 
 public:
     QString promptPlusAttachments() const

gpt4all-chat/src/database.cpp

Lines changed: 7 additions & 5 deletions
@@ -296,10 +296,12 @@ static bool selectAllUncompletedChunks(QSqlQuery &q, QHash<IncompleteChunk, QStr
     while (q.next()) {
         QString collection = q.value(0).toString();
         IncompleteChunk ic {
-            /*embedding_model =*/ q.value(1).toString(),
-            /*chunk_id =*/ q.value(2).toInt(),
-            /*folder_id =*/ q.value(3).toInt(),
-            /*text =*/ q.value(4).toString(),
+            /*EmbeddingKey*/ {
+                .embedding_model = q.value(1).toString(),
+                .chunk_id = q.value(2).toInt(),
+            },
+            /*folder_id =*/ q.value(3).toInt(),
+            /*text =*/ q.value(4).toString(),
         };
         chunks[ic] << collection;
     }
@@ -1659,7 +1661,7 @@ void Database::scanQueue()
     if (info.isPdf()) {
         QPdfDocument doc;
         if (doc.load(document_path) != QPdfDocument::Error::None) {
-            qWarning() << "ERROR: Could not load pdf" << document_id << document_path;;
+            qWarning() << "ERROR: Could not load pdf" << document_id << document_path;
             return updateFolderToIndex(folder_id, countForFolder);
         }
         title = doc.metaData(QPdfDocument::MetaDataField::Title).toString();
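
The rewritten initializer wraps the EmbeddingKey portion of IncompleteChunk in its own braced sub-initializer and names its fields with designated initializers, so the compiler checks the field mapping instead of relying on comments alone. A standalone sketch compiled as C++20, with the struct layout inferred from the diff (the real definitions are not shown here):

    // Field names follow the diff; types and layout are assumptions.
    #include <string>

    struct EmbeddingKey {
        std::string embedding_model;
        int         chunk_id;
    };

    struct IncompleteChunk {
        EmbeddingKey key;   // may be a base class rather than a member in the real code
        int          folder_id;
        std::string  text;
    };

    int main()
    {
        IncompleteChunk ic {
            /*EmbeddingKey*/ {
                .embedding_model = "example-model",
                .chunk_id = 42,
            },
            /*folder_id =*/ 7,
            /*text =*/ "example chunk",
        };
        return ic.key.chunk_id == 42 ? 0 : 1;
    }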

gpt4all-chat/src/database.h

Lines changed: 0 additions & 1 deletion
@@ -176,7 +176,6 @@ class ChunkStreamer {
     QString m_author;
     QString m_subject;
     QString m_keywords;
-    bool m_atStart;
 
     // working state
     QString m_chunk; // has a trailing space for convenience
