
Commit c3357b7

Enable more warning flags, and fix more warnings (#3065)
Signed-off-by: Jared Van Bortel <[email protected]>
Parent: eed92fd

File tree: 16 files changed (+27 / -69 lines)

common/common.cmake

Lines changed: 0 additions & 3 deletions

@@ -11,7 +11,6 @@ function(gpt4all_add_warning_options target)
         -Wextra-semi
         -Wformat=2
         -Wmissing-include-dirs
-        -Wnull-dereference
         -Wstrict-overflow=2
         -Wvla
         # errors
@@ -22,8 +21,6 @@ function(gpt4all_add_warning_options target)
         # disabled warnings
         -Wno-sign-compare
         -Wno-unused-parameter
-        -Wno-unused-function
-        -Wno-unused-variable
     )
     if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
         target_compile_options("${target}" PRIVATE
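
Note: dropping -Wno-unused-function and -Wno-unused-variable from the "disabled warnings" block means GCC and Clang will now diagnose unused static functions and unused local variables in the affected targets. A minimal illustration of what these warnings catch; the file and names below are invented for the example and are not part of this commit:

    // example.cpp -- illustrative only
    // g++ -Wall -Wextra -c example.cpp

    // warning: 'int helper(int)' defined but not used [-Wunused-function]
    static int helper(int x) { return x * 2; }

    int compute(int n)
    {
        int unused = n + 1; // warning: unused variable 'unused' [-Wunused-variable]
        return n * n;
    }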

gpt4all-backend/include/gpt4all-backend/llmodel.h

Lines changed: 1 addition & 1 deletion

@@ -213,7 +213,7 @@ class LLModel {
 protected:
     // These are pure virtual because subclasses need to implement as the default implementation of
     // 'prompt' above calls these functions
-    virtual std::vector<Token> tokenize(PromptContext &ctx, std::string_view str, bool special = false) = 0;
+    virtual std::vector<Token> tokenize(std::string_view str, bool special = false) = 0;
     virtual bool isSpecialToken(Token id) const = 0;
     virtual std::string tokenToString(Token id) const = 0;
     virtual void initSampler(PromptContext &ctx) = 0;
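
The PromptContext parameter disappears from tokenize() because tokenization no longer needs to know, or temporarily fake, the decoding position; see the llmodel_shared.cpp hunk below. The comment above the declarations describes a template-method arrangement: the base class's non-virtual prompt() drives the flow and calls these pure-virtual hooks. A standalone sketch of that arrangement with the new signature, using toy classes rather than the real LLModel hierarchy:

    // Toy sketch of the template-method relationship the comment describes;
    // ModelBase/ToyModel are invented stand-ins, not gpt4all classes.
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <string_view>
    #include <vector>

    class ModelBase {
    public:
        using Token = int32_t;
        virtual ~ModelBase() = default;

        // Non-virtual driver, analogous to LLModel::prompt(): it calls the
        // pure-virtual hooks below, which is why every backend must implement them.
        void prompt(std::string_view text)
        {
            for (Token t : tokenize(text))
                std::cout << tokenToString(t);
            std::cout << '\n';
        }

    protected:
        // new signature: no PromptContext parameter
        virtual std::vector<Token> tokenize(std::string_view str, bool special = false) = 0;
        virtual std::string tokenToString(Token id) const = 0;
    };

    class ToyModel : public ModelBase {
    protected:
        std::vector<Token> tokenize(std::string_view str, bool) override
        {
            return std::vector<Token>(str.begin(), str.end()); // one "token" per byte
        }
        std::string tokenToString(Token id) const override
        {
            return std::string(1, static_cast<char>(id));
        }
    };

    int main()
    {
        ToyModel m;
        m.prompt("hello"); // prints "hello" via the virtual hooks
    }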

gpt4all-backend/src/llamamodel.cpp

Lines changed: 1 addition & 1 deletion

@@ -511,7 +511,7 @@ size_t LLamaModel::restoreState(std::span<const uint8_t> src)
     return llama_state_set_data(d_ptr->ctx, src.data(), src.size());
 }
 
-std::vector<LLModel::Token> LLamaModel::tokenize(PromptContext &ctx, std::string_view str, bool special)
+std::vector<LLModel::Token> LLamaModel::tokenize(std::string_view str, bool special)
 {
     bool atStart = m_tokenize_last_token == -1;
     bool insertSpace = atStart || isSpecialToken(m_tokenize_last_token);

gpt4all-backend/src/llamamodel_impl.h

Lines changed: 1 addition & 1 deletion

@@ -54,7 +54,7 @@ class LLamaModel : public LLModel {
     bool m_supportsCompletion = false;
 
 protected:
-    std::vector<Token> tokenize(PromptContext &ctx, std::string_view str, bool special) override;
+    std::vector<Token> tokenize(std::string_view str, bool special) override;
     bool isSpecialToken(Token id) const override;
     std::string tokenToString(Token id) const override;
     void initSampler(PromptContext &ctx) override;

gpt4all-backend/src/llmodel_shared.cpp

Lines changed: 7 additions & 15 deletions

@@ -90,41 +90,33 @@ void LLModel::prompt(const std::string &prompt,
         }
     }
 
-    auto old_n_past = promptCtx.n_past; // prepare to fake n_past for tokenize
-
     // tokenize the user prompt
     std::vector<Token> embd_inp;
     if (placeholders.empty()) {
         // this is unusual, but well-defined
         std::cerr << __func__ << ": prompt template has no placeholder\n";
-        embd_inp = tokenize(promptCtx, promptTemplate, true);
+        embd_inp = tokenize(promptTemplate, true);
     } else {
         // template: beginning of user prompt
         const auto &phUser = placeholders[0];
         std::string userPrefix(phUser.prefix());
-        if (!userPrefix.empty()) {
-            embd_inp = tokenize(promptCtx, userPrefix, true);
-            promptCtx.n_past += embd_inp.size();
-        }
+        if (!userPrefix.empty())
+            embd_inp = tokenize(userPrefix, true);
 
         // user input (shouldn't have special token processing)
-        auto tokens = tokenize(promptCtx, prompt, special);
+        auto tokens = tokenize(prompt, special);
         embd_inp.insert(embd_inp.end(), tokens.begin(), tokens.end());
-        promptCtx.n_past += tokens.size();
 
         // template: end of user prompt + start of assistant prompt
         size_t start = phUser.position() + phUser.length();
         size_t end = placeholders.size() >= 2 ? placeholders[1].position() : promptTemplate.length();
         auto userToAsst = promptTemplate.substr(start, end - start);
         if (!userToAsst.empty()) {
-            tokens = tokenize(promptCtx, userToAsst, true);
+            tokens = tokenize(userToAsst, true);
             embd_inp.insert(embd_inp.end(), tokens.begin(), tokens.end());
-            promptCtx.n_past += tokens.size();
         }
     }
 
-    promptCtx.n_past = old_n_past; // restore n_past so decodePrompt can increment it
-
     // decode the user prompt
     if (!decodePrompt(promptCallback, responseCallback, allowContextShift, promptCtx, embd_inp))
         return; // error
@@ -133,7 +125,7 @@ void LLModel::prompt(const std::string &prompt,
     if (!fakeReply) {
         generateResponse(responseCallback, allowContextShift, promptCtx);
     } else {
-        embd_inp = tokenize(promptCtx, *fakeReply, false);
+        embd_inp = tokenize(*fakeReply, false);
         if (!decodePrompt(promptCallback, responseCallback, allowContextShift, promptCtx, embd_inp, true))
             return; // error
     }
@@ -148,7 +140,7 @@ void LLModel::prompt(const std::string &prompt,
         asstSuffix = "\n\n"; // default to a blank link, good for e.g. Alpaca
     }
     if (!asstSuffix.empty()) {
-        embd_inp = tokenize(promptCtx, asstSuffix, true);
+        embd_inp = tokenize(asstSuffix, true);
         decodePrompt(promptCallback, responseCallback, allowContextShift, promptCtx, embd_inp);
     }
 }
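
With tokenize() no longer taking a PromptContext, the old_n_past save/restore and the per-fragment promptCtx.n_past bookkeeping above become unnecessary: the template pieces and the user input are simply tokenized and concatenated, and n_past is advanced only when decodePrompt() actually feeds the tokens to the model. A compilable sketch of the simplified assembly step; assemblePrompt and the toy tokenize below are invented stand-ins for the inline logic in LLModel::prompt():

    // Illustrative sketch only, not the real LLModel code.
    #include <cstdint>
    #include <string_view>
    #include <vector>

    using Token = int32_t;

    // toy stand-in for the virtual LLModel::tokenize(std::string_view, bool)
    static std::vector<Token> tokenize(std::string_view text, bool /*special*/)
    {
        return std::vector<Token>(text.begin(), text.end()); // one "token" per byte
    }

    static std::vector<Token> assemblePrompt(std::string_view userPrefix,
                                             std::string_view userText,
                                             std::string_view userToAsst)
    {
        std::vector<Token> embd_inp;

        auto append = [&](std::string_view piece, bool special) {
            if (piece.empty())
                return;
            auto toks = tokenize(piece, special);
            embd_inp.insert(embd_inp.end(), toks.begin(), toks.end());
            // no n_past bookkeeping here anymore; the decode step alone
            // is responsible for advancing the position
        };

        append(userPrefix, /*special=*/true);   // template: beginning of user prompt
        append(userText,   /*special=*/false);  // user input, no special-token parsing
        append(userToAsst, /*special=*/true);   // template: end of user / start of assistant
        return embd_inp;
    }

    int main()
    {
        auto toks = assemblePrompt("<|user|>\n", "Hello!", "\n<|assistant|>\n");
        return toks.empty() ? 1 : 0;
    }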

gpt4all-chat/deps/usearch

gpt4all-chat/src/chat.cpp

Lines changed: 1 addition & 1 deletion

@@ -31,7 +31,7 @@ Chat::Chat(QObject *parent)
     connectLLM();
 }
 
-Chat::Chat(bool isServer, QObject *parent)
+Chat::Chat(server_tag_t, QObject *parent)
     : QObject(parent)
     , m_id(Network::globalInstance()->generateUniqueId())
     , m_name(tr("Server Chat"))

gpt4all-chat/src/chat.h

Lines changed: 5 additions & 1 deletion

@@ -45,6 +45,10 @@ class Chat : public QObject
     QML_UNCREATABLE("Only creatable from c++!")
 
 public:
+    // tag for constructing a server chat
+    struct server_tag_t { explicit server_tag_t() = default; };
+    static inline constexpr server_tag_t server_tag = server_tag_t();
+
     enum ResponseState {
         ResponseStopped,
         LocalDocsRetrieval,
@@ -56,7 +60,7 @@ class Chat : public QObject
     Q_ENUM(ResponseState)
 
     explicit Chat(QObject *parent = nullptr);
-    explicit Chat(bool isServer, QObject *parent = nullptr);
+    explicit Chat(server_tag_t, QObject *parent = nullptr);
     virtual ~Chat();
     void destroy() { m_llmodel->destroy(); }
     void connectLLM();
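
Replacing the bool isServer constructor parameter with a dedicated tag type is the usual tag-dispatch idiom (compare std::piecewise_construct or std::in_place_t): a bare true at a call site says nothing about intent, and other values convert to bool silently, whereas Chat::server_tag is self-describing and cannot be supplied by accident. A small self-contained sketch of the idiom outside Qt; everything except the server_tag_t/server_tag names is invented for illustration:

    // Minimal illustration of the tag-dispatch idiom; Session is an invented class.
    #include <iostream>
    #include <string>

    class Session {
    public:
        // tag for constructing a server session, mirroring Chat::server_tag_t
        struct server_tag_t { explicit server_tag_t() = default; };
        static inline constexpr server_tag_t server_tag = server_tag_t();

        Session() : m_name("Client Session") {}
        explicit Session(server_tag_t) : m_name("Server Session") {}

        const std::string &name() const { return m_name; }

    private:
        std::string m_name;
    };

    int main()
    {
        Session normal;                      // ordinary session
        Session server(Session::server_tag); // intent is visible at the call site
        // Session oops(true);               // does not compile: no bool constructor to hit by accident

        std::cout << normal.name() << '\n' << server.name() << '\n';
    }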

gpt4all-chat/src/chatapi.h

Lines changed: 1 addition & 2 deletions

@@ -98,9 +98,8 @@ class ChatAPI : public QObject, public LLModel {
     // them as they are only called from the default implementation of 'prompt' which we override and
     // completely replace
 
-    std::vector<Token> tokenize(PromptContext &ctx, std::string_view str, bool special) override
+    std::vector<Token> tokenize(std::string_view str, bool special) override
     {
-        (void)ctx;
         (void)str;
         (void)special;
         throw std::logic_error("not implemented");
