@@ -610,7 +610,7 @@ enum llm_tensor {
     LLM_TENSOR_CLS_OUT,
 };
 
-static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
+static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
     {
         LLM_ARCH_LLAMA,
         {
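
Note on the LLM_TENSOR_NAMES change: storing const char * instead of std::string means each entry points straight at a string literal in the binary's read-only data, so no per-entry string objects are constructed during static initialization (and no matching destructors are registered for program exit). A standalone sketch of the trade-off, with illustrative keys and names rather than the real table:

    #include <map>
    #include <string>

    // std::string values: every entry copies its literal into a string object
    // at static-initialization time, and destructor calls are registered for
    // program exit - extra generated code per entry.
    static const std::map<int, std::string> heavy = { {0, "token_embd"}, {1, "output_norm"} };

    // const char * values: entries just point at literals already stored in
    // the binary; no per-entry constructors, destructors, or copies.
    static const std::map<int, const char *> light = { {0, "token_embd"}, {1, "output_norm"} };
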
@@ -1566,32 +1566,32 @@ struct LLM_TN {
         return LLM_TENSOR_NAMES.at(arch).at(tensor);
     }
 
-    std::string operator()(llm_tensor tensor, const std::string & suffix) const {
+    std::string operator()(llm_tensor tensor, const char * suffix) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
+        return std::string(LLM_TENSOR_NAMES.at(arch).at(tensor)) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, int bid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid);
     }
 
-    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
+    std::string operator()(llm_tensor tensor, const char * suffix, int bid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid) + "." + suffix;
     }
 
-    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
+    std::string operator()(llm_tensor tensor, const char * suffix, int bid, int xid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid) + "." + suffix;
     }
 };
 
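
The entries in LLM_TENSOR_NAMES are printf-style patterns (per-layer tensors carry a %d placeholder for the block id), which is why the bid/xid overloads run them through ::format. Two details worth noting: the suffix parameters become const char * so that callers passing literals like "weight" no longer construct a temporary std::string, and the plain-suffix overload now needs an explicit std::string(...) wrapper, because applying + to two raw const char * pointers is ill-formed. A hedged usage sketch; the exact patterns are assumptions in the spirit of the llama.cpp naming scheme (e.g. "blk.%d.attn_q"):

    LLM_TN tn(LLM_ARCH_LLAMA);

    std::string a = tn(LLM_TENSOR_OUTPUT, "weight");    // e.g. "output.weight"
    std::string b = tn(LLM_TENSOR_ATTN_Q, "weight", 2); // e.g. "blk.2.attn_q.weight"
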
@@ -4916,7 +4916,7 @@ struct llama_model_loader {
     static const int TENSOR_NOT_REQUIRED = 1;
     static const int TENSOR_DUPLICATED = 2;
 
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0) {
         const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
 
         if (cur == NULL) {
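
Why std::initializer_list here: call sites pass brace lists such as {n_embd, n_vocab}. Bound to a const std::vector<int64_t> &, each of those lists materialized a temporary vector, one heap allocation per tensor; an initializer_list binds to a compiler-generated backing array with no allocation. A minimal self-contained sketch of the same parameter style (n_elements is a hypothetical helper, not part of the loader):

    #include <cstdint>
    #include <functional>
    #include <initializer_list>
    #include <numeric>

    // Accepts the same braced call syntax as create_tensor's 'ne' parameter,
    // but binds to a compiler-generated array - no temporary std::vector.
    static int64_t n_elements(const std::initializer_list<int64_t> & ne) {
        return std::accumulate(ne.begin(), ne.end(), int64_t(1), std::multiplies<int64_t>());
    }

    // usage: n_elements({4096, 32000}) == 4096LL * 32000
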
@@ -4926,7 +4926,7 @@ struct llama_model_loader {
         return create_tensor_for(ctx, cur, flags & TENSOR_DUPLICATED);
     }
 
-    struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::vector<int64_t> & ne, size_t offset, bool required = true) {
+    struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true) {
         const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);
 
         if (cur == NULL) {
@@ -4939,7 +4939,7 @@ struct llama_model_loader {
 
         std::array<int64_t, GGML_MAX_DIMS> dims;
         for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
-            dims[i] = i < ne.size() ? ne[i] : 1;
+            dims[i] = i < ne.size() ? ne.begin()[i] : 1;
         }
 
         struct ggml_tensor * tensor = ggml_view_4d(ctx, base,
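
The ne.begin()[i] spelling above is forced by the new parameter type: std::initializer_list provides only begin(), end(), and size(), with no operator[]. Since begin() returns a raw const pointer into the backing array, begin()[i] is ordinary pointer indexing. A small sketch of the same dimension-padding logic:

    #include <cstddef>
    #include <cstdint>
    #include <initializer_list>

    // Pads missing trailing dimensions with 1, mirroring the loop above.
    static int64_t dim_or_one(const std::initializer_list<int64_t> & ne, size_t i) {
        return i < ne.size() ? ne.begin()[i] : 1;
    }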