@@ -608,7 +608,7 @@ enum llm_tensor {
     LLM_TENSOR_CLS_OUT,
 };
 
-static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
+static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
     {
         LLM_ARCH_LLAMA,
         {
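Why this change pays off, as a minimal sketch rather than anything taken from the commit itself: with `std::string` values, every entry of the static table heap-allocates and runs a constructor before `main()`, while with `const char *` the table only stores pointers to string literals already baked into the binary's read-only data. The toy table below is hypothetical; only the pattern mirrors the hunk above.

```cpp
#include <map>
#include <string>

// Hypothetical one-entry tables contrasting the two storage choices in the
// hunk above. NAMES_HEAVY constructs a std::string (heap allocation) per
// entry during static initialization; NAMES_LIGHT stores a bare pointer to
// a string literal that already lives in the binary.
static const std::map<int, std::string>  NAMES_HEAVY = { { 0, "token_embd" } };
static const std::map<int, const char *> NAMES_LIGHT = { { 0, "token_embd" } };

int main() {
    // Both lookups yield the same characters; only the storage cost differs.
    return NAMES_HEAVY.at(0) == NAMES_LIGHT.at(0) ? 0 : 1;
}
```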
@@ -1564,32 +1564,32 @@ struct LLM_TN {
         return LLM_TENSOR_NAMES.at(arch).at(tensor);
     }
 
-    std::string operator()(llm_tensor tensor, const std::string & suffix) const {
+    std::string operator()(llm_tensor tensor, const char * suffix) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
+        return std::string(LLM_TENSOR_NAMES.at(arch).at(tensor)) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, int bid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid);
     }
 
-    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
+    std::string operator()(llm_tensor tensor, const char * suffix, int bid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid) + "." + suffix;
     }
 
-    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
+    std::string operator()(llm_tensor tensor, const char * suffix, int bid, int xid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid) + "." + suffix;
     }
 };
 
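Two knock-on fixes in this hunk follow directly from the type change, sketched below with a stand-in `format_sketch` (llama.cpp's real `::format` is assumed here to be a printf-style helper taking a `const char *` pattern): adding two `const char *` values is ill-formed C++, so one operand must be promoted with `std::string(...)`; and since the stored pattern is already a `const char *`, the `.c_str()` call before formatting becomes redundant. The tensor name and layer index are illustrative values only.

```cpp
#include <cstdio>
#include <string>

// Stand-in for llama.cpp's ::format helper, assumed to be printf-style.
static std::string format_sketch(const char * fmt, int bid) {
    char buf[128];
    std::snprintf(buf, sizeof(buf), fmt, bid);
    return std::string(buf);
}

int main() {
    const char * name   = "blk.%d.attn_norm";
    const char * suffix = "weight";

    // const char * + const char * does not compile; promote one side first,
    // mirroring the std::string(...) wrap in the hunk above.
    std::string with_suffix = std::string("output") + "." + suffix;

    // The pattern is already a const char *, so no .c_str() is needed.
    std::string resolved = format_sketch(name, 12) + "." + suffix;

    std::printf("%s\n%s\n", with_suffix.c_str(), resolved.c_str());
    return 0;
}
```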
@@ -4918,7 +4918,7 @@ struct llama_model_loader {
     static const int TENSOR_NOT_REQUIRED = 1;
     static const int TENSOR_DUPLICATED = 2;
 
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0) {
        const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
 
        if (cur == NULL) {
@@ -4928,7 +4928,7 @@ struct llama_model_loader {
         return create_tensor_for(ctx, cur, flags & TENSOR_DUPLICATED);
     }
 
-    struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::vector<int64_t> & ne, size_t offset, bool required = true) {
+    struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true) {
         const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);
 
         if (cur == NULL) {
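The motivation for `std::initializer_list` in these two signatures, as a minimal sketch with a hypothetical function name and illustrative dimension values: call sites pass braced dimension lists such as `{n_embd, n_vocab}`. Bound to a `const std::vector<int64_t> &` parameter, each such call materializes a temporary vector on the heap; bound to an `initializer_list`, the argument is just a pointer and a length over a compiler-generated array.

```cpp
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Hypothetical stand-in for create_tensor's dimension parameter. A braced
// argument binds to the initializer_list directly: no heap allocation, just
// a pointer/length view over the backing array.
static void create_tensor_sketch(const std::initializer_list<int64_t> & ne) {
    std::printf("rank %zu:", ne.size());
    for (int64_t d : ne) {
        std::printf(" %lld", (long long) d);
    }
    std::printf("\n");
}

int main() {
    create_tensor_sketch({4096, 32000}); // prints "rank 2: 4096 32000"
    return 0;
}
```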
@@ -4941,7 +4941,7 @@ struct llama_model_loader {
 
         std::array<int64_t, GGML_MAX_DIMS> dims;
         for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
-            dims[i] = i < ne.size() ? ne[i] : 1;
+            dims[i] = i < ne.size() ? ne.begin()[i] : 1;
        }
 
        struct ggml_tensor * tensor = ggml_view_4d(ctx, base,
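The one wrinkle of the switch shows up in this hunk: `std::initializer_list` has no `operator[]`, which is why the loop now indexes through the raw pointer returned by `begin()`. A minimal standalone sketch of that padding loop follows; `MAX_DIMS` is an assumed stand-in for `GGML_MAX_DIMS`.

```cpp
#include <cstdint>
#include <cstdio>
#include <initializer_list>

int main() {
    // MAX_DIMS stands in for GGML_MAX_DIMS (assumed to be 4, as in ggml).
    constexpr size_t MAX_DIMS = 4;

    std::initializer_list<int64_t> ne = {8, 16};

    // initializer_list has no operator[]; begin() returns a raw pointer to
    // the backing array, so begin()[i] is how you index it. Missing trailing
    // dimensions are padded with 1, mirroring the loop above.
    int64_t dims[MAX_DIMS];
    for (size_t i = 0; i < MAX_DIMS; ++i) {
        dims[i] = i < ne.size() ? ne.begin()[i] : 1;
    }

    for (size_t i = 0; i < MAX_DIMS; ++i) {
        std::printf("%lld ", (long long) dims[i]); // prints "8 16 1 1"
    }
    std::printf("\n");
    return 0;
}
```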