diff --git a/src/CLR/Core/CLR_RT_HeapBlock_Array.cpp b/src/CLR/Core/CLR_RT_HeapBlock_Array.cpp
index a5293a8987..c655986912 100644
--- a/src/CLR/Core/CLR_RT_HeapBlock_Array.cpp
+++ b/src/CLR/Core/CLR_RT_HeapBlock_Array.cpp
@@ -18,8 +18,6 @@ HRESULT CLR_RT_HeapBlock_Array::CreateInstance(
     CLR_RT_HeapBlock_Array *pArray;
     CLR_RT_TypeDef_Index cls;
     CLR_RT_TypeDef_Instance inst{};
-    CLR_RT_TypeDescriptor desc{};
-    CLR_RT_ReflectionDef_Index workingReflex = reflex;
 
     reference.SetObjectReference(nullptr);
 
@@ -28,31 +26,12 @@ HRESULT CLR_RT_HeapBlock_Array::CreateInstance(
 
     if (reflex.kind != REFLECTION_TYPE)
     {
-        // check for typespec
-        if (reflex.kind == REFLECTION_TYPESPEC)
-        {
-            // get the type descriptor for the typespec
-            (desc.InitializeFromTypeSpec(reflex.data.typeSpec));
-
-            // check that this ends up being a reflecion type
-            if (desc.m_reflex.kind != REFLECTION_TYPE)
-            {
-                NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
-            }
-
-            // copy over to working reflex
-            workingReflex = desc.m_reflex;
-            workingReflex.levels++;
-        }
-        else
-        {
-            NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
-        }
+        NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
     }
 
-    if (workingReflex.levels == 1)
+    if (reflex.levels == 1)
     {
-        cls = workingReflex.data.type;
+        cls = reflex.data.type;
     }
     else
     {
@@ -115,7 +94,7 @@ HRESULT CLR_RT_HeapBlock_Array::CreateInstance(
 
     CLR_RT_HeapBlock ref;
     CLR_RT_TypeDef_Instance cls{};
-    CLR_RT_TypeSpec_Instance def{};
+    CLR_RT_TypeSpec_Instance tsInst{};
 
     memset(&ref, 0, sizeof(struct CLR_RT_HeapBlock));
 
@@ -123,9 +102,15 @@ HRESULT CLR_RT_HeapBlock_Array::CreateInstance(
     {
         NANOCLR_CHECK_HRESULT(ref.SetReflection(cls));
     }
-    else if (def.ResolveToken(tk, assm))
+    else if (tsInst.ResolveToken(tk, assm))
     {
-        NANOCLR_CHECK_HRESULT(ref.SetReflection((CLR_RT_TypeSpec_Index)def));
+        // Create a fake reflection index to pass the element type and levels.
+        CLR_RT_ReflectionDef_Index reflex{};
+        reflex.kind = REFLECTION_TYPE;
+        reflex.levels = tsInst.levels;
+        reflex.data.type = tsInst.cachedElementType;
+
+        NANOCLR_CHECK_HRESULT(ref.SetReflection(reflex));
     }
     else
     {
diff --git a/src/CLR/Core/Interpreter.cpp b/src/CLR/Core/Interpreter.cpp
index 1916bd7af6..4211e80831 100644
--- a/src/CLR/Core/Interpreter.cpp
+++ b/src/CLR/Core/Interpreter.cpp
@@ -3307,13 +3307,61 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg)
                {
                    case TBL_TypeSpec:
                    {
+                        // this has to provide the closed instance of the type in the context of the caller
                        CLR_RT_TypeSpec_Instance tsInst{};
 
-                        if (tsInst.ResolveToken(arg, assm) == false)
+                        if (tsInst.ResolveToken(arg, assm, &stack->m_call) == false)
                        {
                            NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
                        }
 
-                        evalPos[0].SetReflection((const CLR_RT_TypeSpec_Index &)tsInst.data);
+                        // Check if this is an array type
+                        if (tsInst.levels > 0)
+                        {
+                            // This is an array
+
+                            // Create a fake reflection index to pass the element type and levels.
+                            CLR_RT_ReflectionDef_Index reflex;
+                            reflex.kind = REFLECTION_TYPE;
+                            reflex.levels = tsInst.levels;
+
+                            // prefer generic type
+                            if (NANOCLR_INDEX_IS_VALID(tsInst.genericTypeDef) &&
+                                NANOCLR_INDEX_IS_INVALID(tsInst.cachedElementType))
+                            {
+                                reflex.data.type = tsInst.genericTypeDef;
+                            }
+                            else if (NANOCLR_INDEX_IS_VALID(tsInst.cachedElementType))
+                            {
+                                reflex.data.type = tsInst.cachedElementType;
+                            }
+                            else
+                            {
+                                NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+                            }
+
+                            evalPos[0].SetReflection(reflex);
+                        }
+                        else
+                        {
+                            // prefer generic type
+                            if (NANOCLR_INDEX_IS_VALID(tsInst.genericTypeDef) &&
+                                NANOCLR_INDEX_IS_INVALID(tsInst.cachedElementType))
+                            {
+                                evalPos[0].SetReflection((const CLR_RT_TypeSpec_Index &)tsInst.data);
+                            }
+                            else if (NANOCLR_INDEX_IS_VALID(tsInst.cachedElementType))
+                            {
+                                // set reflection with TypeDef instance
+                                CLR_RT_TypeDef_Instance cls{};
+                                cls.InitializeFromIndex(tsInst.cachedElementType);
+
+                                evalPos[0].SetReflection(cls);
+                            }
+                            else
+                            {
+                                NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+                            }
+                        }
                    }
                    break;
diff --git a/src/CLR/Core/TypeSystem.cpp b/src/CLR/Core/TypeSystem.cpp
index 57bee990d8..27cdd79f28 100644
--- a/src/CLR/Core/TypeSystem.cpp
+++ b/src/CLR/Core/TypeSystem.cpp
@@ -615,6 +615,9 @@ HRESULT CLR_RT_SignatureParser::Advance(Element &res)
 
                 // need to update the parser counter too
                 ParamCount = GenParamCount;
+
+                // reset the generic instance flag
+                IsGenericInst = false;
             }
 
             NANOCLR_SET_AND_LEAVE(S_OK);
@@ -709,7 +712,14 @@ bool CLR_RT_TypeSpec_Instance::InitializeFromIndex(const CLR_RT_TypeSpec_Index &
     CLR_RT_SignatureParser::Element element;
 
     // if this is a generic, advance another one
-    parser.Advance(element);
+    if (FAILED(parser.Advance(element)))
+    {
+        ClearInstance();
+
+        return false;
+    }
+
+    levels = element.Levels;
 
     if (element.DataType == DATATYPE_GENERICINST)
     {
@@ -722,9 +732,7 @@ bool CLR_RT_TypeSpec_Instance::InitializeFromIndex(const CLR_RT_TypeSpec_Index &
         return true;
     }
 
-    data = 0;
-    assembly = nullptr;
-    target = nullptr;
+    ClearInstance();
 
     return false;
 }
@@ -736,9 +744,15 @@ void CLR_RT_TypeSpec_Instance::ClearInstance()
     assembly = nullptr;
     target = nullptr;
+    levels = 0;
+    genericTypeDef.Clear();
+
     data = 0;
 }
 
-bool CLR_RT_TypeSpec_Instance::ResolveToken(CLR_UINT32 token, CLR_RT_Assembly *assm)
+bool CLR_RT_TypeSpec_Instance::ResolveToken(
+    CLR_UINT32 token,
+    CLR_RT_Assembly *assm,
+    const CLR_RT_MethodDef_Instance *caller)
 {
     NATIVE_PROFILE_CLR_CORE();
     if (assm && CLR_TypeFromTk(token) == TBL_TypeSpec)
@@ -758,6 +772,8 @@ bool CLR_RT_TypeSpec_Instance::ResolveToken(
 
        // if this is a generic, advance another one
        parser.Advance(element);
 
+        levels = element.Levels;
+
        if (element.DataType == DATATYPE_GENERICINST)
        {
@@ -765,6 +781,44 @@ bool CLR_RT_TypeSpec_Instance::ResolveToken(
            // this is a generic instance, so we need to advance one more time
            parser.Advance(element);
            genericTypeDef = element.Class;
        }
+        else if (element.DataType == DATATYPE_VAR)
+        {
+            // this is a type-generic slot (!T), resolve against the caller's closed generic
+
+            int pos = element.GenericParamPosition;
+
+            // Use the *caller's* bound genericType (Stack, etc.)
+            if (caller == nullptr || caller->genericType == nullptr)
+            {
+                ClearInstance();
+
+                return false;
+            }
+
+            auto &tsi = *caller->genericType;
+            CLR_UINT32 closedTsRow = tsi.TypeSpec();
+
+            Set(caller->genericType->Assembly(), closedTsRow);
+            assembly = g_CLR_RT_TypeSystem.m_assemblies[caller->genericType->Assembly() - 1];
+
+            target = assm->GetTypeSpec(closedTsRow);
+
+            NanoCLRDataType realDataType;
+
+            g_CLR_RT_TypeSystem.m_assemblies[caller->genericType->Assembly() - 1]
+                ->FindGenericParamAtTypeSpec(closedTsRow, (CLR_UINT32)pos, cachedElementType, realDataType);
+        }
+        else if (element.DataType == DATATYPE_MVAR)
+        {
+            ASSERT(false);
+        }
+        else
+        {
+            cachedElementType = element.Class;
+
+            genericTypeDef.Clear();
+        }
+
        return true;
    }
@@ -981,6 +1035,7 @@ bool CLR_RT_TypeDef_Instance::ResolveToken(
     const CLR_RT_MethodDef_Instance *caller)
 {
     NATIVE_PROFILE_CLR_CORE();
+
     if (assm)
     {
         CLR_UINT32 index = CLR_DataFromTk(tk);
diff --git a/src/CLR/Include/nanoCLR_Runtime.h b/src/CLR/Include/nanoCLR_Runtime.h
index e3d77e6608..4336ea58e6 100644
--- a/src/CLR/Include/nanoCLR_Runtime.h
+++ b/src/CLR/Include/nanoCLR_Runtime.h
@@ -2087,13 +2087,16 @@ struct CLR_RT_TypeSpec_Instance : public CLR_RT_TypeSpec_Index
     const CLR_RECORD_TYPESPEC *target;
     CLR_RT_TypeDef_Index genericTypeDef;
+    CLR_UINT32 levels;
+
+    CLR_RT_TypeDef_Index cachedElementType;
 
     //--//
 
     bool InitializeFromIndex(const CLR_RT_TypeSpec_Index &index);
 
     void ClearInstance();
 
-    bool ResolveToken(CLR_UINT32 tk, CLR_RT_Assembly *assm);
+    bool ResolveToken(CLR_UINT32 tk, CLR_RT_Assembly *assm, const CLR_RT_MethodDef_Instance *caller = nullptr);
 };
 
 //--//