diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 4f2c5a4919..5415a4a251 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -986,6 +986,24 @@ jobs: python testRegexMatching.py shell: bash + deeploy-test-dmas: + runs-on: ${{ needs.select-docker-image-and-runner.outputs.runner }} + needs: select-docker-image-and-runner + container: + image: ${{ needs.select-docker-image-and-runner.outputs.image }} + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + with: + submodules: recursive + - name: Build Deeploy + run: pip install -e . + - name: Run Test + run: | + cd DeeployTest + python testDmas.py + shell: bash + linting: runs-on: ${{ needs.select-docker-image-and-runner.outputs.runner }} needs: select-docker-image-and-runner diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e056c0213..4a9764f227 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,20 +2,52 @@ This file contains the changelog for the Deeploy project. The changelog is divided into sections based on the version of the project. Each section contains a list of pull requests, features, changes, fixes, and removals that were made in that version. 
## Unreleased (Planned Release Target: v0.2.1) + ### List of Pull Requests - Change order of typeMatching entries [#68](https://github.com/pulp-platform/Deeploy/pull/68) - Node Mangling to avoid duplication [#93](https://github.com/pulp-platform/Deeploy/pull/93) - Prepare Post v0.2.0 Release [#104](https://github.com/pulp-platform/Deeploy/pull/104) - Use Docker digests instead of arch-specific tags [#106](https://github.com/pulp-platform/Deeploy/pull/106) +- Refactor tiling code generation [#105](https://github.com/pulp-platform/Deeploy/pull/105) ### Added - Add manual type inference feature (CLI: `--input-type-map`/`--input-offset-map`) to resolve ambiguities when test inputs are not representative enough - Added a `testTypeInferenceDifferentTypes` test case to validate type inference for different input types - Added `_mangleNodeNames` function to avoid duplicate node mappings - Output Docker image digests per platform (`amd64`, `arm64`) after build, which is used to construct the multi-arch Docker manifest. This preventes registry clutter caused by unnecessary per-architecture Docker tags. +- AsyncDma abstraction of DMAs +- test runner per DMA and a script that tests all the DMAs +- generic Single/DoubleBufferingTilingCodeGeneration classes +- TilingVariableReplacementUpdate class that updates the variable replacement refs +- TilingHoistingMixIn class that encapsulates all the hoisting helper functions of tiling +- sorting of input memory allocations to allow references that live in the same memory level as the memory they are referencing +- a function that tests the tiling solution for correctness which currently only tests buffer allocation for byte alignment +- IntrospectiveCodeTransformation: `_indexPointer()`, `indexVars()`, `dereferenceVars()`. 
The `*Vars` functions index/dereference a list of variables (useful for tiling) +- NetworkContext: `unravelReference()` that unravels a `_ReferenceBuffer` until the base buffer +- NetworkContext: `is_object()` - helper function that determines whether the string represents a name of a local or global object +- NetworkContext: `is_buffer()` - helper function that determines whether the string represents a name of a buffer +- missing checks for environment variables +- `_permuteHyperRectangle` helper function ### Changed - Replaced platform-specific tags (`*-amd64`, `*-arm64`) with direct digest references in `Noelware/docker-manifest-action`. +- mchan HAL is now reduced to bare-bones +- refactor of the IntrospectiveCodeTransformation to work on the Mako template +- refactor of memory allocation code transformation passes +- _ReferenceBuffer accepts an optional `offset` argument to offset the reference +- NetworkContext: `hoistReference` - accepts the actual buffer as reference instead of name, accepts shape, offset, and override_type arguments, and returns the actual buffer, not its name +- `_mangleNodeRep` -> `_mangleOpRepr` - the canonical name we use is `OperatorRepresentation`. `NodeRep` and `ParseDict` are old iterations of the name. 
+- rename of permutation functions to follow this convention: `permute` is an action that permutes something, `permutation` is a function that generates a permutation +- `_permuteList` to just `_permute` +- removed manual buffer name mangling since we do it in the ExecutionBlock generate() function, simplifies templates +- we now check that buffer shapes/hyperrectangles/tiling ranks match which required changing a few `serializeTilingSolution` functions to preserve the same shape rank +- big refactor of the code generation part of the TilingExtension and needed changes to PULPOpen and Snitch due to it +- PULPClusterTilingSB and PULPClusterTilingDB now allow for transfers of any rank (dimensionality) +- PULP's final output diff is now calculated as absolute error, instead of just subtraction +- common code generation code between testMVP/generateNetwork/... was extracted into a single `generateTestNetwork` function +- in some functions, instead of passing the name of a buffer, the actual buffer is just passed +- tile function allows overriding the optimizer with external tilingSolution and memoryMap +- refactor of the permutation functions for clarity ### Fixed - Prevent node duplication for graphs generated via GraphSurgeon @@ -23,6 +55,7 @@ This file contains the changelog for the Deeploy project. The changelog is divid ### Removed - Delete outdated and unused `.gitlab-ci.yml` file +- dory_dma.c and dory_dma.h ## Release v0.2.0 (2025-07-08) [#103](https://github.com/pulp-platform/Deeploy/pull/103) This release containing major architectural changes, new platform support, enhanced simulation workflows, floating-point kernel support, training infrastructure for CCT models, memory allocation strategies, and documentation improvements. 
diff --git a/Deeploy/CommonExtensions/CodeTransformationPasses/Closure.py b/Deeploy/CommonExtensions/CodeTransformationPasses/Closure.py index b137266de6..8c1786dd5b 100644 --- a/Deeploy/CommonExtensions/CodeTransformationPasses/Closure.py +++ b/Deeploy/CommonExtensions/CodeTransformationPasses/Closure.py @@ -109,7 +109,7 @@ def _generateClosureStruct(self, ctxt: NetworkContext, executionBlock: Execution closureStruct: Dict[str, Union[Pointer, Immediate, Struct]] = {} makoDynamicReferences = self.extractDynamicReferences(ctxt, executionBlock, True) - for arg in list(dict.fromkeys(makoDynamicReferences)): + for arg in makoDynamicReferences: ref = ctxt.lookup(arg) if isinstance(ref, TransientBuffer): closureStructArgsType[ctxt._mangle(arg)] = PointerClass(VoidType) @@ -202,7 +202,7 @@ def _generateClosureStruct(self, ctxt: NetworkContext, executionBlock: Execution # Add closure struct info to operatorRepresentation closureStructArgsType = {} closureStruct = {} - makoDynamicReferences = self.extractDynamicReferences(ctxt, executionBlock, True) + makoDynamicReferences = self.extractDynamicReferences(ctxt, executionBlock, unrollStructs = True) filteredMakoDynamicReferences = [] diff --git a/Deeploy/CommonExtensions/CodeTransformationPasses/IntrospectiveCodeTransformation.py b/Deeploy/CommonExtensions/CodeTransformationPasses/IntrospectiveCodeTransformation.py index acdcc0d09c..51d7dba617 100644 --- a/Deeploy/CommonExtensions/CodeTransformationPasses/IntrospectiveCodeTransformation.py +++ b/Deeploy/CommonExtensions/CodeTransformationPasses/IntrospectiveCodeTransformation.py @@ -23,16 +23,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy import types from typing import Dict, List import mako.codegen as codegen from mako.lexer import Lexer -from mako.parsetree import Expression, TemplateNode +from mako.parsetree import Expression, TemplateNode, Text +from mako.template import Template from Deeploy.AbstractDataTypes import Pointer, Struct -from Deeploy.DeeployTypes import ExecutionBlock, NetworkContext, NodeTemplate, OperatorRepresentation, VariableBuffer +from Deeploy.DeeployTypes import ExecutionBlock, NetworkContext, OperatorRepresentation, VariableBuffer _NULL: str = "NULL" @@ -42,65 +42,76 @@ class IntrospectiveCodeTransformationMixIn(): parseTreeDict: Dict[int, TemplateNode] = {} @staticmethod - def _generateParseTree(template: NodeTemplate) -> TemplateNode: - return Lexer(template.template._source).parse() + def _generateParseTree(template: Template) -> TemplateNode: + return Lexer(template._source).parse() @staticmethod - def _reconstructCode(template: NodeTemplate, node: TemplateNode): - - def fixupParseTree(parseTree: TemplateNode) -> TemplateNode: - nodes = [] - prevLine = 0 - prevPos = 0 - for node in parseTree.nodes: - - newNode = copy.copy(node) - offset = len(node.source) - - # Expression contain the actual expression + the symbols "${}", i.e. 
3 offset symbols - if isinstance(newNode, Expression): - offset += 3 + def _reconstructCode(template: Template, node: TemplateNode) -> Template: + lexer = Lexer(template._source) + source = codegen.compile( + node, + template.uri, + None, + default_filters = template.default_filters, + buffer_filters = template.buffer_filters, + imports = template.imports, + future_imports = template.future_imports, + source_encoding = lexer.encoding, + generate_magic_comment = True, + strict_undefined = template.strict_undefined, + enable_loop = template.enable_loop, + reserved_names = template.reserved_names, + ) + module = types.ModuleType(template.module_id) + code = compile(source, template.module_id, "exec") + exec(code, module.__dict__, module.__dict__) - prevPos = prevPos + offset + template._code = code + template.module = module + template.callable_ = template.module.render_body + return template - if prevLine != node.lineno: - prevPos = node.pos + @staticmethod + def _indexPointer(parseTree: TemplateNode, ptrName: str, index: str) -> TemplateNode: + indexes = [i for i, node in enumerate(parseTree.nodes) if isinstance(node, Expression) and node.text == ptrName] - newNode.pos = prevPos - prevLine = node.lineno + for offset, idx in enumerate(indexes): + bracketOpen = Text("[", source = "[", lineno = 0, pos = 0, filename = None) + indexExpr = Expression(index, '', source = index, lineno = 0, pos = 0, filename = None) + bracketClose = Text("]", source = "]", lineno = 0, pos = 0, filename = None) + parseTree.nodes.insert(idx + 3 * offset + 1, bracketOpen) + parseTree.nodes.insert(idx + 3 * offset + 2, indexExpr) + parseTree.nodes.insert(idx + 3 * offset + 3, bracketClose) - nodes.append(newNode) + return parseTree - parseTree.nodes = nodes + @staticmethod + def indexVars(template: Template, varNames: List[str], index: str) -> None: + if len(varNames) == 0: + return + parseTree = IntrospectiveCodeTransformationMixIn._generateParseTree(template) + for name in varNames: + 
parseTree = IntrospectiveCodeTransformationMixIn._indexPointer(parseTree, name, index) + IntrospectiveCodeTransformationMixIn._reconstructCode(template, parseTree) - return parseTree + @staticmethod + def _dereferencePointer(parseTree: TemplateNode, ptrName: str) -> TemplateNode: + indexes = [i for i, node in enumerate(parseTree.nodes) if isinstance(node, Expression) and node.text == ptrName] - node = fixupParseTree(node) + for offset, idx in enumerate(indexes): + text = Text("*", source = "*", lineno = 0, pos = 0, filename = None) + parseTree.nodes.insert(idx + offset, text) - temp = template.template - lexer = Lexer(temp._source) - source = codegen.compile( - node, - temp.uri, - None, - default_filters = temp.default_filters, - buffer_filters = temp.buffer_filters, - imports = temp.imports, - future_imports = temp.future_imports, - source_encoding = lexer.encoding, - generate_magic_comment = True, - strict_undefined = temp.strict_undefined, - enable_loop = temp.enable_loop, - reserved_names = temp.reserved_names, - ) - module = types.ModuleType(temp.module_id) - code = compile(source, temp.module_id, "exec") - exec(code, module.__dict__, module.__dict__) + return parseTree - temp._code = code - temp.module = module - temp.callable_ = temp.module.render_body - template.template = temp + @staticmethod + def dereferenceVars(template: Template, varNames: List[str]) -> None: + if len(varNames) == 0: + return + parseTree = IntrospectiveCodeTransformationMixIn._generateParseTree(template) + for name in varNames: + parseTree = IntrospectiveCodeTransformationMixIn._dereferencePointer(parseTree, name) + IntrospectiveCodeTransformationMixIn._reconstructCode(template, parseTree) def extractDynamicReferences(self, ctxt: NetworkContext, @@ -112,7 +123,7 @@ def extractDynamicReferences(self, for codeSnippet in executionBlock.codeSnippets: template, operatorRepresentation = codeSnippet.template, codeSnippet.operatorRepresentation - newRefs = self._extractDynamicExpressions(ctxt, 
operatorRepresentation, template, unrollStructs, + newRefs = self._extractDynamicExpressions(ctxt, operatorRepresentation, template.template, unrollStructs, includeGobalReferences) makoDynamicReferences += newRefs @@ -132,11 +143,10 @@ def _fixCtxtOrdering(ctxt: NetworkContext, nameList: List[str]) -> List[str]: def _extractDynamicExpressions(self, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - template: NodeTemplate, + template: Template, unrollStructs = False, includeGobalReferences = False): - - codeHash = hash(template.template._source) + codeHash = hash(template._source) if codeHash in self.parseTreeDict.keys(): makoParseTree = self.parseTreeDict[codeHash] @@ -146,60 +156,43 @@ def _extractDynamicExpressions(self, self.parseTreeDict[codeHash] = makoParseTree # Filter parsing tree for expressions - makoExpressions = [node.text for node in makoParseTree.nodes if type(node) == Expression] + makoExpressions = [node.text for node in makoParseTree.nodes if isinstance(node, Expression)] - # Filter expressions for local variables contained in operatorRepresentation - makoLocalReferences = [ - node for node in makoExpressions - if ((node in operatorRepresentation) and type(operatorRepresentation[node]) == str and ( - operatorRepresentation[node] in ctxt.localObjects.keys())) + # Filter represented expressions + representedExpressions = [ + operatorRepresentation[expr] for expr in makoExpressions if expr in operatorRepresentation ] - # Filter expressions for global variables contained in operatorRepresentation - makoGlobalReferences = [ - node for node in makoExpressions - if ((node in operatorRepresentation) and type(operatorRepresentation[node]) == str and ( - operatorRepresentation[node] in ctxt.globalObjects.keys())) - ] + # Filter buffers from expressions + references = [expr for expr in representedExpressions if ctxt.is_buffer(expr)] + + if unrollStructs: + + def _unrollStructReferences(val: Struct) -> List[str]: + assert isinstance(val, 
Struct) + # Recursively unroll struct references + structReferences = [] + for field in val.value.values(): + if isinstance(field, Struct): + structReferences += _unrollStructReferences(field) + elif isinstance(field, Pointer) and field.referenceName != _NULL: + structReferences.append(field.referenceName) + return structReferences + + # Unroll local struct references + for ref in references: + if hasattr(ctxt.lookup(ref), "structDict"): + references += _unrollStructReferences(ctxt.lookup(ref).structDict) - def _unrollStructReferences(val) -> List[str]: - # Unroll struct references - structReferences = [] - if isinstance(val, Struct): - for key, _type in val.value.items(): - if isinstance(_type, Struct): - structReferences += _unrollStructReferences(val.value[key]) - elif isinstance(_type, Pointer) and val.value[key].referenceName != _NULL: - structReferences.append(val.value[key].referenceName) - return structReferences - - # Unroll local struct references - localReferences = [] - localStructReferences = [] - for ref in makoLocalReferences: - localReferences.append(operatorRepresentation[ref]) - if unrollStructs: - if ctxt.is_local(operatorRepresentation[ref]) and hasattr(ctxt.lookup(operatorRepresentation[ref]), - "structDict"): - localStructReferences += _unrollStructReferences( - ctxt.lookup(operatorRepresentation[ref]).structDict) - - # Unroll global struct references - globalReferences = [] - globalStructReferences = [] - for ref in makoGlobalReferences: - globalReferences.append(operatorRepresentation[ref]) - if unrollStructs: - if ctxt.is_global(operatorRepresentation[ref]) and hasattr(ctxt.lookup(operatorRepresentation[ref]), - "structDict"): - globalStructReferences += _unrollStructReferences( - ctxt.lookup(operatorRepresentation[ref]).structDict) + # Filter expressions for local variables contained in operatorRepresentation + localReferences = [ref for ref in references if ctxt.is_local(ref)] + + # Filter expressions for global variables contained in 
operatorRepresentation + globalReferences = [ref for ref in references if ctxt.is_global(ref)] # Filter for dynamically allocated tensors - dynamicLocalReferences = [ref for ref in localReferences + localStructReferences if ctxt.lookup(ref)._deploy] - dynamicGlobalReferences = [ - ref for ref in globalReferences + globalStructReferences if isinstance(ctxt.lookup(ref), VariableBuffer) - ] + dynamicLocalReferences = [ref for ref in localReferences if ctxt.lookup(ref)._deploy] + dynamicGlobalReferences = [ref for ref in globalReferences if isinstance(ctxt.lookup(ref), VariableBuffer)] if includeGobalReferences: return dynamicLocalReferences + dynamicGlobalReferences diff --git a/Deeploy/CommonExtensions/CodeTransformationPasses/MemoryAllocation.py b/Deeploy/CommonExtensions/CodeTransformationPasses/MemoryAllocation.py index b95d31a01b..2293ded8e5 100644 --- a/Deeploy/CommonExtensions/CodeTransformationPasses/MemoryAllocation.py +++ b/Deeploy/CommonExtensions/CodeTransformationPasses/MemoryAllocation.py @@ -30,7 +30,7 @@ from Deeploy.CommonExtensions.CodeTransformationPasses.IntrospectiveCodeTransformation import \ IntrospectiveCodeTransformationMixIn from Deeploy.DeeployTypes import CodeGenVerbosity, CodeTransformationPass, ExecutionBlock, NetworkContext, \ - NodeTemplate, StructBuffer, TransientBuffer, _NoVerbosity + NodeTemplate, StructBuffer, TransientBuffer, VariableBuffer, _NoVerbosity, _ReferenceBuffer class _ArgStructAllocateTemplate(NodeTemplate): @@ -77,112 +77,84 @@ def apply(self, class MemoryManagementGeneration(CodeTransformationPass, IntrospectiveCodeTransformationMixIn): - def __init__(self, memoryHierarchyRegex: Optional[str] = None): + def __init__(self, memoryLevelRegex: Optional[str] = None): super().__init__() - if memoryHierarchyRegex is not None: - self.regex = re.compile(memoryHierarchyRegex) + if memoryLevelRegex is not None: + self.regex = re.compile(memoryLevelRegex) else: self.regex = None - def _matchesRegex(self, ctxt: NetworkContext, key: 
str) -> bool: - _buffer = ctxt.lookup(key) - + def is_memory_level(self, buffer: VariableBuffer) -> bool: if self.regex is None: - return not hasattr(_buffer, "_memoryLevel") - - if not hasattr(_buffer, "_memoryLevel"): - return False - - ret = self.regex.findall(ctxt.lookup(key)._memoryLevel) - return ret != [] - - def _extractTransientBuffers(self, ctxt: NetworkContext, name: str) -> List[str]: - names = [] - - for key, _buffer in ctxt.localObjects.items(): - if isinstance(_buffer, TransientBuffer) and name in _buffer._users: - names.append(key) - - filteredNames = [key for key in names if self._matchesRegex(ctxt, key)] - - return filteredNames + return not hasattr(buffer, "_memoryLevel") + else: + return hasattr(buffer, "_memoryLevel") and self.regex.fullmatch(buffer._memoryLevel) is not None - def _getOutputNames(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, name: str) -> List[str]: - outputs = [] - references = self.extractDynamicReferences(ctxt, executionBlock, True) - localKeys = [key for key in references if ctxt.is_local(key)] + @staticmethod + def is_final_input(buffer: VariableBuffer, nodeName: str) -> bool: + return not isinstance(buffer, (StructBuffer, TransientBuffer)) and \ + len(buffer._users) > 0 and nodeName == buffer._users[-1] - filteredKeys = [key for key in localKeys if self._matchesRegex(ctxt, key)] + @staticmethod + def is_output(buffer: VariableBuffer, nodeName: str) -> bool: + return not isinstance(buffer, (StructBuffer, TransientBuffer)) and nodeName not in buffer._users - for key in filteredKeys: - _buffer = ctxt.lookup(key) - if isinstance(_buffer, (StructBuffer, TransientBuffer)): - continue - if name not in _buffer._users: - outputs.append(_buffer.name) + @staticmethod + def is_transient(buffer: VariableBuffer, nodeName: str) -> bool: + return isinstance(buffer, TransientBuffer) and nodeName in buffer._users - return list(dict.fromkeys(outputs)) + @staticmethod + def topologicallySortBuffers(buffers: 
List[VariableBuffer]) -> List[VariableBuffer]: + sortedBuffers = [] + unsortedBufferNames = [buff.name for buff in buffers] + lastLen = len(unsortedBufferNames) - def _getFinalInputNames(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, name: str) -> List[str]: - inputs = [] - references = self.extractDynamicReferences(ctxt, executionBlock, True) - localKeys = [key for key in references if ctxt.is_local(key)] + while len(unsortedBufferNames) > 0: + for buffer in buffers: + if isinstance(buffer, _ReferenceBuffer) and buffer._referenceName in unsortedBufferNames: + continue - filteredKeys = [key for key in localKeys if self._matchesRegex(ctxt, key)] + sortedBuffers.append(buffer) + unsortedBufferNames.remove(buffer.name) - for key in filteredKeys: - _buffer = ctxt.lookup(key) - if isinstance(_buffer, (StructBuffer, TransientBuffer)) or _buffer._users == []: - continue - if name == _buffer._users[-1]: - inputs.append(_buffer.name) + assert len(unsortedBufferNames) != lastLen, f"Circular reference detected." 
+ lastLen = len(unsortedBufferNames) - return list(dict.fromkeys(inputs)) + return sortedBuffers def apply(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, name: str, verbose: CodeGenVerbosity = _NoVerbosity) -> Tuple[NetworkContext, ExecutionBlock]: + references = self.extractDynamicReferences(ctxt, + executionBlock, + unrollStructs = True, + includeGobalReferences = False) + localBuffers = [ctxt.localObjects[ref] for ref in references] + memoryLevelBuffers = [buff for buff in localBuffers if self.is_memory_level(buff)] - outputNames = self._getOutputNames(ctxt, executionBlock, name) - inputNames = self._getFinalInputNames(ctxt, executionBlock, name) - transientBuffers = self._extractTransientBuffers(ctxt, name) + transients = [buff for buff in memoryLevelBuffers if self.is_transient(buff, name)] + outputs = [buff for buff in memoryLevelBuffers if self.is_output(buff, name)] + inputs = [buff for buff in memoryLevelBuffers if self.is_final_input(buff, name)] # We have to allocate the output buffers, unless they are global - - for buffer in list(reversed(outputNames)) + transientBuffers: - # Extract buffer info from context - nb = ctxt.lookup(buffer) - - # Check that it was not already allocated - assert ctxt.localObjects[nb.name]._live == False, f"Tried to allocate already live buffer {nb.name}" - - # Mark it as live - ctxt.localObjects[nb.name]._live = True - - # Add the allocation code to the execution block - executionBlock.addLeft(nb.allocTemplate, nb._bufferRepresentation()) - - for buffer in inputNames + transientBuffers: - # Extract buffer info from context - nb = ctxt.lookup(buffer) - - # Check that it was not already deallocated - assert ctxt.localObjects[nb.name]._live == True, f"Tried to deallocate already dead buffer {nb.name}" - - # Mark it as dead (not useful anymore) - ctxt.localObjects[nb.name]._live = False - - # Check for live ancestors (buffers that this is an alias of, that are still live), - # and add the deallocation code to the 
execution block if none found - if not nb.has_live_ancestors(ctxt = ctxt): - executionBlock.addRight(nb.deallocTemplate, nb._bufferRepresentation()) + for buffer in reversed(self.topologicallySortBuffers(outputs + transients)): + assert buffer._live == False, f"Tried to allocate already live buffer {buffer.name}" + buffer._live = True + executionBlock.addLeft(buffer.allocTemplate, buffer._bufferRepresentation()) + + for buffer in inputs + transients: + assert buffer._live == True, f"Tried to deallocate already dead buffer {buffer.name}" + buffer._live = False + # Don't deallocate if it's an alias of a live buffer + if not buffer.has_live_ancestors(ctxt = ctxt): + executionBlock.addRight(buffer.deallocTemplate, buffer._bufferRepresentation()) return ctxt, executionBlock -class MemoryPassthroughGeneration(MemoryManagementGeneration, IntrospectiveCodeTransformationMixIn): +class MemoryPassthroughGeneration(MemoryManagementGeneration): def __init__(self, memoryHierarchyRegex: Optional[str] = None): super().__init__(memoryHierarchyRegex) @@ -192,22 +164,23 @@ def apply(self, executionBlock: ExecutionBlock, name: str, verbose: CodeGenVerbosity = _NoVerbosity) -> Tuple[NetworkContext, ExecutionBlock]: - - outputNames = self._getOutputNames(ctxt, executionBlock, name) - inputNames = self._getFinalInputNames(ctxt, executionBlock, name) - transientBuffers = self._extractTransientBuffers(ctxt, name) - - # We have to allocate the output buffers, unless they are global - for buffer in outputNames + transientBuffers: - nb = ctxt.lookup(buffer) - - assert ctxt.localObjects[nb.name]._live == False, f"Tried to allocate already live buffer {nb.name}" - ctxt.localObjects[nb.name]._live = True - - for buffer in inputNames + transientBuffers: - nb = ctxt.lookup(buffer) - - assert ctxt.localObjects[nb.name]._live == True, f"Tried to deallocate already dead buffer {nb.name}" - ctxt.localObjects[nb.name]._live = False + references = self.extractDynamicReferences(ctxt, + executionBlock, + 
unrollStructs = True, + includeGobalReferences = False) + localBuffers = [ctxt.localObjects[ref] for ref in references] + memoryLevelBuffers = [buff for buff in localBuffers if self.is_memory_level(buff)] + + transients = [buff for buff in memoryLevelBuffers if self.is_transient(buff, name)] + outputs = [buff for buff in memoryLevelBuffers if self.is_output(buff, name)] + inputs = [buff for buff in memoryLevelBuffers if self.is_final_input(buff, name)] + + for buffer in outputs + transients: + assert buffer._live == False, f"Tried to allocate already live buffer {buffer.name}" + buffer._live = True + + for buffer in inputs + transients: + assert buffer._live == True, f"Tried to deallocate already dead buffer {buffer.name}" + buffer._live = False return ctxt, executionBlock diff --git a/Deeploy/CommonExtensions/DataTypes.py b/Deeploy/CommonExtensions/DataTypes.py index f88eef2851..050e5b44ff 100644 --- a/Deeploy/CommonExtensions/DataTypes.py +++ b/Deeploy/CommonExtensions/DataTypes.py @@ -23,7 +23,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Tuple, Type +from typing import Iterable, Tuple, Type, Union + +import numpy.typing as npt from Deeploy.AbstractDataTypes import FloatImmediate, IntegerImmediate @@ -111,4 +113,32 @@ class float64_t(FloatImmediate): *UnsignedIntegerDataTypes, ), key = lambda _type: _type.typeWidth)) -FloatDataTypes: Tuple[Type[FloatImmediate], ...] = (bfloat16_t, float16_t, float32_t, float64_t) \ No newline at end of file +FloatDataTypes: Tuple[Type[FloatImmediate], ...] 
= (bfloat16_t, float16_t, float32_t, float64_t) + + +def minimalIntegerType(value: Union[int, Iterable[int], npt.NDArray]) -> Type[IntegerImmediate]: + # Sort data types by typeWidth and signedness (unsigned types go first) + sorted_types = sorted( + IntegerDataTypes, + key = lambda t: (t.typeWidth, t.typeMin < 0), + ) + + for _type in sorted_types: + if _type.checkValue(value): + return _type + + raise RuntimeError(f"Couldn't find appropriate integer type for value: {value}") + + +def minimalFloatType(value: Union[float, Iterable[float], npt.NDArray]) -> Type[FloatImmediate]: + # Sort data types by typeWidth + sorted_types = sorted( + FloatDataTypes, + key = lambda t: t.typeWidth, + ) + + for _type in sorted_types: + if _type.checkValue(value): + return _type + + raise RuntimeError(f"Couldn't find appropriate float type for value: {value}") diff --git a/Deeploy/CommonExtensions/OptimizationPasses/TopologyOptimizationPasses/LoweringOptimizationPasses.py b/Deeploy/CommonExtensions/OptimizationPasses/TopologyOptimizationPasses/LoweringOptimizationPasses.py index c3887ab54d..4e95f9855b 100644 --- a/Deeploy/CommonExtensions/OptimizationPasses/TopologyOptimizationPasses/LoweringOptimizationPasses.py +++ b/Deeploy/CommonExtensions/OptimizationPasses/TopologyOptimizationPasses/LoweringOptimizationPasses.py @@ -24,7 +24,7 @@ # limitations under the License. 
from functools import partial -from typing import Iterable, List, Optional, Sequence, Tuple, Union +from typing import Iterable, List, Optional, Sequence, Tuple, TypeVar, Union import numpy as np import onnx_graphsurgeon as gs @@ -32,6 +32,7 @@ from Deeploy.CommonExtensions.OptimizationPasses.Matchers import Match from Deeploy.CommonExtensions.OptimizationPasses.PassClasses import ReplaceSequentialPatternPass, SequentialPass, \ contextagnostic +from Deeploy.TilingExtension.TilingCodegen import HyperRectangle def _createReshape(tensorIn: gs.Tensor, @@ -106,59 +107,53 @@ def _prependSqueezeDims(tensor: gs.Tensor, name: str, axis: Union[int, Sequence[ # Permute (0,1,2,3,...,N-2,N-1) -> (0,1,2,3,...,N-1,N-2) -def _permuteLastTwoDims(length: int) -> List[int]: - outList = list(range(length)) - tmp = outList[-1] - outList[-1] = outList[-2] - outList[-2] = tmp - return outList +def _permutationLastTwoDims(N: int) -> List[int]: + assert N >= 2, "N needs to be at least 2" + return list(range(N - 2)) + [N - 1, N - 2] # Permute (0,1,2,3,...,N-1) -> (0,2,3,...,N-1,1) -def _permuteNCHWtoNHWC(length: int) -> List[int]: - outList = list(range(length)) - outList.remove(1) - outList.append(1) - return outList +def _permutationNCHWtoNHWC(N: int) -> List[int]: + assert N >= 3, "N needs to be at least 3 for this to make any sense" + return [0] + list(range(2, N)) + [1] # Permute (0,1,2,3,...,N-1) -> (0,N-1,1,2,3,...,N-2) -def _permuteNHWCtoNCHW(length: int) -> List[int]: - outList = list(range(length)) - outList.remove(length - 1) - outList.insert(1, length - 1) - return outList +def _permutationNHWCtoNCHW(N: int) -> List[int]: + assert N >= 3, "N needs to be at least 3 for this to make any sense" + return [0, N - 1] + list(range(1, N - 1)) # Calculate permutation q = p^(-1) s.t.
q(p(i)) = i def _invertPermutation(permutation: List[int]) -> List[int]: - tuples = [] - for idx, i in enumerate(permutation): - tuples.append((i, idx)) - sortedTuples = sorted(tuples, key = lambda x: x[0]) - outPermutation = [] - for i in sortedTuples: - outPermutation.append(i[1]) - return outPermutation + inverse = [0] * len(permutation) + for idx, permIdx in enumerate(permutation): + inverse[permIdx] = idx + return inverse -def _permuteList(inputList: List, permutation: List[int]): - assert len(inputList) == len(permutation), "Permuted list and permutation must have equal length!" - outList = [] - for i in permutation: - outList.append(inputList[i]) - return outList +T = TypeVar('T') + + +def _permute(_list: Sequence[T], permutation: Sequence[int]) -> List[T]: + assert len(_list) == len(permutation), "Permuted list and permutation must have equal length!" + return [_list[i] for i in permutation] + + +def _permuteHyperRectangle(rect: HyperRectangle, permutation: List[int]) -> HyperRectangle: + assert len(rect.dims) == len(permutation), "Permutation list and HyperRectangle must have equal dimensionality!" 
+ return HyperRectangle(tuple(_permute(rect.offset, permutation)), tuple(_permute(rect.dims, permutation))) def _prependTransposeNode(anchor: gs.Variable, nodeName: str, permutation: Iterable[int], - invert: bool = False) -> (gs.Node, gs.Variable): + invert: bool = False) -> Tuple[gs.Node, gs.Variable]: if invert: - outShape = _permuteList(anchor.shape, _invertPermutation(permutation)) + outShape = _permute(anchor.shape, _invertPermutation(permutation)) else: - outShape = _permuteList(anchor.shape, permutation) + outShape = _permute(anchor.shape, permutation) anchorTransposeInput = gs.Variable(nodeName + "_Out", dtype = np.float32, shape = outShape) anchorTransposeNode = gs.Node(name = nodeName, @@ -176,9 +171,9 @@ def _appendTransposeNode(anchor: gs.Variable, invert: bool = False) -> (gs.Node, gs.Variable): if invert: - outShape = _permuteList(anchor.shape, _invertPermutation(permutation)) + outShape = _permute(anchor.shape, _invertPermutation(permutation)) else: - outShape = _permuteList(anchor.shape, permutation) + outShape = _permute(anchor.shape, permutation) anchorTransposeOutput = gs.Variable(nodeName + "_In", dtype = np.float32, shape = outShape) anchorTransposeNode = gs.Node(name = nodeName, @@ -210,7 +205,7 @@ def _transposeMatMulInputs_fun(graph: gs.Graph, match: Match, name: str): # Prepend transpose on A if it's transposed if gemmNode.attrs['transA'] != 0: anchorTransposeNode, anchorTransposeOutput = _appendTransposeNode(inputA, name + "_A", - _permuteLastTwoDims(len(inputA.shape))) + _permutationLastTwoDims(len(inputA.shape))) gemmNode.inputs[0] = anchorTransposeOutput gemmNode.attrs['transA'] = 0 graph.nodes.append(anchorTransposeNode) @@ -218,7 +213,7 @@ def _transposeMatMulInputs_fun(graph: gs.Graph, match: Match, name: str): # Prepend transpose on B if it's not transposed if gemmNode.attrs['transB'] != 1: anchorTransposeNode, anchorTransposeOutput = _appendTransposeNode(inputB, name + "_B", - _permuteLastTwoDims(len(inputB.shape))) + 
_permutationLastTwoDims(len(inputB.shape))) gemmNode.inputs[1] = anchorTransposeOutput gemmNode.attrs['transB'] = 1 graph.nodes.append(anchorTransposeNode) @@ -256,8 +251,8 @@ def _NCHWtoNHWC_fun(graph: gs.Graph, match: Match, name: str, default_channels_f inputNode = opNode.inputs[0] outputNode = opNode.outputs[0] - inPermute = _permuteNCHWtoNHWC(len(inputNode.shape)) - outPermute = _permuteNHWCtoNCHW(len(outputNode.shape)) + inPermute = _permutationNCHWtoNHWC(len(inputNode.shape)) + outPermute = _permutationNHWCtoNCHW(len(outputNode.shape)) inputTransposeNode, inputTransposeOutput = _appendTransposeNode(inputNode, name + "_TransposeIn", inPermute) outputTransposeNode, outputTransposeInput = _prependTransposeNode(outputNode, @@ -376,8 +371,8 @@ def _PULPDWNCHWtoNHWC_fun(graph: gs.Graph, match: Match, name: str, default_chan inputNode = opNode.inputs[0] outputNode = opNode.outputs[0] - inPermute = _permuteNCHWtoNHWC(len(inputNode.shape)) - outPermute = _permuteNHWCtoNCHW(len(outputNode.shape)) + inPermute = _permutationNCHWtoNHWC(len(inputNode.shape)) + outPermute = _permutationNHWCtoNCHW(len(outputNode.shape)) outputTransposeNode, outputTransposeInput = _prependTransposeNode(outputNode, name + "_TransposeOut", @@ -534,7 +529,7 @@ def _requantized_gemm_to_pw_fun(graph: gs.Graph, match: Match, name: str): # If transA is set then the matrix is of shape [B x K x M] and it needs to be transposed, otherwise its shape is [B x M x K] if 'transA' in requantizedGemm.attrs and requantizedGemm.attrs['transA'] == 1: - matrixATransposeNode, matrixA = _appendTransposeNode(matrixA, name, _permuteLastTwoDims(len(matrixA.shape))) + matrixATransposeNode, matrixA = _appendTransposeNode(matrixA, name, _permutationLastTwoDims(len(matrixA.shape))) graph.nodes.append(matrixATransposeNode) # Align dimensions for convolution @@ -551,7 +546,7 @@ def _requantized_gemm_to_pw_fun(graph: gs.Graph, match: Match, name: str): # If transB is set then the matrix is of shape [N x K] and it doesn't 
need to be transposed, otherwise its shape is [K x N] and it has to be transposed if not 'transB' in requantizedGemm.attrs or requantizedGemm.attrs['transB'] == 0: # matrixBTransposed, shape [N x K] - matrixBTransposeNode, matrixB = _appendTransposeNode(matrixB, name, _permuteLastTwoDims(len(matrixB.shape))) + matrixBTransposeNode, matrixB = _appendTransposeNode(matrixB, name, _permutationLastTwoDims(len(matrixB.shape))) graph.nodes.append(matrixBTransposeNode) # pwWeight, shape [N x 1 x 1 x K] matrixBExpandDimsNode, pwWeight = _appendExpandDims(matrixB, name, axis = (1, 2)) diff --git a/Deeploy/DeeployTypes.py b/Deeploy/DeeployTypes.py index c7392a6786..f3281f1759 100644 --- a/Deeploy/DeeployTypes.py +++ b/Deeploy/DeeployTypes.py @@ -35,7 +35,7 @@ from collections import OrderedDict, deque from dataclasses import dataclass from functools import reduce -from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, TypeVar, Union +from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Type, TypeVar, Union import mako import numpy as np @@ -118,7 +118,8 @@ def __init__(self, templateStr: str): """ self.template = _Template(templateStr, strict_undefined = True) - self.subTemplates = {} + self.subTemplates: Dict[str, Tuple[NodeTemplate, Callable[[NetworkContext, OperatorRepresentation], + Tuple[NetworkContext, OperatorRepresentation]]]] = {} self.subTemplateGenerators = {} def internalSize(self) -> int: @@ -535,22 +536,33 @@ class _ReferenceBuffer(VariableBuffer): """Helper class to hoist references to pre-established pointers; this is used most frequently in tiling to express an offset with respect to input or output tensors """ - allocTemplate = NodeTemplate("${type.typeName} ${name} = (${type.typeName}) ${objectName};") + allocTemplate = NodeTemplate("""\\ + % if offset is None: + ${type.typeName} ${name} = (${type.typeName}) ${referenceName};\\ + % else: + ${type.typeName} ${name} = (${type.typeName}) 
${referenceName} + ${offset};\\ + % endif + """) deallocTemplate = NodeTemplate("") initTemplate = NodeTemplate("") - def __init__(self, name: str = '', shape = [1], reference: Optional[VariableBuffer] = None): - - assert reference is not None, "Can't have a reference to None!" - + def __init__(self, + name: str, + reference: VariableBuffer, + shape: Tuple[int, ...] = (1,), + offset: Optional[Union[int, str, VariableBuffer]] = None): super().__init__(name, shape) - self._referencedBuffer = str(reference._instance) self._referenceName = reference.name + if isinstance(offset, VariableBuffer): + self._offset = offset.name + else: + self._offset = offset def _bufferRepresentation(self) -> Dict: - rep = super()._bufferRepresentation() - rep['objectName'] = self._referencedBuffer - return rep + repr = super()._bufferRepresentation() + repr['referenceName'] = self._referenceName + repr['offset'] = self._offset + return repr class NetworkContext(): @@ -573,13 +585,13 @@ def __init__(self, self.TransientBuffer = transientBuffer self.name = name - def dealiasBuffer(self, referenceName: str) -> str: - """Function to unravel reference instantiated in _ReferenceBuffer objects until the underlying VariableBuffer's name is returned + def dealiasBuffer(self, name: str) -> str: + """Function to find the underlying aliased VariableBuffer Parameters ---------- - referenceName : str - Name of the _ReferenceBuffer to unravel + name: str + Name of the VariableBuffer to dealias Returns ------- @@ -589,25 +601,42 @@ def dealiasBuffer(self, referenceName: str) -> str: Raises ------ Exception - Raises an Exception if references are circular, i.e. 
there - is no underlying VariableBuffer + Raises an Exception if aliases are circular """ - _buffer = self.lookup(referenceName) - if not hasattr(_buffer, "_alias"): - return referenceName - seenAliases: Set[str] = set() + alias = self.lookup(name) + while hasattr(alias, "_alias"): + seenAliases.add(alias.name) + alias = self.lookup(alias._alias) + assert alias.name not in seenAliases, "Circular aliasing detected!" + return alias.name - alias = _buffer._alias - while hasattr(self.lookup(alias), "_alias"): - seenAliases.add(alias) - alias = self.lookup(alias)._alias + def unravelReference(self, ref: VariableBuffer) -> VariableBuffer: + """Function to find the underlying referenced VariableBuffer + + Parameters + ---------- + ref : VariableBuffer + Buffer to unravel - if alias in seenAliases: - raise Exception("Circular aliasing detected!") + Returns + ------- + str + Name of the original VariableBuffer that was referenced - return alias + Raises + ------ + Exception + Raises an Exception if references are circular + + """ + seenRefs = set() + while isinstance(ref, _ReferenceBuffer): + seenRefs.add(ref.name) + ref = self.lookup(ref._referenceName) + assert ref.name not in seenRefs, "Circular reference found" + return ref def exportNetworkContext(self, folderPath: str, fileName: str): """Exports the NetworkContext as a pickled dictionary @@ -706,7 +735,7 @@ def _mangle(self, name: str, repr: bool = True) -> str: repStr = re.sub('\.', '_', self.name) + '_' + repStr return repStr - def add(self, obj: VariableBuffer, ctxt: str = 'local', _id: str = ""): + def add(self, obj: VariableBuffer, ctxt: Literal['local', 'global'] = 'local', _id: str = ""): """Adds a VariableBuffer object to the NetworkContext Parameters @@ -793,10 +822,7 @@ def is_global(self, name: str) -> bool: Returns true if the name matches with any global buffer """ - if name in self.globalObjects.keys(): - return True - else: - return False + return name in self.globalObjects def is_local(self, name: str) 
-> bool: """Checks whether a name is associated with a local buffer @@ -812,11 +838,42 @@ def is_local(self, name: str) -> bool: Returns ture if the name matches with any local buffer """ + return name in self.localObjects - if name in self.localObjects.keys(): - return True - else: + def is_object(self, value: Any) -> bool: + """Checks whether a value is an existing object name + + Parameters + ---------- + value : Any + Value to check + + Returns + ------- + bool + Returns True if the value is an existing object name + + """ + return isinstance(value, str) and (self.is_local(value) or self.is_global(value)) + + def is_buffer(self, value: Any) -> bool: + """Checks whether a value is an existing buffer name + + Parameters + ---------- + value : Any + Value to check + + Returns + ------- + bool + Returns True if the value is an existing buffer name + + """ + if not self.is_object(value): return False + obj = self.lookup(value) + return isinstance(obj, VariableBuffer) def hoistTransientBuffer(self, name: str, size: int) -> str: """Registers a new TransientBuffer in the local context @@ -901,48 +958,47 @@ def hoistConstantAndReference(self, constBuf: ConstantBuffer, pointerType: Type[ name of the registered _ReferenceBuffer """ - - name = constBuf.name constBuf._type = pointerType - self.add(constBuf, "global") + constBuf._instance = constBuf._type(constBuf.name, self) + ref = self.hoistReference(constBuf.name + "_ref", constBuf) + return ref.name - constBuf._instance = constBuf._type(name, self) - - refName = name + "_ref" - reference = self.hoistReference(name, refName) - - return refName - - def hoistReference(self, _reference: str, name: str) -> str: - """Helper function to register a _ReferenceBuffer to preexisting VariableBuffer + def hoistReference(self, + name: str, + reference: VariableBuffer, + shape: Tuple[int, ...]
= (1,), + offset: Union[int, str, VariableBuffer] = 0, + override_type: Optional[Type[BaseType]] = None) -> _ReferenceBuffer: + """Helper function to register a _ReferenceBuffer to a preexisting VariableBuffer Parameters ---------- - _reference : str - Name of the VariableBuffer that should be referenced name : str - Name of the _ReferenceBuffer that should be registered + Name of the _ReferenceBuffer to register + reference : VariableBuffer + Referenced VariableBuffer + shape: Tuple[int, ...] + Shape of the _ReferenceBuffer + offset: Union[int, str, VariableBuffer] + Offset from the reference + override_type: Optional[Type[BaseType]] + Optional argument to override the reference type Returns ------- - str - Returns the name of the newly registered _ReferenceBuffer + _ReferenceBuffer + Returns the newly registered _ReferenceBuffer """ - - assert _reference != name, f"Reference name {_reference} cannot be the same as {name}" - assert not self.is_local(name), f"{name} is already in context!" 
- - _object = self.lookup(_reference) - - referenceBuffer = _ReferenceBuffer(name, reference = _object) - referenceBuffer._type = _object._type - - self.add(referenceBuffer, 'local') - referenceBuffer._instance = _object._type(name, ctxt = self) - - return name + ref = _ReferenceBuffer(name, reference, shape, offset) + if override_type is not None: + ref._type = PointerClass(override_type) + else: + ref._type = reference._type + self.add(ref, 'local') + ref._instance = ref._type(name, ctxt = self) + return ref def hoistConstant(self, node: gs.Node, name: str = '', _type: Optional[Type[Pointer]] = None) -> str: """Register a ConstantBuffer extracted directly from a graphsurgeon Node @@ -1470,17 +1526,16 @@ def hoisting(self, ctxt: NetworkContext, **kwargs) -> Tuple[NetworkContext, List return newCtxt, transientBuffers + contextBuffers @staticmethod - def _mangleNodeRep(ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation) -> OperatorRepresentation: - parseDict = {} + def _mangleOpRepr(ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation) -> OperatorRepresentation: + mangledOpRepr = {} for key, value in operatorRepresentation.items(): - if type(value) == str and (ctxt.is_local(value) or - ctxt.is_global(value)) and not isinstance(ctxt.lookup(value), GlobalDefinition): - parseDict[key] = ctxt._mangle(value) + if ctxt.is_buffer(value): + mangledOpRepr[key] = ctxt._mangle(value) else: - parseDict[key] = value + mangledOpRepr[key] = value - return parseDict + return mangledOpRepr def generate(self, ctxt: NetworkContext, **kwargs) -> str: """Generates the code for all registered NodeTemplates and joins it to construct a single snippet @@ -1499,7 +1554,7 @@ def generate(self, ctxt: NetworkContext, **kwargs) -> str: return ("\n").join([ codeSnippet.template.generate( - ExecutionBlock._mangleNodeRep(ctxt, { + ExecutionBlock._mangleOpRepr(ctxt, { **codeSnippet.operatorRepresentation, **kwargs })) for codeSnippet in self.codeSnippets @@ -2455,15 
+2510,12 @@ def inputs(self) -> List[VariableBuffer]: """ inputs = [] - graphInputs = [tensor.name for tensor in self.graph.inputs] + for tensor in self.graph.inputs: + if self.ctxt.is_global(tensor.name): + buffer = self.ctxt.lookup(tensor.name) + if isinstance(buffer, self.ctxt.VariableBuffer) and len(buffer._users) > 0: + inputs.append(buffer) - for key, value in self.ctxt.globalObjects.items(): - if not isinstance(value, self.ctxt.VariableBuffer) or value._users == []: - continue - if key not in graphInputs: - continue - - inputs += [value] return inputs def outputs(self) -> List[VariableBuffer]: @@ -2477,16 +2529,12 @@ def outputs(self) -> List[VariableBuffer]: """ outputs = [] - graphOutputs = [tensor.name for tensor in self.graph.outputs] - - for key, value in self.ctxt.globalObjects.items(): - - if not isinstance(value, self.ctxt.VariableBuffer): - continue - if key not in graphOutputs: - continue + for tensor in self.graph.outputs: + if self.ctxt.is_global(tensor.name): + buffer = self.ctxt.lookup(tensor.name) + if isinstance(buffer, self.ctxt.VariableBuffer): + outputs.append(buffer) - outputs += [value] return outputs def codeTransform(self, verbose: CodeGenVerbosity = _NoVerbosity): diff --git a/Deeploy/EngineExtension/NetworkDeployers/EngineColoringDeployer.py b/Deeploy/EngineExtension/NetworkDeployers/EngineColoringDeployer.py index d08978f5e0..dfadf558b4 100644 --- a/Deeploy/EngineExtension/NetworkDeployers/EngineColoringDeployer.py +++ b/Deeploy/EngineExtension/NetworkDeployers/EngineColoringDeployer.py @@ -62,8 +62,11 @@ def _initEngineColoringDeployer(self, engineMapperCls: Type[EngineMapper]): def lower(self, graph: gs.Graph) -> gs.Graph: graph = super().lower(graph) - uncoloredNodes = [node.name for node in graph.nodes if "engine" not in node.attrs] - assert len(uncoloredNodes) == 0, f"Missing engine color for nodes {uncoloredNodes}" + uncoloredNodes = [node for node in graph.nodes if "engine" not in node.attrs] + uncoloredOperations = 
set(node.op for node in uncoloredNodes) + assert len( + uncoloredNodes + ) == 0, f"Missing engine color for nodes {[node.name for node in uncoloredNodes]} with operations {uncoloredOperations}" return graph def _mapNode(self, node: gs.Node) -> Union[ONNXLayer, Any]: diff --git a/Deeploy/Targets/CortexM/Templates/CMSISUtils.py b/Deeploy/Targets/CortexM/Templates/CMSISUtils.py index d8f03597af..3d0cbbc311 100644 --- a/Deeploy/Targets/CortexM/Templates/CMSISUtils.py +++ b/Deeploy/Targets/CortexM/Templates/CMSISUtils.py @@ -90,8 +90,8 @@ def bindConvParams(ctxt, name, repName, batch, operatorRepresentation): operatorRepresentation[f'{repName}_conv_params'] = ctxt.lookup(f'{name}_conv_params').name convQuantDict = { - 'multiplier': ctxt._mangle(operatorRepresentation['mul']), - 'shift': ctxt._mangle(operatorRepresentation['shift']), + 'multiplier': operatorRepresentation['mul'], + 'shift': operatorRepresentation['shift'], } nameList += [ctxt.hoistStruct(convQuantDict, f'{name}_quant_params', cmsis_nn_per_channel_quant_params)] operatorRepresentation[f'{repName}_quant_params'] = ctxt.lookup(f'{name}_quant_params').name diff --git a/Deeploy/Targets/Generic/Templates/DebugPrintTemplate.py b/Deeploy/Targets/Generic/Templates/DebugPrintTemplate.py index fce7d0bdeb..abb508cb61 100644 --- a/Deeploy/Targets/Generic/Templates/DebugPrintTemplate.py +++ b/Deeploy/Targets/Generic/Templates/DebugPrintTemplate.py @@ -44,7 +44,7 @@ def alignToContext(self, ctxt: NetworkContext, operatorRepresentation['data_in_signed'] = data_in._signed operatorRepresentation['offset'] = (data_in._signed == 0) * int(data_in.nLevels / 2) - operatorRepresentation['output_name'] = ctxt._mangle(data_out.name) + operatorRepresentation['output_name'] = data_out.name return ctxt, operatorRepresentation, [] diff --git a/Deeploy/Targets/Generic/Templates/ITAMaxTemplate.py b/Deeploy/Targets/Generic/Templates/ITAMaxTemplate.py index 67d9b0f319..a8269d2eb2 100644 --- 
a/Deeploy/Targets/Generic/Templates/ITAMaxTemplate.py +++ b/Deeploy/Targets/Generic/Templates/ITAMaxTemplate.py @@ -43,7 +43,7 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, size = operatorRepresentation['lastDimLength'] name = operatorRepresentation['nodeName'] + f"_buffer" ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer'] = ctxt._mangle(name) + operatorRepresentation['ctxtBuffer'] = name operatorRepresentation['ctxtBufferSize'] = size return ctxt, operatorRepresentation, [name] diff --git a/Deeploy/Targets/Generic/TileConstraints/TransposeTileConstraint.py b/Deeploy/Targets/Generic/TileConstraints/TransposeTileConstraint.py index f9d53f8d82..f9c94364d0 100644 --- a/Deeploy/Targets/Generic/TileConstraints/TransposeTileConstraint.py +++ b/Deeploy/Targets/Generic/TileConstraints/TransposeTileConstraint.py @@ -28,13 +28,12 @@ from Deeploy.AbstractDataTypes import PointerClass from Deeploy.CommonExtensions.DataTypes import uint16_t from Deeploy.CommonExtensions.OptimizationPasses.TopologyOptimizationPasses.LoweringOptimizationPasses import \ - _invertPermutation, _permuteList + _invertPermutation, _permuteHyperRectangle from Deeploy.DeeployTypes import NetworkContext, OperatorRepresentation from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint from Deeploy.TilingExtension.TileConstraint import TileConstraint from Deeploy.TilingExtension.TilerModel import TilerModel -from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, HyperRectangle, TilingSchedule, \ - VariableReplacementScheme +from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, TilingSchedule, VariableReplacementScheme class TransposeTileConstraint(TileConstraint): @@ -68,8 +67,6 @@ def serializeTilingSolution( inputBaseOffsets, outputBaseOffsets = cls.extractBaseAddr(tilingSolution, targetMemLevel, operatorRepresentation, addrNames) - inputInCubes = [] - replacementTypes = {} replacements: Dict[str, List[int]] = {} 
@@ -79,28 +76,16 @@ def serializeTilingSolution( replacementTypes[f"dimLen_{dim}"] = PointerClass(uint16_t) replacements[f"dimLen_{dim}"] = [] - perm = operatorRepresentation['perm'] - invPerm = _invertPermutation(perm) - - for cube in outputCubes: - - inCubeDims = _permuteList(cube.dims, invPerm) - - InCube = HyperRectangle(_permuteList(cube.offset, invPerm), inCubeDims) - inputInCubes.append(InCube) - - for dim in range(numDims): - replacements[f"dimLen_{dim}"].append(inCubeDims[dim]) - - inputLoadSchedule = [] - outputLoadSchedule = [] - - for a in inputInCubes: - inputLoadSchedule.append({"data_in": a}) - - for out in outputCubes: - outputLoadSchedule.append({"data_out": out}) + invPerm = _invertPermutation(operatorRepresentation['perm']) + inputCubes = [] + for outCube in outputCubes: + inCube = _permuteHyperRectangle(outCube, invPerm) + inputCubes.append(inCube) + for i, dim in enumerate(inCube.dims): + replacements[f"dimLen_{i}"].append(dim) + inputLoadSchedule = [{"data_in": cube} for cube in inputCubes] + outputLoadSchedule = [{"data_out": cube} for cube in outputCubes] tilingSchedule = TilingSchedule(inputBaseOffsets, outputBaseOffsets, inputLoadSchedule, outputLoadSchedule) variableReplacementSchedule = VariableReplacementScheme(replacements, replacementTypes) diff --git a/Deeploy/Targets/Generic/TileConstraints/iRMSNormTileConstraint.py b/Deeploy/Targets/Generic/TileConstraints/iRMSNormTileConstraint.py index 4cff06d064..e31914aa3b 100644 --- a/Deeploy/Targets/Generic/TileConstraints/iRMSNormTileConstraint.py +++ b/Deeploy/Targets/Generic/TileConstraints/iRMSNormTileConstraint.py @@ -23,7 +23,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy from typing import Dict, List, Tuple import numpy as np @@ -34,7 +33,8 @@ from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint from Deeploy.TilingExtension.TileConstraint import TileConstraint from Deeploy.TilingExtension.TilerModel import TilerModel -from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, TilingSchedule, VariableReplacementScheme +from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, HyperRectangle, TilingSchedule, \ + VariableReplacementScheme class iRMSNormTileConstraint(TileConstraint): @@ -75,7 +75,6 @@ def serializeTilingSolution( addrNames = ['data_in', 'weight', 'data_out'] inputBaseOffsets, outputBaseOffsets = cls.extractBaseAddr(tilingSolution, targetMemLevel, operatorRepresentation, addrNames) - replacements = {"size": []} replacementTypes = {"size": PointerClass(uint16_t)} @@ -87,9 +86,7 @@ def serializeTilingSolution( outputLoadSchedule = [] for cube in outputCubes: - - weightCube = copy.deepcopy(cube) - weightCube.dims = (cube.dims[-1],) + weightCube = HyperRectangle((cube.offset[-1],), (cube.dims[-1],)) inputLoadSchedule.append({"data_in": cube, "weight": weightCube}) for out in outputCubes: diff --git a/Deeploy/Targets/MemPool/Templates/GemmTemplate.py b/Deeploy/Targets/MemPool/Templates/GemmTemplate.py index d4852ba00f..6a92c7e7c6 100644 --- a/Deeploy/Targets/MemPool/Templates/GemmTemplate.py +++ b/Deeploy/Targets/MemPool/Templates/GemmTemplate.py @@ -62,9 +62,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_A" operatorRepresentation['ctxtBuffer_A_size'] = size if isinstance(A, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_A'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_A'] = bufferName else: operatorRepresentation['ctxtBuffer_A'] = 
operatorRepresentation['A'] @@ -72,9 +72,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_B" operatorRepresentation['ctxtBuffer_B_size'] = size if isinstance(B, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_B'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_B'] = bufferName else: operatorRepresentation['ctxtBuffer_B'] = operatorRepresentation['B'] @@ -82,9 +82,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_C" operatorRepresentation['ctxtBuffer_C_size'] = size if isinstance(C, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_C'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_C'] = bufferName else: operatorRepresentation['ctxtBuffer_C'] = operatorRepresentation['C'] diff --git a/Deeploy/Targets/MemPool/Templates/ITAMaxTemplate.py b/Deeploy/Targets/MemPool/Templates/ITAMaxTemplate.py index 81d3cb1c95..acad82fe28 100644 --- a/Deeploy/Targets/MemPool/Templates/ITAMaxTemplate.py +++ b/Deeploy/Targets/MemPool/Templates/ITAMaxTemplate.py @@ -44,7 +44,7 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, size = operatorRepresentation['lastDimLength'] * 192 name = operatorRepresentation['nodeName'] + f"_buffer" ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer'] = ctxt._mangle(name) + operatorRepresentation['ctxtBuffer'] = name operatorRepresentation['ctxtBufferSize'] = size return ctxt, operatorRepresentation, [name] diff --git a/Deeploy/Targets/MemPool/Templates/ITATemplate.py b/Deeploy/Targets/MemPool/Templates/ITATemplate.py index e5cf5ea76e..40f76c50cd 100644 --- a/Deeploy/Targets/MemPool/Templates/ITATemplate.py 
+++ b/Deeploy/Targets/MemPool/Templates/ITATemplate.py @@ -322,9 +322,8 @@ def alignToContext(self, ctxt: NetworkContext, if hasattr(data_out, "_signed") and hasattr(data_out, "nLevels"): operatorRepresentation['output_offset'] = -(data_out._signed == 0) * int(data_out.nLevels // 2) - operatorRepresentation['data_in_array'] = ctxt._mangle(operatorRepresentation['nodeName'] + f"_data_in_array") - operatorRepresentation['quant_params_array'] = ctxt._mangle(operatorRepresentation['nodeName'] + - f"_quant_params_array") + operatorRepresentation['data_in_array'] = f"{nodeName}_data_in_array" + operatorRepresentation['quant_params_array'] = f"{nodeName}_quant_params_array" return ctxt, operatorRepresentation, nameList diff --git a/Deeploy/Targets/MemPool/Templates/RQGemmTemplate.py b/Deeploy/Targets/MemPool/Templates/RQGemmTemplate.py index b336af20ba..7806a66485 100644 --- a/Deeploy/Targets/MemPool/Templates/RQGemmTemplate.py +++ b/Deeploy/Targets/MemPool/Templates/RQGemmTemplate.py @@ -72,9 +72,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_A" operatorRepresentation['ctxtBuffer_A_size'] = size if isinstance(A, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_A'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_A'] = bufferName else: operatorRepresentation['ctxtBuffer_A'] = operatorRepresentation['A'] @@ -82,9 +82,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_B" operatorRepresentation['ctxtBuffer_B_size'] = size if isinstance(B, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_B'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_B'] = bufferName 
else: operatorRepresentation['ctxtBuffer_B'] = operatorRepresentation['B'] @@ -92,9 +92,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_C" operatorRepresentation['ctxtBuffer_C_size'] = size if isinstance(C, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_C'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_C'] = bufferName else: operatorRepresentation['ctxtBuffer_C'] = operatorRepresentation['C'] diff --git a/Deeploy/Targets/MemPool/Templates/RQMatMulTemplate.py b/Deeploy/Targets/MemPool/Templates/RQMatMulTemplate.py index d8165c6e51..db04b500e6 100644 --- a/Deeploy/Targets/MemPool/Templates/RQMatMulTemplate.py +++ b/Deeploy/Targets/MemPool/Templates/RQMatMulTemplate.py @@ -77,9 +77,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_A" operatorRepresentation['ctxtBuffer_A_size'] = size if isinstance(A, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_A'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_A'] = bufferName else: operatorRepresentation['ctxtBuffer_A'] = operatorRepresentation['A'] @@ -87,9 +87,9 @@ def hoistTransientBuffers(self, ctxt: NetworkContext, name = operatorRepresentation['nodeName'] + f"_buffer_B" operatorRepresentation['ctxtBuffer_B_size'] = size if isinstance(B, ConstantBuffer): - names += [name] - ctxt.hoistTransientBuffer(name, size) - operatorRepresentation['ctxtBuffer_B'] = ctxt._mangle(name) + bufferName = ctxt.hoistTransientBuffer(name, size) + names += [bufferName] + operatorRepresentation['ctxtBuffer_B'] = bufferName else: operatorRepresentation['ctxtBuffer_B'] = operatorRepresentation['B'] diff --git 
a/Deeploy/Targets/Neureka/TileConstraints/NeurekaDenseConstraint.py b/Deeploy/Targets/Neureka/TileConstraints/NeurekaDenseConstraint.py index 8457c17e88..70eea8772a 100644 --- a/Deeploy/Targets/Neureka/TileConstraints/NeurekaDenseConstraint.py +++ b/Deeploy/Targets/Neureka/TileConstraints/NeurekaDenseConstraint.py @@ -35,7 +35,7 @@ from Deeploy.TilingExtension.TileConstraint import TileConstraint from Deeploy.TilingExtension.TilerModel import PerformanceHint, TilerModel from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, HyperRectangle, TilingSchedule, \ - VariableReplacementScheme, calculateRectangleOffset + VariableReplacementScheme, calculateFlatOffsetInBytes class NeurekaDenseConv2DTileConstraint(TileConstraint): @@ -488,7 +488,7 @@ def serializeTilingSolution( _, _, _, absoluteCOffset = absoluteCube.absoluteOffset weightShape = ctxt.lookup(varWeight).shape WeightCube = HyperRectangle((absoluteCOffset, 0, 0), (CSize, weightShape[-2], weightShape[-1])) - replacements['weight_addr_offset'].append(calculateRectangleOffset(WeightCube, ctxt.lookup(varWeight))) + replacements['weight_addr_offset'].append(calculateFlatOffsetInBytes(WeightCube, ctxt.lookup(varWeight))) inputLoadSchedule = [] outputLoadSchedule = [] diff --git a/Deeploy/Targets/Neureka/TileConstraints/NeurekaDepthwiseConstraint.py b/Deeploy/Targets/Neureka/TileConstraints/NeurekaDepthwiseConstraint.py index 6364afcdf7..0a0e7153ec 100644 --- a/Deeploy/Targets/Neureka/TileConstraints/NeurekaDepthwiseConstraint.py +++ b/Deeploy/Targets/Neureka/TileConstraints/NeurekaDepthwiseConstraint.py @@ -35,7 +35,7 @@ from Deeploy.TilingExtension.TileConstraint import TileConstraint from Deeploy.TilingExtension.TilerModel import PerformanceHint, TilerModel from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, HyperRectangle, TilingSchedule, \ - VariableReplacementScheme, calculateRectangleOffset + VariableReplacementScheme, calculateFlatOffsetInBytes class 
NeurekaDWConv2DTileConstraint(TileConstraint): @@ -486,7 +486,7 @@ def serializeTilingSolution( _, _, _, absoluteCOffset = absoluteCube.absoluteOffset weightShape = ctxt.lookup(varWeight).shape WeightCube = HyperRectangle((absoluteCOffset, 0, 0), (CSize, weightShape[-2], weightShape[-1])) - replacements['weight_addr_offset'].append(calculateRectangleOffset(WeightCube, ctxt.lookup(varWeight))) + replacements['weight_addr_offset'].append(calculateFlatOffsetInBytes(WeightCube, ctxt.lookup(varWeight))) inputLoadSchedule = [] outputLoadSchedule = [] diff --git a/Deeploy/Targets/Neureka/TileConstraints/NeurekaPointwiseConstraint.py b/Deeploy/Targets/Neureka/TileConstraints/NeurekaPointwiseConstraint.py index 303cc6a4e7..c9bdf6a12e 100644 --- a/Deeploy/Targets/Neureka/TileConstraints/NeurekaPointwiseConstraint.py +++ b/Deeploy/Targets/Neureka/TileConstraints/NeurekaPointwiseConstraint.py @@ -35,7 +35,7 @@ from Deeploy.TilingExtension.TileConstraint import TileConstraint from Deeploy.TilingExtension.TilerModel import PerformanceHint, TilerModel from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, HyperRectangle, TilingSchedule, \ - VariableReplacementScheme, calculateRectangleOffset + VariableReplacementScheme, calculateFlatOffsetInBytes class NeurekaPWConv2DTileConstraint(TileConstraint): @@ -535,7 +535,7 @@ def serializeTilingSolution( _, _, _, absoluteCOffset = absoluteCube.absoluteOffset weightShape = ctxt.lookup(varWeight).shape WeightCube = HyperRectangle((absoluteCOffset, 0, 0), (CSize, weightShape[-2], weightShape[-1])) - replacements['weight_addr_offset'].append(calculateRectangleOffset(WeightCube, ctxt.lookup(varWeight))) + replacements['weight_addr_offset'].append(calculateFlatOffsetInBytes(WeightCube, ctxt.lookup(varWeight))) inputLoadSchedule = [] outputLoadSchedule = [] diff --git a/Deeploy/Targets/PULPOpen/Bindings.py b/Deeploy/Targets/PULPOpen/Bindings.py index 547a29af10..5e13acf411 100644 --- a/Deeploy/Targets/PULPOpen/Bindings.py +++ 
b/Deeploy/Targets/PULPOpen/Bindings.py @@ -48,6 +48,8 @@ from Deeploy.Targets.PULPOpen.CodeTransformationPasses.PULPL3Tiling import PULPL3Tiling from Deeploy.Targets.PULPOpen.CodeTransformationPasses.PULPProfileUntiled import PULPProfileUntiled from Deeploy.Targets.PULPOpen.DataTypes import PULPDMAFuture +from Deeploy.Targets.PULPOpen.DMA.L3Dma import l3DmaHack +from Deeploy.Targets.PULPOpen.DMA.MchanDma import MchanDma from Deeploy.Targets.PULPOpen.Templates import ConvTemplate, FloatAddTemplate, FloatConvTemplate, FloatGELUTemplate, \ FloatGemmTemplate, FloatLayernormTemplate, FloatMatMulTemplate, FloatMaxPoolTemplate, FloatMulTemplate, \ FloatReluTemplate, FloatSoftmaxTemplate, GEMMTemplate, MatrixVectorTemplate, MaxPool2DTemplate, MulTemplate, \ @@ -56,7 +58,8 @@ iRMSNormTemplate, iSoftmaxTemplate from Deeploy.Targets.PULPOpen.TypeCheckers import PULPConvChecker, PULPLinearChecker, PULPMaxPoolChecker, \ PULPRequantShiftChecker -from Deeploy.TilingExtension.CodeTransformationPasses.TilingVariableReplacement import TilingVariableReplacement +from Deeploy.TilingExtension.CodeTransformationPasses.TilingVariableReplacement import TilingVariableReplacement, \ + TilingVariableReplacementUpdate _clusterEntryClosureCallTemplate = NodeTemplate(""" // ${closureName} CLOSURE CALL @@ -115,29 +118,31 @@ TilingCallClosure(writeback = False), PULPSynchCoresPass(), ForkClosure(writeback = False, generateStruct = True), - PULPClusterTiling("L1"), + TilingVariableReplacementUpdate("L1"), + PULPClusterTiling("L2", "L1", MchanDma()), ArgumentStructGeneration(), MemoryManagementGeneration("L1"), - MemoryAwareFunctionCallClosure(writeback = False, generateStruct = True), TilingVariableReplacement("L2"), - PULPL3Tiling("L2"), + MemoryAwareFunctionCallClosure(writeback = False, generateStruct = True), + PULPL3Tiling("L3", "L2", l3DmaHack), PULPProfileUntiled(), ArgumentStructGeneration(), L3MemoryAwareFunctionCallClosure(writeback = False), - MemoryManagementGeneration("L3.*"), 
MemoryManagementGeneration("L2"), + MemoryManagementGeneration("L3.*"), MemoryManagementGeneration(), ]) ClusterTransformer = CodeTransformation([ TilingVariableReplacement("L1"), TilingCallClosure(writeback = False, generateStruct = True), - PULPClusterTiling("L1"), + TilingVariableReplacementUpdate("L1"), + PULPClusterTiling("L2", "L1", MchanDma()), ArgumentStructGeneration(), MemoryManagementGeneration("L1"), - MemoryAwareFunctionCallClosure(writeback = False, generateStruct = True), TilingVariableReplacement("L2"), - PULPL3Tiling("L2"), + MemoryAwareFunctionCallClosure(writeback = False, generateStruct = True), + PULPL3Tiling("L3", "L2", l3DmaHack), PULPProfileUntiled(), ArgumentStructGeneration(), L3MemoryAwareFunctionCallClosure(writeback = False), diff --git a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/AutoTransposeUtils.py b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/AutoTransposeUtils.py index 47d19cb850..42b090776f 100644 --- a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/AutoTransposeUtils.py +++ b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/AutoTransposeUtils.py @@ -27,32 +27,23 @@ from typing import Dict, List, Literal, Tuple from Deeploy.CommonExtensions.OptimizationPasses.TopologyOptimizationPasses.LoweringOptimizationPasses import \ - _invertPermutation, _permuteList + _invertPermutation, _permute, _permuteHyperRectangle from Deeploy.DeeployTypes import NetworkContext, OperatorRepresentation from Deeploy.Targets.PULPOpen.DataTypes import PULPStructDataTypes -from Deeploy.TilingExtension.TilingCodegen import HyperRectangle, minimizeRectangleDims +from Deeploy.TilingExtension.TilingCodegen import HyperRectangle, minimizeRectangle -def _transposedDMAStrides(ctxt: NetworkContext, rectangle: HyperRectangle, direction: Literal["ToL1", "FromL1"], +def _transposedDMAStrides(ctxt: NetworkContext, rect: HyperRectangle, direction: Literal["ToL1", "FromL1"], perm: List[int], L1Name: str, L2Name: str) -> Tuple[HyperRectangle, List[int], 
List[int]]: _invPerm = _invertPermutation(perm) - rectangle = HyperRectangle(_permuteList(rectangle.offset, _invPerm), _permuteList(rectangle.dims, _invPerm)) + inRect = _permuteHyperRectangle(rect, _invPerm) - contiguousDims = [permIdx == rangeIdx for permIdx, rangeIdx in zip(perm, range(len(perm)))] - workList = [] - - for idx, dim in enumerate(contiguousDims): - if dim: - workList.append(rectangle.dims[idx]) - else: - workList.append(1) - - maxTransferRect = copy.copy(rectangle) - maxTransferRect.dims = tuple(workList) + maxTransferDims = tuple(inRect.dims[idx] if idx == permIdx else 1 for idx, permIdx in enumerate(perm)) + maxTransferRect = HyperRectangle(inRect.offset, maxTransferDims) referenceBuffer = copy.copy(ctxt.lookup(L2Name)) - referenceBuffer.shape = _permuteList(referenceBuffer.shape, _invPerm) - minRect, referenceRect = minimizeRectangleDims(maxTransferRect, referenceBuffer) + referenceBuffer.shape = _permute(referenceBuffer.shape, _invPerm) + minRect, referenceShape = minimizeRectangle(maxTransferRect, referenceBuffer.shape) droppedIdx = [ idx for idx in range(len(perm)) @@ -70,7 +61,7 @@ def _transposedDMAStrides(ctxt: NetworkContext, rectangle: HyperRectangle, direc newPerm.append(p - sub) strides = [1] - for dim in reversed(referenceRect.dims[1:]): + for dim in reversed(referenceShape[1:]): strides.insert(0, strides[0] * dim) permStrides = [strides[idx] for idx in newPerm] diff --git a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTiling.py b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTiling.py index 3f15f04680..b81d043dd2 100644 --- a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTiling.py +++ b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTiling.py @@ -26,18 +26,38 @@ from typing import Tuple from Deeploy.DeeployTypes import CodeGenVerbosity, CodeTransformationPass, ExecutionBlock, NetworkContext, _NoVerbosity +from Deeploy.TilingExtension.AsyncDma import AsyncDma +from 
Deeploy.TilingExtension.CodeTransformationPasses.DoubleBufferingTilingCodeGeneration import \ + DoubleBufferingTilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.SingleBufferingTilingCodeGeneration import \ + SingleBufferingTilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import DoubleBufferingTilingMixIn, \ + ProfilingDoubleBufferingTilingMixIn, ProfilingSingleBufferingTilingMixIn, SingleBufferingTilingMixIn -from .PULPClusterTilingDB import ProfilingPULPClusterTilingGenerationDB, PULPClusterTilingGenerationDB -from .PULPClusterTilingSB import ProfilingPULPClusterTilingGenerationSB, PULPClusterTilingGenerationSB + +class PULPClusterTilingGenerationSB(SingleBufferingTilingCodeGeneration, SingleBufferingTilingMixIn): + pass + + +class ProfilingPULPClusterTilingGenerationSB(SingleBufferingTilingCodeGeneration, ProfilingSingleBufferingTilingMixIn): + pass + + +class PULPClusterTilingGenerationDB(DoubleBufferingTilingCodeGeneration, DoubleBufferingTilingMixIn): + pass + + +class ProfilingPULPClusterTilingGenerationDB(DoubleBufferingTilingCodeGeneration, ProfilingDoubleBufferingTilingMixIn): + pass class PULPClusterTiling(CodeTransformationPass): - def __init__(self, targetMemLevel: str): - self.SB = PULPClusterTilingGenerationSB(targetMemLevel) - self.profilingSB = ProfilingPULPClusterTilingGenerationSB(targetMemLevel) - self.DB = PULPClusterTilingGenerationDB(targetMemLevel) - self.profilingDB = ProfilingPULPClusterTilingGenerationDB(targetMemLevel) + def __init__(self, externalMemory: str, localMemory: str, dma: AsyncDma): + self.SB = PULPClusterTilingGenerationSB(externalMemory, localMemory, dma) + self.profilingSB = ProfilingPULPClusterTilingGenerationSB(externalMemory, localMemory, dma) + self.DB = PULPClusterTilingGenerationDB(externalMemory, localMemory, dma) + self.profilingDB = ProfilingPULPClusterTilingGenerationDB(externalMemory, localMemory, dma) def apply(self, ctxt: NetworkContext, diff 
--git a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTilingDB.py b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTilingDB.py deleted file mode 100644 index e13c1bbad0..0000000000 --- a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTilingDB.py +++ /dev/null @@ -1,359 +0,0 @@ -# ---------------------------------------------------------------------- -# -# File: PULPClusterTilingDB.py -# -# Last edited: 25.10.2023 -# -# Copyright (C) 2023, ETH Zurich and University of Bologna. -# -# Author: Moritz Scherer, ETH Zurich -# -# ---------------------------------------------------------------------- -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the License); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an AS IS BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -from typing import Dict, List, Tuple - -from Deeploy.DeeployTypes import CodeSnippet, ExecutionBlock, NetworkContext, NodeTemplate, OperatorRepresentation -from Deeploy.Targets.PULPOpen.CodeTransformationPasses.PULPClusterTilingSB import PULPClusterTilingSB, _DMAUpdate -from Deeploy.Targets.PULPOpen.DataTypes import PULPStructDataTypes -from Deeploy.TilingExtension.CodeTransformationPasses.TilingCodeGeneration import TilingCodeGeneration -from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import DoubleBufferingTilingMixIn, \ - ProfilingDoubleBufferingTilingMixIn, TilingMetaInfo -from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint -from Deeploy.TilingExtension.TilingCodegen import TilingSchedule, VariableReplacementScheme - -_moveTileInTemplate = NodeTemplate(""" - -// IMPORT TILE ${innerTilePtr} from ${outerTilePtr} -if (${tileNum} < ${numTiles}[*${tileIdxPtr}+1]){ -dory_dma_memcpy_mindims_async(&${stateReference}); -} - -""") - -_moveTileOutTemplate = NodeTemplate(""" - -// EXPORT TILE ${innerTilePtr} to ${outerTilePtr} -if((${tileNum}) % 2 == 0){ -dory_dma_memcpy_mindims_async(&${stateReference}); -} else { -dory_dma_memcpy_mindims_async(&${_stateReference}); -} -""") - -_blockTileOutTemplate = NodeTemplate(""" - -// BLOCKING EXPORT TILE ${innerTilePtr} -if((${tileNum}) > 1){ -if((${tileNum}) % 2 == 0){ -dory_dma_barrier(&${stateReference}); -} else { -dory_dma_barrier(&${_stateReference}); -} -} - -""") - -_finalBlockTileOutTemplate = NodeTemplate(""" - -// BLOCKING EXPORT TILE ${innerTilePtr} -dory_dma_barrier(&${stateReference}); -dory_dma_barrier(&${_stateReference}); -""") - -_updateDMATransferStructTemplate = NodeTemplate(""" - -// UPDATE DMA STRUCT ${stateReference}, ${_stateReference} -${stateReference}.ext = (((char*)${extPtr}) + ${extOffsetPtr}[${tileNum}]); -${stateReference}.mchan_cmd = ${mchanCmdPtr}[${tileNum}]; -${stateReference}.length_1d_copy = ${length1dPtr}[${tileNum}]; 
-${stateReference}.number_of_1d_copies = ${number1dPtr}[${tileNum}]; -${stateReference}.number_of_2d_copies = ${number2dPtr}[${tileNum}]; -${stateReference}.loc = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); -${locPtr} = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}-1]); -""") - -_outUpdateDMATransferStructTemplate = NodeTemplate(""" - -if ((${tileNum}) % 2 == 0){ -// UPDATE DMA STRUCT ${stateReference} -${stateReference}.ext = ((char*)${extPtr} + ${extOffsetPtr}[${tileNum}]); -${stateReference}.mchan_cmd = ${mchanCmdPtr}[${tileNum}]; -${stateReference}.length_1d_copy = ${length1dPtr}[${tileNum}]; -${stateReference}.number_of_1d_copies = ${number1dPtr}[${tileNum}]; -${stateReference}.number_of_2d_copies = ${number2dPtr}[${tileNum}]; -${stateReference}.loc = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); -} else { -${_stateReference}.ext = ((char*)${extPtr} + ${extOffsetPtr}[${tileNum}]); -${_stateReference}.mchan_cmd = ${mchanCmdPtr}[${tileNum}]; -${_stateReference}.length_1d_copy = ${length1dPtr}[${tileNum}]; -${_stateReference}.number_of_1d_copies = ${number1dPtr}[${tileNum}]; -${_stateReference}.number_of_2d_copies = ${number2dPtr}[${tileNum}]; -${_stateReference}.loc = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); -} -${locPtr} = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); - -""") - - -class PULPClusterTilingDB(PULPClusterTilingSB): - - _blockTileOutTemplate = _blockTileOutTemplate - _updateDMATransferStructTemplate = _updateDMATransferStructTemplate - _moveTileOutTemplate = _moveTileOutTemplate - _moveTileInTemplate = _moveTileInTemplate - - def _hoistDMAUpdates(self, ctxt: NetworkContext, tensorName: str, updateList: List[_DMAUpdate], - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, Dict]: - nodeName = operatorRepresentation['nodeName'] - - operatorRepresentation = operatorRepresentation.copy() - - dmaName = self._DMAStructName(tensorName, nodeName) - # 
operatorRepresentation['stateReference'] = dmaName - # operatorRepresentation['tileNum'] = "TILING_I" - operatorRepresentation['locPtr'] = ctxt.lookup(operatorRepresentation[tensorName]).name - operatorRepresentation['baseLocPtr'] = ctxt.hoistReference(operatorRepresentation['locPtr'], - operatorRepresentation['locPtr'] + "_ref") - operatorRepresentation['_stateReference'] = self._DMAStructName(tensorName, nodeName) + "_1" - ctxt.lookup(operatorRepresentation['baseLocPtr'])._memoryLevel = self.targetMemLevel - - namePrefix = self.prefix + f"{nodeName}_{tensorName}" - - ctxt, operatorRepresentation = super()._hoistDMAUpdates(ctxt, tensorName, updateList, operatorRepresentation) - - locOffsetList = [] - locBaseOffset = updateList[0].locOffset - for update in updateList: - locOffsetList.append(int(update.locOffset) - locBaseOffset) - - name = namePrefix + "_locOffset" - cb = ctxt.ConstantBuffer(name, [len(updateList)], locOffsetList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'locOffsetPtr') - - return ctxt, operatorRepresentation - - def _generateEgressPointerUpdates( - self, nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, tilingSchedule.outputLoadSchedule, - nodeMemoryConstraint, tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(_outUpdateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateEgressDMACode( - self, tilingSchedule: TilingSchedule, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> 
Tuple[List[CodeSnippet], List[CodeSnippet]]: - - egressDMATransferCalls = [] - egressDMAWaitStatements = [] - - exportLoadStep = tilingSchedule.outputLoadSchedule[0] - for key, rectangle in exportLoadStep.items(): - externalPtr = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - internalPtr = ctxt.lookup(operatorRepresentation[key]) - - tensorName = key - nodeName = operatorRepresentation['nodeName'] - dmaName = self._DMAStructName(tensorName, nodeName) - - finalMemoryLevel = TilingCodeGeneration.isFinalMemoryLevel(nodeMemoryConstraint, internalPtr) - struct = self._rectToDMAStruct(ctxt, rectangle, "FromL1", internalPtr.name, externalPtr.name, - finalMemoryLevel) - _ = ctxt.hoistStruct(struct, dmaName, PULPStructDataTypes.DMA_copy) - ctxt.lookup(dmaName)._users += [operatorRepresentation['nodeName']] - - tensorName = key + "_1" - nodeName = operatorRepresentation['nodeName'] - _dmaName = self._DMAStructName(tensorName, nodeName) - - struct = self._rectToDMAStruct(ctxt, rectangle, "FromL1", internalPtr.name, externalPtr.name, - finalMemoryLevel) - _ = ctxt.hoistStruct(struct, _dmaName, PULPStructDataTypes.DMA_copy) - ctxt.lookup(_dmaName)._users += [operatorRepresentation['nodeName']] - - egressDMATransferCalls.append( - CodeSnippet( - self._moveTileOutTemplate, { - 'innerTilePtr': str(internalPtr._instance), - "outerTilePtr": str(externalPtr._instance), - "stateReference": dmaName, - "_stateReference": _dmaName - })) - - egressDMAWaitStatements.append( - CodeSnippet( - self._blockTileOutTemplate, { - 'innerTilePtr': str(internalPtr._instance), - "outerTilePtr": str(externalPtr._instance), - "stateReference": dmaName, - "_stateReference": _dmaName - })) - - return egressDMATransferCalls, egressDMAWaitStatements - - def _tilingLoop(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, - nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, - variableReplacement: VariableReplacementScheme, - operatorRepresentation: 
OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - tileIdxPtr = self._hoistTileIdxPtr(ctxt, operatorRepresentation) - - ingressDMATransferCalls, ingressDMAWaitStatements = self._generateIngressDMACode( - tilingSchedule, nodeMemoryConstraint, ctxt, operatorRepresentation) - - egressDMATransferCalls, egressDMAWaitStatements = self._generateEgressDMACode( - tilingSchedule, nodeMemoryConstraint, ctxt, operatorRepresentation) - - ctxt, ingressDMAUpdates = self._generateIngressPointerUpdates(nodeMemoryConstraint, tilingSchedule, ctxt, - operatorRepresentation) - ctxt, egressDMAUpdates = self._generateEgressPointerUpdates(nodeMemoryConstraint, tilingSchedule, ctxt, - operatorRepresentation) - - variableUpdates = self._generateVariableUpdates(tilingSchedule, variableReplacement, ctxt, - operatorRepresentation) - - for transaction in ingressDMATransferCalls: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I+1" - _operatorRepresentation["numTiles"] = operatorRepresentation['numTiles'] - _operatorRepresentation["tileIdxPtr"] = tileIdxPtr - - for transaction in ingressDMAUpdates: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I+1" - - for transaction in egressDMATransferCalls: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I" - - for transaction in egressDMAWaitStatements: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation['tileNum'] = "TILING_I" - - for transaction in egressDMAUpdates: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I" - - for transaction in variableUpdates: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I" - - openLoopStatement = [ - CodeSnippet(self._openTileLoopTemplate, { - "numTiles": 
operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - closeLoopStatement = [ - CodeSnippet(self._closeTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - setupStatements = [] - teardownStatements = [] - - teardownStatements += [ - CodeSnippet(self._releaseDMATemplate, - {"stateReference": ingressDMAUpdates[0].operatorRepresentation["stateReference"]}) - ] - - setupStatements += [CodeSnippet(self._initDMATemplate, {"channelName": "dma_channel"})] - setupStatements += [ - CodeSnippet(self._setDMAChannelTemplate, { - **transaction.operatorRepresentation, "channelName": "dma_channel" - }) for transaction in ingressDMAUpdates - ] - - for transaction in egressDMAUpdates: - _operatorRepresentation = transaction.operatorRepresentation.copy() - _operatorRepresentation["channelName"] = "dma_channel" - setupStatements.append(CodeSnippet(self._setDMAChannelTemplate, _operatorRepresentation.copy())) - _operatorRepresentation["channelName"] = "dma_channel" - _operatorRepresentation["stateReference"] = _operatorRepresentation["_stateReference"] - setupStatements.append(CodeSnippet(self._setDMAChannelTemplate, _operatorRepresentation.copy())) - - for transaction in ingressDMATransferCalls: - _operatorRepresentation = transaction.operatorRepresentation.copy() - _operatorRepresentation["tileNum"] = 0 - _operatorRepresentation["numTiles"] = operatorRepresentation['numTiles'] - _operatorRepresentation["tileIdxPtr"] = tileIdxPtr - setupStatements.append(CodeSnippet(transaction.template, _operatorRepresentation)) - - for transaction in egressDMAWaitStatements: - _operatorRepresentation = transaction.operatorRepresentation.copy() - _operatorRepresentation['tileNum'] = ctxt.lookup(operatorRepresentation["numTiles"]).values[-1] - teardownStatements.append(CodeSnippet(_finalBlockTileOutTemplate, _operatorRepresentation)) - - metaInfo = TilingMetaInfo(nodeName = operatorRepresentation['nodeName'] + "_L2", - 
nodeOps = operatorRepresentation['nodeOps'], - numTiles = len(tilingSchedule.outputLoadSchedule), - tileIdxVar = "TILING_I", - kernelLevelTiling = True) - - newExecutionBlock = self.generateAllTilingCode(executionBlock, metaInfo, ingressDMATransferCalls, - ingressDMAWaitStatements[-1:], ingressDMAUpdates, - egressDMATransferCalls, egressDMAWaitStatements[-1:], - egressDMAUpdates, variableUpdates, openLoopStatement, - closeLoopStatement, setupStatements, teardownStatements) - - return ctxt, newExecutionBlock, True - - def generateTilingLoop( - self, ctxt: NetworkContext, executionBlock: ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - flatTilingSchedule = copy.copy(tilingSchedules[0]) - for tilingSchedule in tilingSchedules[1:]: - flatTilingSchedule += tilingSchedule - - offsetLists = list({**flatTilingSchedule.inputBaseOffsets, **flatTilingSchedule.outputBaseOffsets}.values()) - - if len(offsetLists) == 0: - return ctxt, executionBlock, False - - for offsetList in offsetLists: - if not len(offsetList) == 2: - return ctxt, executionBlock, False - - allNumTiles = [len(schedule.outputLoadSchedule) for schedule in tilingSchedules] - operatorRepresentation["numTiles"] = self._hoistNumTiles(ctxt, operatorRepresentation['nodeName'], - tilingSchedules) - - return self._tilingLoop(ctxt, executionBlock, nodeMemoryConstraint, flatTilingSchedule, variableReplacement, - operatorRepresentation) - - -class PULPClusterTilingGenerationDB(PULPClusterTilingDB, DoubleBufferingTilingMixIn): - pass - - -class ProfilingPULPClusterTilingGenerationDB(PULPClusterTilingDB, ProfilingDoubleBufferingTilingMixIn): - pass diff --git a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTilingSB.py b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTilingSB.py deleted file mode 
100644 index 90dc3b2b2b..0000000000 --- a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPClusterTilingSB.py +++ /dev/null @@ -1,673 +0,0 @@ -# ---------------------------------------------------------------------- -# -# File: PULPClusterTiling.py -# -# Last edited: 17.10.2023 -# -# Copyright (C) 2023, ETH Zurich and University of Bologna. -# -# Author: Moritz Scherer, ETH Zurich -# -# ---------------------------------------------------------------------- -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the License); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an AS IS BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -from collections import namedtuple -from typing import Dict, List, Literal, Optional, Tuple, Type - -import numpy as np - -import Deeploy.CommonExtensions.DataTypes as BasicDataTypes -from Deeploy.AbstractDataTypes import Immediate, PointerClass -from Deeploy.CommonExtensions.OptimizationPasses.TopologyOptimizationPasses.LoweringOptimizationPasses import \ - _invertPermutation, _permuteList -from Deeploy.DeeployTypes import CodeSnippet, ConstantBuffer, ExecutionBlock, NetworkContext, NodeTemplate, \ - OperatorRepresentation -from Deeploy.Targets.PULPOpen.CodeTransformationPasses import AutoTransposeUtils -from Deeploy.Targets.PULPOpen.DataTypes import PULPStructDataTypes -from Deeploy.TilingExtension.CodeTransformationPasses.TilingCodeGeneration import TilingCodeGeneration -from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import ProfilingSingleBufferingTilingMixIn, \ - SingleBufferingTilingMixIn, TilingMetaInfo -from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint -from Deeploy.TilingExtension.TilingCodegen import HyperRectangle, TilingSchedule, VariableReplacementScheme, \ - calculateRectangleOffset, minimizeRectangleDims - -_openTileLoopTemplate = NodeTemplate(""" - -// TILING LOOP -for (int TILING_I=${numTiles}[*${tileIdxPtr}]; TILING_I<${numTiles}[(*${tileIdxPtr})+1]; TILING_I++){ -""") - -_closeTileLoopTemplate = NodeTemplate(""" - -// CLOSE TILING LOOP -} -*${tileIdxPtr} += 1; - -""") - -_moveTileInTemplate = NodeTemplate(""" - -// IMPORT TILE ${innerTilePtr} from ${outerTilePtr} -dory_dma_memcpy_mindims_async(&${stateReference}); - -""") - -_iteratedMoveTileInTemplate = NodeTemplate(""" - -// IMPORT TILE ${innerTilePtr} from ${outerTilePtr} -// ITERATED - -<% -_extStrides = [stride * stateStruct.value['length_1d_copy'].value for stride in remainderStrides] -_locStride = f"{stateReference}.length_1d_copy * {stateReference}.number_of_1d_copies * {stateReference}.number_of_2d_copies" - 
-stateStruct.value['ext'] = str(stateReference) + ".ext" -stateStruct.value['loc'] = str(stateReference) + ".loc" -stateStruct.value['tid'] = str(stateReference) + ".tid" -stateStruct.value['stride_2d'] = str(stateReference) + ".stride_2d" -stateStruct.value['stride_1d'] = str(stateReference) + ".stride_1d" -stateStruct.value['number_of_2d_copies'] = str(stateReference) + ".number_of_2d_copies" -stateStruct.value['number_of_1d_copies'] = str(stateReference) + ".number_of_1d_copies" -stateStruct.value['length_1d_copy'] = str(stateReference) + ".length_1d_copy" -%> - -int8_t * bu_${stateReference}_loc = ${stateReference}.loc; -int8_t * bu_${stateReference}_ext = ${stateReference}.ext; - -% for idx, dimLen in enumerate(dimLens): -uint16_t ${nodeName}_${tensorName}_dimLen_${idx} = ${dimLen}[${tileNum}]; -for(int i_${idx} = 0; i_${idx} < ${nodeName}_${tensorName}_dimLen_${idx}; i_${idx}++){ -%endfor -${stateStruct.typeName} trans_${stateReference} = (${stateStruct.typeName}) ${str(stateStruct)}; -dory_dma_memcpy_mindims_async(&trans_${stateReference}); -${stateStruct.value['loc']} = (((int8_t*) ${stateStruct.value['loc']}) + ${_locStride}); -% for idx, _ in enumerate(dimLens): -${stateStruct.value['ext']} = (((int8_t*) ${stateStruct.value['ext']}) + (${_extStrides[idx]})); -} -${stateStruct.value['ext']} = (((int8_t*) ${stateStruct.value['ext']}) - ${nodeName}_${tensorName}_dimLen_${len(dimLens) -1 - idx} * ${_extStrides[idx]}); -%endfor - -${stateStruct.value['loc']} = bu_${stateReference}_loc; -${stateStruct.value['ext']} = bu_${stateReference}_ext; - -""") - -_blockTileInTemplate = NodeTemplate(""" - -// BLOCKING IMPORT TILE ${innerTilePtr} -dory_dma_barrier(&${stateReference}); - -""") - -_moveTileOutTemplate = NodeTemplate(""" - -// EXPORT TILE ${innerTilePtr} to ${outerTilePtr} -dory_dma_memcpy_mindims_async(&${stateReference}); - -""") - -_blockTileOutTemplate = NodeTemplate(""" - -// BLOCKING EXPORT TILE ${innerTilePtr} -dory_dma_barrier(&${stateReference}); - 
-""") - -_updateDMATransferStructTemplate = NodeTemplate(""" - -// UPDATE DMA STRUCT ${stateReference} -${stateReference}.ext = ((char*)${extPtr}) + ${extOffsetPtr}[${tileNum}]; -${stateReference}.length_1d_copy = ${length1dPtr}[${tileNum}]; -${stateReference}.number_of_1d_copies = ${number1dPtr}[${tileNum}]; -${stateReference}.number_of_2d_copies = ${number2dPtr}[${tileNum}]; - -${stateReference}.stride_1d = ${stride1dPtr}[${tileNum}]; -${stateReference}.stride_2d = ${stride2dPtr}[${tileNum}]; - -${stateReference}.mchan_cmd = ${mchanCmdPtr}[${tileNum}]; -""") - -_updateReferenceTemplate = NodeTemplate(""" - -// UPDATE VARIABLE ${reference} -*${reference} = ${baseReference}[${tileNum}]; -""") - -_initDMATemplate = NodeTemplate(""" -int32_t ${channelName} = dory_dma_allocate(); -""") - -_setDMAChannelTemplate = NodeTemplate(""" -${stateReference}.tid = ${channelName}; -""") - -_releaseDMATemplate = NodeTemplate(""" -dory_dma_free(&${stateReference}); -""") - -# ADD NUM TRANSFERS VARIABLE - -_DMAUpdate = namedtuple( - "_DMAUpdate", - "extOffset locOffset length_1d_copy number_of_1d_copies number_of_2d_copies stride_1d stride_2d mchan_cmd") - - -class PULPClusterTilingSB(TilingCodeGeneration): - - _prefix = "TILING_REPLACED_" - - _openTileLoopTemplate = _openTileLoopTemplate - _closeTileLoopTemplate = _closeTileLoopTemplate - - _moveTileInTemplate = _moveTileInTemplate - _iteratedMoveTileInTemplate = _iteratedMoveTileInTemplate - _blockTileInTemplate = _blockTileInTemplate - - _moveTileOutTemplate = _moveTileOutTemplate - _blockTileOutTemplate = _blockTileOutTemplate - - _updateDMATransferStructTemplate = _updateDMATransferStructTemplate - _updateReferenceTemplate = _updateReferenceTemplate - - _initDMATemplate = _initDMATemplate - _setDMAChannelTemplate = _setDMAChannelTemplate - _releaseDMATemplate = _releaseDMATemplate - - @property - def prefix(self): - return self._prefix + self.targetMemLevel + "_" - - def _DMAStructName(self, tensorName: str, nodeName: str) -> 
str: - return f"{self.prefix}_DMA_{nodeName}_{tensorName}" - - @classmethod - def _generatePointerUpdates(cls, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - loadSchedule: List[Dict[str, - HyperRectangle]], nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedule: TilingSchedule) -> Dict[str, _DMAUpdate]: - updateDict = {} - deltaOffsets = {} - - for idx, loadStep in enumerate(loadSchedule): - for stepIdx, (key, rect) in enumerate(loadStep.items()): - - if key in tilingSchedule.outputBaseOffsets.keys(): - baseOffsets = tilingSchedule.outputBaseOffsets[key] - direction = "FromL1" - else: - baseOffsets = tilingSchedule.inputBaseOffsets[key] - direction = "ToL1" - - if key not in updateDict.keys(): - updateDict[key] = [] - if key not in deltaOffsets.keys(): - deltaOffsets[key] = 0 - - referenceBuffer = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - l1Buffer = ctxt.lookup(operatorRepresentation[key]) - - finalMemoryLevel = TilingCodeGeneration.isFinalMemoryLevel(nodeMemoryConstraint, l1Buffer) - - if (f"in{stepIdx}_perm" in operatorRepresentation - and key in tilingSchedule.inputBaseOffsets.keys()) and (finalMemoryLevel == False): - perm = operatorRepresentation[f"in{stepIdx}_perm"] - struct, _, _ = AutoTransposeUtils.generateTransposedDMAStruct(ctxt, rect, direction, perm, - l1Buffer.name, - l1Buffer._referenceName) - - _invPerm = _invertPermutation(perm) - _rect = copy.copy(rect) - _referenceBuffer = copy.copy(referenceBuffer) - _rect.offset = _permuteList(rect.offset, _invPerm) - _rect.dims = _permuteList(rect.dims, _invPerm) - _referenceBuffer.shape = _permuteList(referenceBuffer.shape, _invPerm) - - accOffset = calculateRectangleOffset(_rect, _referenceBuffer) - - else: - struct = cls._rectToDMAStruct(ctxt, rect, direction, l1Buffer.name, l1Buffer._referenceName, - finalMemoryLevel) - accOffset = calculateRectangleOffset(rect, referenceBuffer) - - length_1d_copy = struct.value['length_1d_copy'].value - 
number_of_1d_copies = struct.value['number_of_1d_copies'].value - number_of_2d_copies = struct.value['number_of_2d_copies'].value - stride_1d = struct.value['stride_1d'].value - stride_2d = struct.value['stride_2d'].value - mchan_cmd = struct.value['mchan_cmd'].value - - lIdx = idx % len(baseOffsets) - - sol = _DMAUpdate(accOffset, baseOffsets[lIdx], length_1d_copy, number_of_1d_copies, number_of_2d_copies, - stride_1d, stride_2d, mchan_cmd) - - deltaOffsets[key] = accOffset - updateDict[key].append(sol) - - return updateDict - - @classmethod - def _rectToDMAStruct(cls, ctxt: NetworkContext, rectangle: HyperRectangle, direction: Literal["ToL1", "FromL1"], - L1Name: str, L2Name: str, finalMemoryLevel: bool) -> PULPStructDataTypes.DMA_copy: - - referenceBuffer = ctxt.lookup(L2Name) - - rect, referenceRect = minimizeRectangleDims(rectangle, referenceBuffer) - assert len(rect.dims) <= 3, "PULP: Only 2D transfers are supported!" - - if direction == "ToL1": - _dir = 1 - else: - _dir = 0 - - length_1d_copy = rect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - - number_of_1d_copies = 1 - stride_1d = 0 - - if len(rect.dims) > 1: - number_of_1d_copies = rect.dims[-2] - stride_1d = referenceRect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - - if not finalMemoryLevel: - stride_1d = length_1d_copy - - number_of_2d_copies = 1 - stride_2d = 0 - - if len(rect.dims) > 2: - number_of_2d_copies = rect.dims[-3] - stride_2d = referenceRect.dims[-2] * stride_1d - - length_2d_copy = number_of_1d_copies * length_1d_copy - mchan_flags = _dir + 0x2 + 0x8 - if number_of_1d_copies > 1 or number_of_2d_copies > 1: - mchan_flags += 0x4 - mchan_cmd = length_2d_copy + (mchan_flags << 17) - - assert length_2d_copy <= 2**17, f"The DMA transfer size for mchan should be representable with 17 bits, current number of bits required is {np.ceil(np.log2(length_2d_copy))}" - - struct = PULPStructDataTypes.DMA_copy( - { - "ext": referenceBuffer.name, - "loc": L1Name, 
- "hwc_to_chw": 0, - "stride_2d": stride_2d, - "number_of_2d_copies": number_of_2d_copies, - "stride_1d": stride_1d, - "number_of_1d_copies": number_of_1d_copies, - "length_1d_copy": length_1d_copy, - "mchan_cmd": mchan_cmd, - "dir": _dir, - "tid": 0 - }, ctxt) - - return struct - - def _hoistConstantAndReference(self, - ctxt: NetworkContext, - constBuf: ConstantBuffer, - operatorRepresentation: OperatorRepresentation, - nodeName: str, - operatorRepresentationName: str, - immediateType: Optional[Type[Immediate]] = None) -> Tuple[NetworkContext, Dict]: - - if immediateType is None: - _type = PointerClass(BasicDataTypes.int32_t) - else: - _type = PointerClass(immediateType) - - name = constBuf.name - - ctxt.add(constBuf, "global") - constBuf._type = _type - constBuf._instance = constBuf._type(name, ctxt) - constBuf._users = [nodeName] - constBuf._memoryLevel = self.targetMemLevel - - refName = name + "_ref" - reference = ctxt.hoistReference(name, refName) - ctxt.lookup(reference)._memoryLevel = self.targetMemLevel - - operatorRepresentation[operatorRepresentationName] = refName - - return ctxt, operatorRepresentation - - def _hoistDMAUpdates(self, ctxt: NetworkContext, tensorName: str, updateList: List[_DMAUpdate], - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, Dict]: - - operatorRepresentation = operatorRepresentation.copy() - - nodeName = operatorRepresentation['nodeName'] - - offsetList = [] - mchanCmdList = [] - len1dList = [] - num1dList = [] - num2dList = [] - stride1dList = [] - stride2dList = [] - for update in updateList: - offsetList.append(int(update.extOffset)) - mchanCmdList.append(int(update.mchan_cmd)) - len1dList.append(int(update.length_1d_copy)) - num1dList.append(int(update.number_of_1d_copies)) - num2dList.append(int(update.number_of_2d_copies)) - stride1dList.append(int(update.stride_1d)) - stride2dList.append(int(update.stride_2d)) - - dmaName = self._DMAStructName(tensorName, nodeName) - 
operatorRepresentation['stateReference'] = dmaName - operatorRepresentation['tileNum'] = "TILING_I" - operatorRepresentation['extPtr'] = ctxt.lookup(operatorRepresentation[tensorName])._referenceName - - namePrefix = self.prefix + f"{nodeName}_{tensorName}" - - name = namePrefix + "_offset" - cb = ctxt.ConstantBuffer(name, [len(updateList)], offsetList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'extOffsetPtr') - - name = namePrefix + "_mchan_cmd" - cb = ctxt.ConstantBuffer(name, [len(updateList)], mchanCmdList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'mchanCmdPtr', - PULPStructDataTypes.DMA_copy.structTypeDict['mchan_cmd']) - - name = namePrefix + "_length_1d_copy" - cb = ctxt.ConstantBuffer(name, [len(updateList)], len1dList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'length1dPtr', - PULPStructDataTypes.DMA_copy.structTypeDict['length_1d_copy']) - - name = namePrefix + "_number_of_1d_copies" - cb = ctxt.ConstantBuffer(name, [len(updateList)], num1dList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'number1dPtr', - PULPStructDataTypes.DMA_copy.structTypeDict['number_of_1d_copies']) - - name = namePrefix + "_number_of_2d_copies" - cb = ctxt.ConstantBuffer(name, [len(updateList)], num2dList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'number2dPtr', - PULPStructDataTypes.DMA_copy.structTypeDict['number_of_2d_copies']) - - name = namePrefix + "_stride_1d" - cb = ctxt.ConstantBuffer(name, [len(updateList)], stride1dList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'stride1dPtr', - PULPStructDataTypes.DMA_copy.structTypeDict['stride_1d']) - - name = namePrefix + 
"_stride_2d" - cb = ctxt.ConstantBuffer(name, [len(updateList)], stride2dList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'stride2dPtr', - PULPStructDataTypes.DMA_copy.structTypeDict['stride_2d']) - - return ctxt, operatorRepresentation - - def _generateEgressPointerUpdates( - self, nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, tilingSchedule.outputLoadSchedule, - nodeMemoryConstraint, tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(self._updateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateIngressPointerUpdates( - self, nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, tilingSchedule.inputLoadSchedule, - nodeMemoryConstraint, tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(self._updateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateVariableUpdates(self, tilingSchedule: TilingSchedule, variableReplacement: VariableReplacementScheme, - ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> List[CodeSnippet]: - - updates = [] - - for key in variableReplacement.perTileReplacements.keys(): - - buf = 
ctxt.lookup(operatorRepresentation[key]) - reference = str(buf._instance) - - updates.append( - CodeSnippet(self._updateReferenceTemplate, { - "reference": reference, - "tileNum": "TILING_I", - "baseReference": buf._referenceName - })) - - return updates - - def _generateDMACode(self, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation, loadSchedule: List[Dict[str, HyperRectangle]], - direction: Literal["ToL1", "FromL1"]) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - DMATransferCalls = [] - DMAWaitStatements = [] - - allNumTransfers = AutoTransposeUtils.allNumTransfers(ctxt, operatorRepresentation, loadSchedule, direction) - - transferNodeRep = {} - - if allNumTransfers != []: - - dimLens = [] - - for dim in range(len(allNumTransfers[0])): - dimVec = [transfer[dim] for transfer in allNumTransfers] - namePrefix = operatorRepresentation["nodeName"] + "_" - vecName = f"dimLen_{dim}" - - cb = ctxt.ConstantBuffer(namePrefix + vecName, [len(dimVec)], dimVec) - ctxt, transferNodeRep = self._hoistConstantAndReference(ctxt, cb, transferNodeRep, - operatorRepresentation['nodeName'], vecName) - - dimLens.append(str(cb._instance)) - - transferNodeRep['nodeName'] = operatorRepresentation['nodeName'] - transferNodeRep['dimLens'] = dimLens - transferNodeRep['tileNum'] = "TILING_I" - - loadStep = loadSchedule[0] - - for idx, (key, rectangle) in enumerate(loadStep.items()): - - permName = f"in{idx}_perm" - - externalPtr = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - internalPtr = ctxt.lookup(operatorRepresentation[key]) - - tensorName = key - nodeName = operatorRepresentation['nodeName'] - dmaName = self._DMAStructName(tensorName, nodeName) - - transferNodeRep = { - **transferNodeRep, - **{ - 'innerTilePtr': str(internalPtr._instance), - "outerTilePtr": str(externalPtr._instance), - "stateReference": dmaName - } - } - - if permName in operatorRepresentation and direction == "ToL1": - perm 
= operatorRepresentation[permName] - struct, remainderStrides, numTransfers = AutoTransposeUtils.generateTransposedDMAStruct( - ctxt, rectangle, direction, perm, internalPtr.name, externalPtr.name) - locStride = np.prod( - rectangle.dims) // np.prod(numTransfers) * (externalPtr._type.referencedType.typeWidth // 8) - - transferNodeRep['tensorName'] = operatorRepresentation[key] - - transferNodeRep = {**transferNodeRep, **{"remainderStrides": remainderStrides, "locStride": locStride}} - - else: - finalMemoryLevel = TilingCodeGeneration.isFinalMemoryLevel(nodeMemoryConstraint, internalPtr) - - struct = self._rectToDMAStruct(ctxt, rectangle, direction, internalPtr.name, externalPtr.name, - finalMemoryLevel) - - transferNodeRep["stateStruct"] = struct - _ = ctxt.hoistStruct(struct, dmaName, PULPStructDataTypes.DMA_copy) - ctxt.lookup(dmaName)._users += [operatorRepresentation['nodeName']] - - if permName in operatorRepresentation and direction == "ToL1": - - DMATransferCalls.append(CodeSnippet(self._iteratedMoveTileInTemplate, transferNodeRep)) - else: - DMATransferCalls.append(CodeSnippet(self._moveTileInTemplate, transferNodeRep)) - - DMAWaitStatements.append(CodeSnippet(self._blockTileInTemplate, transferNodeRep)) - - return DMATransferCalls, DMAWaitStatements - - def _generateIngressDMACode( - self, tilingSchedule: TilingSchedule, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - importLoadStep = tilingSchedule.inputLoadSchedule - ingressDMATransferCalls, ingressDMAWaitStatements = self._generateDMACode(nodeMemoryConstraint, ctxt, - operatorRepresentation, - importLoadStep, "ToL1") - return ingressDMATransferCalls, ingressDMAWaitStatements - - def _generateEgressDMACode( - self, tilingSchedule: TilingSchedule, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> 
Tuple[List[CodeSnippet], List[CodeSnippet]]: - - exportLoadStep = tilingSchedule.outputLoadSchedule - egressDMATransferCalls, egressDMAWaitStatements = self._generateDMACode(nodeMemoryConstraint, ctxt, - operatorRepresentation, exportLoadStep, - "FromL1") - - return egressDMATransferCalls, egressDMAWaitStatements - - def _tilingLoop(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, - nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, - variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - tileIdxPtr = self._hoistTileIdxPtr(ctxt, operatorRepresentation) - - ingressDMATransferCalls, ingressDMAWaitStatements = self._generateIngressDMACode( - tilingSchedule, nodeMemoryConstraint, ctxt, operatorRepresentation) - - egressDMATransferCalls, egressDMAWaitStatements = self._generateEgressDMACode( - tilingSchedule, nodeMemoryConstraint, ctxt, operatorRepresentation) - - ctxt, ingressDMAUpdates = self._generateIngressPointerUpdates(nodeMemoryConstraint, tilingSchedule, ctxt, - operatorRepresentation) - ctxt, egressDMAUpdates = self._generateEgressPointerUpdates(nodeMemoryConstraint, tilingSchedule, ctxt, - operatorRepresentation) - - openLoopStatement = [ - CodeSnippet(self._openTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - closeLoopStatement = [ - CodeSnippet(self._closeTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - setupStatements = [CodeSnippet(self._initDMATemplate, {"channelName": "dma_channel"})] - setupStatements += [ - CodeSnippet(self._setDMAChannelTemplate, { - **transaction.operatorRepresentation, "channelName": "dma_channel" - }) for transaction in ingressDMAUpdates + egressDMAUpdates - ] - - teardownStatements = [ - CodeSnippet(self._releaseDMATemplate, - {"stateReference": 
ingressDMAUpdates[0].operatorRepresentation["stateReference"]}) - ] - - variableUpdates = self._generateVariableUpdates(tilingSchedule, variableReplacement, ctxt, - operatorRepresentation) - - metaInfo = TilingMetaInfo(nodeName = operatorRepresentation['nodeName'] + "_L2", - nodeOps = operatorRepresentation['nodeOps'], - numTiles = len(tilingSchedule.outputLoadSchedule), - tileIdxVar = "TILING_I", - kernelLevelTiling = True) - - newExecutionBlock = self.generateAllTilingCode(executionBlock, metaInfo, ingressDMATransferCalls, - ingressDMAWaitStatements, ingressDMAUpdates, - egressDMATransferCalls, egressDMAWaitStatements, - egressDMAUpdates, variableUpdates, openLoopStatement, - closeLoopStatement, setupStatements, teardownStatements) - - return ctxt, newExecutionBlock, True - - def generateTilingLoop( - self, ctxt: NetworkContext, executionBlock: ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - flatTilingSchedule = copy.copy(tilingSchedules[0]) - for tilingSchedule in tilingSchedules[1:]: - flatTilingSchedule += tilingSchedule - - # SCHEREMO: hoist numTiles - - offsetLists = list({**flatTilingSchedule.inputBaseOffsets, **flatTilingSchedule.outputBaseOffsets}.values()) - - if len(offsetLists) == 0: - return ctxt, executionBlock, False - - for offsetList in offsetLists: - if not len(offsetList) == 1: - return ctxt, executionBlock, False - - operatorRepresentation["numTiles"] = self._hoistNumTiles(ctxt, operatorRepresentation['nodeName'], - tilingSchedules) - - return self._tilingLoop(ctxt, executionBlock, nodeMemoryConstraint, flatTilingSchedule, variableReplacement, - operatorRepresentation) - - -class PULPClusterTilingGenerationSB(PULPClusterTilingSB, SingleBufferingTilingMixIn): - pass - - -class ProfilingPULPClusterTilingGenerationSB(PULPClusterTilingSB, 
ProfilingSingleBufferingTilingMixIn): - pass diff --git a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3Tiling.py b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3Tiling.py index af744f8672..32854b3e93 100644 --- a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3Tiling.py +++ b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3Tiling.py @@ -26,18 +26,38 @@ from typing import Tuple from Deeploy.DeeployTypes import CodeGenVerbosity, CodeTransformationPass, ExecutionBlock, NetworkContext, _NoVerbosity +from Deeploy.TilingExtension.AsyncDma import AsyncDma +from Deeploy.TilingExtension.CodeTransformationPasses.DoubleBufferingTilingCodeGeneration import \ + DoubleBufferingTilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.SingleBufferingTilingCodeGeneration import \ + SingleBufferingTilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import DoubleBufferingTilingMixIn, \ + ProfilingDoubleBufferingTilingMixIn, ProfilingSingleBufferingTilingMixIn, SingleBufferingTilingMixIn -from .PULPL3TilingDB import ProfilingPULPL3TilingGenerationDB, PULPL3TilingGenerationDB -from .PULPL3TilingSB import ProfilingPULPL3TilingGenerationSB, PULPL3TilingGenerationSB + +class PULPL3TilingGenerationSB(SingleBufferingTilingCodeGeneration, SingleBufferingTilingMixIn): + pass + + +class ProfilingPULPL3TilingGenerationSB(SingleBufferingTilingCodeGeneration, ProfilingSingleBufferingTilingMixIn): + pass + + +class PULPL3TilingGenerationDB(DoubleBufferingTilingCodeGeneration, DoubleBufferingTilingMixIn): + pass + + +class ProfilingPULPL3TilingGenerationDB(DoubleBufferingTilingCodeGeneration, ProfilingDoubleBufferingTilingMixIn): + pass class PULPL3Tiling(CodeTransformationPass): - def __init__(self, targetMemLevel: str): - self.SB = PULPL3TilingGenerationSB(targetMemLevel) - self.profilingSB = ProfilingPULPL3TilingGenerationSB(targetMemLevel) - self.DB = PULPL3TilingGenerationDB(targetMemLevel) - 
self.profilingDB = ProfilingPULPL3TilingGenerationDB(targetMemLevel) + def __init__(self, externalMemory: str, localMemory: str, dma: AsyncDma): + self.SB = PULPL3TilingGenerationSB(externalMemory, localMemory, dma) + self.DB = PULPL3TilingGenerationDB(externalMemory, localMemory, dma) + self.profilingSB = ProfilingPULPL3TilingGenerationSB(externalMemory, localMemory, dma) + self.profilingDB = ProfilingPULPL3TilingGenerationDB(externalMemory, localMemory, dma) def apply(self, ctxt: NetworkContext, diff --git a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3TilingDB.py b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3TilingDB.py deleted file mode 100644 index 6a3f80bd28..0000000000 --- a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3TilingDB.py +++ /dev/null @@ -1,329 +0,0 @@ -# ---------------------------------------------------------------------- -# -# File: PULPClusterTiling.py -# -# Last edited: 17.10.2023 -# -# Copyright (C) 2023, ETH Zurich and University of Bologna. -# -# Author: Moritz Scherer, ETH Zurich -# -# ---------------------------------------------------------------------- -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the License); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an AS IS BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -from typing import Dict, List, Tuple - -from Deeploy.DeeployTypes import CodeSnippet, ExecutionBlock, NetworkContext, NodeTemplate, OperatorRepresentation -from Deeploy.Targets.PULPOpen.CodeTransformationPasses.PULPL3TilingSB import PULPL3TilingSB, _DMAUpdate -from Deeploy.Targets.PULPOpen.DataTypes import PULPStructDataTypes -from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import DoubleBufferingTilingMixIn, \ - ProfilingDoubleBufferingTilingMixIn, TilingMetaInfo -from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint -from Deeploy.TilingExtension.TilingCodegen import TilingSchedule, VariableReplacementScheme - -_moveTileInTemplate = NodeTemplate(""" - -// IMPORT TILE ${innerTilePtr} from ${outerTilePtr} -if (${tileNum} < ${numTiles}[*${tileIdxPtr}+1]){ -pi_cl_ram_copy_2d(get_ram_ptr(), ${stateReference}.pi_ram_addr, ${stateReference}.addr, ${stateReference}.size, ${stateReference}.stride, ${stateReference}.length, ${stateReference}.ext2loc, &${stateReference}); -} - -""") - -_moveTileOutTemplate = NodeTemplate(""" - -// EXPORT TILE ${innerTilePtr} to ${outerTilePtr} -if((${tileNum}) % 2 == 0){ -pi_cl_ram_copy_2d(get_ram_ptr(), ${stateReference}.pi_ram_addr, ${stateReference}.addr, ${stateReference}.size, ${stateReference}.stride, ${stateReference}.length, ${stateReference}.ext2loc, &${stateReference}); -} else { -pi_cl_ram_copy_2d(get_ram_ptr(), ${_stateReference}.pi_ram_addr, ${_stateReference}.addr, ${_stateReference}.size, ${_stateReference}.stride, ${_stateReference}.length, ${_stateReference}.ext2loc, &${_stateReference}); -} - -""") - -_blockTileOutTemplate = NodeTemplate(""" - -// BLOCKING EXPORT TILE ${innerTilePtr} -if((${tileNum}) > 1){ -if((${tileNum}) % 2 == 0){ -pi_cl_ram_copy_wait(&${stateReference}); -} else { -pi_cl_ram_copy_wait(&${_stateReference}); -} -} - -""") - -_finalBlockTileOutTemplate = NodeTemplate(""" - -// BLOCKING EXPORT TILE ${innerTilePtr} 
-pi_cl_ram_copy_wait(&${stateReference}); -% if numTiles > 1: -pi_cl_ram_copy_wait(&${_stateReference}); -% endif -""") - -_updateDMATransferStructTemplate = NodeTemplate(""" - -// UPDATE DMA STRUCT ${stateReference} -${stateReference}.pi_ram_addr = ((char*)${extPtr}) + ${extOffsetPtr}[${tileNum}]; -${stateReference}.size = ${length1dPtr}[${tileNum}]; -${stateReference}.length = ${number1dPtr}[${tileNum}]; -${stateReference}.addr = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); -${locPtr} = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}-1]); - -""") - -_outUpdateDMATransferStructTemplate = NodeTemplate(""" - -if ((${tileNum}) % 2 == 0){ -// UPDATE DMA STRUCT ${stateReference} -${stateReference}.pi_ram_addr = ((char*)${extPtr}) + ${extOffsetPtr}[${tileNum}]; -${stateReference}.size = ${length1dPtr}[${tileNum}]; -${stateReference}.length = ${number1dPtr}[${tileNum}]; -${stateReference}.addr = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); -} else { -${_stateReference}.pi_ram_addr = ((char*)${extPtr}) + ${extOffsetPtr}[${tileNum}]; -${_stateReference}.size = ${length1dPtr}[${tileNum}]; -${_stateReference}.length = ${number1dPtr}[${tileNum}]; -${_stateReference}.addr = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); -} -${locPtr} = (((char*)${baseLocPtr}) + ${locOffsetPtr}[${tileNum}]); - -""") - - -class PULPL3TilingDB(PULPL3TilingSB): - - _prefix = "TILING_REPLACED_" - _blockTileOutTemplate = _blockTileOutTemplate - _updateDMATransferStructTemplate = _updateDMATransferStructTemplate - _moveTileOutTemplate = _moveTileOutTemplate - _moveTileInTemplate = _moveTileInTemplate - - def _hoistDMAUpdates(self, ctxt: NetworkContext, tensorName: str, updateList: List[_DMAUpdate], - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, Dict]: - - nodeName = operatorRepresentation['nodeName'] - - operatorRepresentation = operatorRepresentation.copy() - - dmaName = self._DMAStructName(tensorName, nodeName) - # 
operatorRepresentation['stateReference'] = dmaName - # operatorRepresentation['tileNum'] = "TILING_I" - operatorRepresentation['locPtr'] = ctxt.lookup(operatorRepresentation[tensorName]).name - operatorRepresentation['baseLocPtr'] = ctxt.hoistReference(operatorRepresentation['locPtr'], - operatorRepresentation['locPtr'] + "_ref") - operatorRepresentation['_stateReference'] = self._DMAStructName(tensorName, nodeName) + "_1" - ctxt.lookup(operatorRepresentation['baseLocPtr'])._memoryLevel = self.targetMemLevel - - namePrefix = self.prefix + f"{nodeName}_{tensorName}" - - ctxt, operatorRepresentation = super()._hoistDMAUpdates(ctxt, tensorName, updateList, operatorRepresentation) - - locOffsetList = [] - locBaseOffset = updateList[0].locOffset - for update in updateList: - locOffsetList.append(int(update.locOffset) - locBaseOffset) - - name = namePrefix + "_locOffset" - cb = ctxt.ConstantBuffer(name, [len(updateList)], locOffsetList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'locOffsetPtr') - - return ctxt, operatorRepresentation - - def _generateEgressPointerUpdates( - self, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, tilingSchedule.outputLoadSchedule, - tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(_outUpdateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateEgressDMACode( - self, tilingSchedule: TilingSchedule, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - egressDMATransferCalls = [] - 
egressDMAWaitStatements = [] - exportLoadStep = tilingSchedule.outputLoadSchedule[0] - - for key, rectangle in exportLoadStep.items(): - externalPtr = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - internalPtr = ctxt.lookup(operatorRepresentation[key]) - - tensorName = key - nodeName = operatorRepresentation['nodeName'] - dmaName = self._DMAStructName(tensorName, nodeName) - - struct = self._rectToDMAStruct(ctxt, rectangle, "FromL2", internalPtr.name, externalPtr.name) - _ = ctxt.hoistStruct(struct, dmaName, PULPStructDataTypes.pi_cl_ram_req_t) - ctxt.lookup(dmaName)._users += [operatorRepresentation['nodeName']] - - tensorName = key + "_1" - nodeName = operatorRepresentation['nodeName'] - _dmaName = self._DMAStructName(tensorName, nodeName) - - struct = self._rectToDMAStruct(ctxt, rectangle, "FromL2", internalPtr.name, externalPtr.name) - _ = ctxt.hoistStruct(struct, _dmaName, PULPStructDataTypes.pi_cl_ram_req_t) - ctxt.lookup(_dmaName)._users += [operatorRepresentation['nodeName']] - - egressDMATransferCalls.append( - CodeSnippet( - self._moveTileOutTemplate, { - 'innerTilePtr': str(internalPtr._instance), - "outerTilePtr": str(externalPtr._instance), - "stateReference": dmaName, - "_stateReference": _dmaName - })) - - egressDMAWaitStatements.append( - CodeSnippet( - self._blockTileOutTemplate, { - 'innerTilePtr': str(internalPtr._instance), - "outerTilePtr": str(externalPtr._instance), - "stateReference": dmaName, - "_stateReference": _dmaName - })) - - return egressDMATransferCalls, egressDMAWaitStatements - - def _tilingLoop(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, - nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, - variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - tileIdxPtr = self._hoistTileIdxPtr(ctxt, operatorRepresentation) - - ingressDMATransferCalls, ingressDMAWaitStatements = 
self._generateIngressDMACode( - tilingSchedule, ctxt, operatorRepresentation) - - egressDMATransferCalls, egressDMAWaitStatements = self._generateEgressDMACode( - tilingSchedule, nodeMemoryConstraint, ctxt, operatorRepresentation) - - ctxt, ingressDMAUpdates = self._generateIngressPointerUpdates(tilingSchedule, ctxt, operatorRepresentation) - ctxt, egressDMAUpdates = self._generateEgressPointerUpdates(tilingSchedule, ctxt, operatorRepresentation) - - variableUpdates = [] - - for transaction in ingressDMATransferCalls: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I+1" - _operatorRepresentation["numTiles"] = operatorRepresentation['numTiles'] - _operatorRepresentation["tileIdxPtr"] = tileIdxPtr - - for transaction in ingressDMAUpdates: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I+1" - - for transaction in egressDMATransferCalls: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I" - - for transaction in egressDMAWaitStatements: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation['tileNum'] = "TILING_I" - - for transaction in egressDMAUpdates: - _operatorRepresentation = transaction.operatorRepresentation - _operatorRepresentation["tileNum"] = "TILING_I" - - openLoopStatement = [ - CodeSnippet(self._openTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - closeLoopStatement = [ - CodeSnippet(self._closeTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - setupStatements = [] - teardownStatements = [] - - for transaction in ingressDMATransferCalls: - _operatorRepresentation = transaction.operatorRepresentation.copy() - _operatorRepresentation["tileNum"] = 0 - _operatorRepresentation["numTiles"] = operatorRepresentation['numTiles'] - 
_operatorRepresentation["tileIdxPtr"] = tileIdxPtr - setupStatements.append(CodeSnippet(transaction.template, _operatorRepresentation)) - - for transaction in egressDMAWaitStatements: - _operatorRepresentation = transaction.operatorRepresentation.copy() - _operatorRepresentation['tileNum'] = ctxt.lookup(operatorRepresentation["numTiles"]).values[-1] - _operatorRepresentation['numTiles'] = len(tilingSchedule.outputLoadSchedule) - teardownStatements.append(CodeSnippet(_finalBlockTileOutTemplate, _operatorRepresentation)) - - metaInfo = TilingMetaInfo(nodeName = operatorRepresentation['nodeName'] + "_L3", - nodeOps = operatorRepresentation['nodeOps'], - numTiles = len(tilingSchedule.outputLoadSchedule), - tileIdxVar = "TILING_I", - kernelLevelTiling = False) - - newExecutionBlock = self.generateAllTilingCode(executionBlock, metaInfo, ingressDMATransferCalls, - ingressDMAWaitStatements, ingressDMAUpdates, - egressDMATransferCalls, egressDMAWaitStatements, - egressDMAUpdates, variableUpdates, openLoopStatement, - closeLoopStatement, setupStatements, teardownStatements) - - return ctxt, newExecutionBlock, True - - def generateTilingLoop( - self, ctxt: NetworkContext, executionBlock: ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - flatTilingSchedule = copy.copy(tilingSchedules[0]) - for tilingSchedule in tilingSchedules[1:]: - flatTilingSchedule += tilingSchedule - - offsetLists = list({**flatTilingSchedule.inputBaseOffsets, **flatTilingSchedule.outputBaseOffsets}.values()) - - if len(offsetLists) == 0: - return ctxt, executionBlock, False - - for offsetList in offsetLists: - if not len(offsetList) == 2: - return ctxt, executionBlock, False - - allNumTiles = [len(schedule.outputLoadSchedule) for schedule in tilingSchedules] - operatorRepresentation["numTiles"] = 
self._hoistNumTiles(ctxt, operatorRepresentation['nodeName'], - tilingSchedules) - - return self._tilingLoop(ctxt, executionBlock, nodeMemoryConstraint, flatTilingSchedule, variableReplacement, - operatorRepresentation) - - -class PULPL3TilingGenerationDB(PULPL3TilingDB, DoubleBufferingTilingMixIn): - pass - - -class ProfilingPULPL3TilingGenerationDB(PULPL3TilingDB, ProfilingDoubleBufferingTilingMixIn): - pass diff --git a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3TilingSB.py b/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3TilingSB.py deleted file mode 100644 index 8079516720..0000000000 --- a/Deeploy/Targets/PULPOpen/CodeTransformationPasses/PULPL3TilingSB.py +++ /dev/null @@ -1,468 +0,0 @@ -# ---------------------------------------------------------------------- -# -# File: PULPL3TilingSB.py -# -# Last edited: 19.04.2024 -# -# Copyright (C) 2024, ETH Zurich and University of Bologna. -# -# Author: Moritz Scherer, ETH Zurich -# -# ---------------------------------------------------------------------- -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the License); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an AS IS BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -from collections import namedtuple -from typing import Dict, List, Literal, Optional, Tuple, Type - -import Deeploy.CommonExtensions.DataTypes as BasicDataTypes -from Deeploy.AbstractDataTypes import Immediate, PointerClass -from Deeploy.DeeployTypes import CodeSnippet, ConstantBuffer, ExecutionBlock, NetworkContext, NodeTemplate, \ - OperatorRepresentation -from Deeploy.Targets.PULPOpen.CodeTransformationPasses import AutoTransposeUtils -from Deeploy.Targets.PULPOpen.DataTypes import PULPStructDataTypes -from Deeploy.TilingExtension.CodeTransformationPasses.TilingCodeGeneration import TilingCodeGeneration -from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import ProfilingSingleBufferingTilingMixIn, \ - SingleBufferingTilingMixIn, TilingMetaInfo -from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint -from Deeploy.TilingExtension.TilingCodegen import HyperRectangle, TilingSchedule, VariableReplacementScheme, \ - calculateRectangleOffset, minimizeRectangleDims - -_openTileLoopTemplate = NodeTemplate(""" - -// TILING LOOP -// for (int TILING_I=0; TILING_I<${numTiles}; TILING_I++){ -for (int TILING_I=${numTiles}[*${tileIdxPtr}]; TILING_I<${numTiles}[(*${tileIdxPtr})+1]; TILING_I++){ -""") - -_closeTileLoopTemplate = NodeTemplate(""" - -// CLOSE TILING LOOP -} -*${tileIdxPtr} += 1; - -""") - -_moveTileInTemplate = NodeTemplate(""" - -// IMPORT TILE ${innerTilePtr} from ${outerTilePtr} -pi_cl_ram_copy_2d(get_ram_ptr(), ${stateReference}.pi_ram_addr, ${stateReference}.addr, ${stateReference}.size, ${stateReference}.stride, ${stateReference}.length, ${stateReference}.ext2loc, &${stateReference}); -// L3 TRANSFERS CANNOT BE CONCURRENT WITH CURRENT DRIVER -pi_cl_ram_copy_wait(&${stateReference}); - -""") - -_blockTileInTemplate = NodeTemplate(""" - -// BLOCKING IMPORT TILE ${innerTilePtr} -pi_cl_ram_copy_wait(&${stateReference}); - -""") - -_moveTileOutTemplate = NodeTemplate(""" - -// EXPORT TILE ${innerTilePtr} to 
${outerTilePtr} -pi_cl_ram_copy_2d(get_ram_ptr(), ${stateReference}.pi_ram_addr, ${stateReference}.addr, ${stateReference}.size, ${stateReference}.stride, ${stateReference}.length, ${stateReference}.ext2loc, &${stateReference}); -// L3 TRANSFERS CANNOT BE CONCURRENT WITH CURRENT DRIVER -pi_cl_ram_copy_wait(&${stateReference}); - -""") - -_blockTileOutTemplate = NodeTemplate(""" - -// BLOCKING EXPORT TILE ${innerTilePtr} -pi_cl_ram_copy_wait(&${stateReference}); - -""") - -_updateDMATransferStructTemplate = NodeTemplate(""" - -// UPDATE DMA STRUCT ${stateReference} -${stateReference}.pi_ram_addr = ((char*)${extPtr}) + ${extOffsetPtr}[${tileNum}]; -${stateReference}.size = ${length1dPtr}[${tileNum}]; -${stateReference}.length = ${number1dPtr}[${tileNum}]; - -""") - -# ${stateReference}.number_of_2d_copies = ${number2dPtr}[${tileNum}]; - -_updateReferenceTemplate = NodeTemplate(""" - -// UPDATE VARIABLE ${reference} -*${reference} = ${baseReference}[${tileNum}]; -""") - -# ADD NUM TRANSFERS VARIABLE - -_DMAUpdate = namedtuple("_DMAUpdate", "extOffset locOffset length_1d_copy number_of_1d_copies number_of_2d_copies") - - -class PULPL3TilingSB(TilingCodeGeneration): - - _prefix = "TILING_REPLACED_" - - _openTileLoopTemplate = _openTileLoopTemplate - _closeTileLoopTemplate = _closeTileLoopTemplate - - _moveTileInTemplate = _moveTileInTemplate - _blockTileInTemplate = _blockTileInTemplate - - _moveTileOutTemplate = _moveTileOutTemplate - _blockTileOutTemplate = _blockTileOutTemplate - - _updateDMATransferStructTemplate = _updateDMATransferStructTemplate - _updateReferenceTemplate = _updateReferenceTemplate - - @property - def prefix(self): - return self._prefix + self.targetMemLevel + "_" - - def _DMAStructName(self, tensorName: str, nodeName: str) -> str: - return f"{self.prefix}_DMA_{nodeName}_{tensorName}" - - @classmethod - def _generatePointerUpdates(cls, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - loadSchedule: List[Dict[str, 
HyperRectangle]], - tilingSchedule: TilingSchedule) -> Dict[str, _DMAUpdate]: - updateDict = {} - deltaOffsets = {} - - for idx, loadStep in enumerate(loadSchedule): - for stepIdx, (key, rect) in enumerate(loadStep.items()): - - if key in tilingSchedule.outputBaseOffsets.keys(): - baseOffsets = tilingSchedule.outputBaseOffsets[key] - direction = "FromL2" - else: - baseOffsets = tilingSchedule.inputBaseOffsets[key] - direction = "ToL2" - - if key not in updateDict.keys(): - updateDict[key] = [] - if key not in deltaOffsets.keys(): - deltaOffsets[key] = 0 - - referenceBuffer = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - l1Buffer = ctxt.lookup(operatorRepresentation[key]) - - struct = cls._rectToDMAStruct(ctxt, rect, direction, l1Buffer.name, l1Buffer._referenceName) - accOffset = calculateRectangleOffset(rect, referenceBuffer) - - length_1d_copy = struct.value['size'].value - number_of_1d_copies = struct.value['length'].value - - lIdx = idx % len(baseOffsets) - - sol = _DMAUpdate(accOffset, baseOffsets[lIdx], length_1d_copy, number_of_1d_copies, 0) - - deltaOffsets[key] = accOffset - updateDict[key].append(sol) - - return updateDict - - @classmethod - def _rectToDMAStruct(cls, ctxt: NetworkContext, rectangle: HyperRectangle, direction: Literal["ToL2", "FromL2"], - L1Name: str, L2Name: str) -> PULPStructDataTypes.pi_cl_ram_req_t: - - referenceBuffer = ctxt.lookup(L2Name) - - rect, referenceRect = minimizeRectangleDims(rectangle, referenceBuffer) - assert len(rect.dims) <= 2, "PULP: Only 2D transfers are supported!" 
- - if direction == "ToL2": - _dir = 1 - else: - _dir = 0 - - length_1d_copy = rect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - - if len(rect.dims) > 1: - number_of_1d_copies = rect.dims[-2] - stride_1d = referenceRect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - else: - number_of_1d_copies = 1 - stride_1d = 0 - - struct = PULPStructDataTypes.pi_cl_ram_req_t( - { - "pi_ram_addr": referenceBuffer.name, - "addr": L1Name, - "stride": stride_1d, - "length": length_1d_copy, - "size": number_of_1d_copies * length_1d_copy, - "ext2loc": _dir, - "is_2d": 1 - }, ctxt) - - return struct - - def _hoistConstantAndReference(self, - ctxt: NetworkContext, - constBuf: ConstantBuffer, - operatorRepresentation: OperatorRepresentation, - nodeName: str, - operatorRepresentationName: str, - immediateType: Optional[Type[Immediate]] = None) -> Tuple[NetworkContext, Dict]: - if immediateType is None: - _type = PointerClass(BasicDataTypes.int32_t) - else: - _type = PointerClass(immediateType) - - constBuf._users = [nodeName] - constBuf._memoryLevel = self.targetMemLevel - - refName = ctxt.hoistConstantAndReference(constBuf, _type) - - operatorRepresentation[operatorRepresentationName] = refName - - return ctxt, operatorRepresentation - - def _hoistDMAUpdates(self, ctxt: NetworkContext, tensorName: str, updateList: List[_DMAUpdate], - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, Dict]: - - operatorRepresentation = operatorRepresentation.copy() - - nodeName = operatorRepresentation['nodeName'] - - offsetList = [] - len1dList = [] - num1dList = [] - num2dList = [] - for update in updateList: - offsetList.append(int(update.extOffset)) - len1dList.append(int(update.length_1d_copy)) - num1dList.append(int(update.number_of_1d_copies)) - num2dList.append(int(update.number_of_2d_copies)) - - dmaName = self._DMAStructName(tensorName, nodeName) - operatorRepresentation['stateReference'] = dmaName - 
operatorRepresentation['tileNum'] = "TILING_I" - operatorRepresentation['extPtr'] = ctxt.lookup(operatorRepresentation[tensorName])._referenceName - - namePrefix = self.prefix + f"{nodeName}_{tensorName}" - - name = namePrefix + "_offset" - cb = ctxt.ConstantBuffer(name, [len(updateList)], offsetList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'extOffsetPtr') - - name = namePrefix + "_length_1d_copy" - cb = ctxt.ConstantBuffer(name, [len(updateList)], len1dList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'length1dPtr', - PULPStructDataTypes.pi_cl_ram_req_t.structTypeDict['size']) - - name = namePrefix + "_number_of_1d_copies" - cb = ctxt.ConstantBuffer(name, [len(updateList)], num1dList) - ctxt, operatorRepresentation = self._hoistConstantAndReference( - ctxt, cb, operatorRepresentation, nodeName, 'number1dPtr', - PULPStructDataTypes.pi_cl_ram_req_t.structTypeDict['length']) - - return ctxt, operatorRepresentation - - def _generateEgressPointerUpdates( - self, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, tilingSchedule.outputLoadSchedule, - tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(self._updateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateIngressPointerUpdates( - self, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, 
tilingSchedule.inputLoadSchedule, - tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(self._updateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateVariableUpdates(self, tilingSchedule: TilingSchedule, variableReplacement: VariableReplacementScheme, - ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> List[CodeSnippet]: - - updates = [] - - for key in variableReplacement.perTileReplacements.keys(): - - buf = ctxt.lookup(operatorRepresentation[key]) - reference = str(buf._instance) - - updates.append( - CodeSnippet(self._updateReferenceTemplate, { - "reference": reference, - "tileNum": "TILING_I", - "baseReference": buf._referenceName - })) - - return updates - - def _generateDMACode(self, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - loadSchedule: List[Dict[str, HyperRectangle]], - direction: Literal["ToL2", "FromL2"]) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - DMATransferCalls = [] - DMAWaitStatements = [] - - allNumTransfers = AutoTransposeUtils.allNumTransfers(ctxt, operatorRepresentation, loadSchedule, direction) - - transferNodeRep = {} - - loadStep = loadSchedule[0] - - for idx, (key, rectangle) in enumerate(loadStep.items()): - - externalPtr = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - internalPtr = ctxt.lookup(operatorRepresentation[key]) - - tensorName = key - nodeName = operatorRepresentation['nodeName'] - dmaName = self._DMAStructName(tensorName, nodeName) - - transferNodeRep = { - **transferNodeRep, - **{ - 'innerTilePtr': str(internalPtr._instance), - "outerTilePtr": str(externalPtr._instance), - "stateReference": dmaName - } - } - - struct = self._rectToDMAStruct(ctxt, rectangle, direction, internalPtr.name, externalPtr.name) - transferNodeRep["stateStruct"] = struct - _ = 
ctxt.hoistStruct(struct, dmaName, PULPStructDataTypes.pi_cl_ram_req_t) - ctxt.lookup(dmaName)._users += [operatorRepresentation['nodeName']] - - DMATransferCalls.append(CodeSnippet(self._moveTileInTemplate, transferNodeRep)) - - DMAWaitStatements.append(CodeSnippet(self._blockTileInTemplate, transferNodeRep)) - - return DMATransferCalls, DMAWaitStatements - - def _generateIngressDMACode( - self, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - importLoadStep = tilingSchedule.inputLoadSchedule - ingressDMATransferCalls, ingressDMAWaitStatements = self._generateDMACode(ctxt, operatorRepresentation, - importLoadStep, "ToL2") - return ingressDMATransferCalls, ingressDMAWaitStatements - - def _generateEgressDMACode( - self, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - exportLoadStep = tilingSchedule.outputLoadSchedule - egressDMATransferCalls, egressDMAWaitStatements = self._generateDMACode(ctxt, operatorRepresentation, - exportLoadStep, "FromL2") - - return egressDMATransferCalls, egressDMAWaitStatements - - def _tilingLoop(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, - nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, - variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - tileIdxPtr = self._hoistTileIdxPtr(ctxt, operatorRepresentation) - - ingressDMATransferCalls, ingressDMAWaitStatements = self._generateIngressDMACode( - tilingSchedule, ctxt, operatorRepresentation) - - egressDMATransferCalls, egressDMAWaitStatements = self._generateEgressDMACode( - tilingSchedule, ctxt, operatorRepresentation) - - ctxt, ingressDMAUpdates = self._generateIngressPointerUpdates(tilingSchedule, ctxt, operatorRepresentation) - ctxt, 
egressDMAUpdates = self._generateEgressPointerUpdates(tilingSchedule, ctxt, operatorRepresentation) - - setupStatements: List[CodeSnippet] = [] - teardownStatements: List[CodeSnippet] = [] - variableUpdates: List[CodeSnippet] = [] - - openLoopStatement = [ - CodeSnippet(self._openTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - closeLoopStatement = [ - CodeSnippet(self._closeTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - metaInfo = TilingMetaInfo(nodeName = operatorRepresentation['nodeName'] + "_L3", - nodeOps = operatorRepresentation['nodeOps'], - numTiles = len(tilingSchedule.outputLoadSchedule), - tileIdxVar = "TILING_I", - kernelLevelTiling = False) - - newExecutionBlock = self.generateAllTilingCode(executionBlock, metaInfo, ingressDMATransferCalls, - ingressDMAWaitStatements, ingressDMAUpdates, - egressDMATransferCalls, egressDMAWaitStatements, - egressDMAUpdates, variableUpdates, openLoopStatement, - closeLoopStatement, setupStatements, teardownStatements) - - return ctxt, newExecutionBlock, True - - def generateTilingLoop( - self, ctxt: NetworkContext, executionBlock: ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - flatTilingSchedule = copy.copy(tilingSchedules[0]) - for tilingSchedule in tilingSchedules[1:]: - flatTilingSchedule += tilingSchedule - - offsetLists = list({**flatTilingSchedule.inputBaseOffsets, **flatTilingSchedule.outputBaseOffsets}.values()) - - if len(offsetLists) == 0: - return ctxt, executionBlock, False - - for offsetList in offsetLists: - if not len(offsetList) == 1: - return ctxt, executionBlock, False - - operatorRepresentation["numTiles"] = self._hoistNumTiles(ctxt, operatorRepresentation['nodeName'], - 
tilingSchedules) - - return self._tilingLoop(ctxt, executionBlock, nodeMemoryConstraint, flatTilingSchedule, variableReplacement, - operatorRepresentation) - - -class PULPL3TilingGenerationSB(PULPL3TilingSB, SingleBufferingTilingMixIn): - pass - - -class ProfilingPULPL3TilingGenerationSB(PULPL3TilingSB, ProfilingSingleBufferingTilingMixIn): - pass diff --git a/Deeploy/Targets/PULPOpen/DMA/L3Dma.py b/Deeploy/Targets/PULPOpen/DMA/L3Dma.py new file mode 100644 index 0000000000..c74b4da7ed --- /dev/null +++ b/Deeploy/Targets/PULPOpen/DMA/L3Dma.py @@ -0,0 +1,53 @@ +import math +from typing import Dict, Tuple + +from Deeploy.DeeployTypes import NetworkContext, NodeTemplate, OperatorRepresentation, VariableBuffer +from Deeploy.TilingExtension.AsyncDma import AsyncDma, BlockingDmaFromAsyncDmaAdapter, DmaDirection, Future, \ + PerTensorWaitingStrategy + + +class L3DmaFuture(Future): + + _initTemplate = NodeTemplate("pi_cl_ram_req_t ${name};") + _deinitTemplate = NodeTemplate("") + _waitTemplate = NodeTemplate("pi_cl_ram_copy_wait(&${name});") + + +class L3Dma(AsyncDma): + + _transferTemplates = { + 2: + NodeTemplate( + "pi_cl_ram_copy_2d(get_ram_ptr(), ${ext}, ${loc}, ${transfer_size}, ${stride}, ${length}, ${ext2loc}, &${future});" + ) + } + _waitingStrategy = PerTensorWaitingStrategy(L3DmaFuture) + + def __init__(self, transferTemplates: Dict[int, NodeTemplate] = _transferTemplates) -> None: + super().__init__(transferTemplates) + + def checkTransfer(self, ctxt: NetworkContext, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, + shape: Tuple[int, ...], strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], + direction: DmaDirection) -> None: + super().checkTransfer(ctxt, externalBuffer, localBuffer, shape, strideExt, strideLoc, direction) + assert strideExt[-1] == 1, \ + "Mchan supports only contigous transfers of the innermost dimension for external memory" + assert strideLoc[0] == shape[1] and strideLoc[1] == 1, \ + f"Mchan supports only contigous transfers 
for local memory. Received local shape: {shape}, stride: {strideLoc}"
+
+    def transferOpRepr(self, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, shape: Tuple[int, ...],
+                       strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], direction: DmaDirection,
+                       future: Future) -> OperatorRepresentation:
+        operatorRepresentation = super().transferOpRepr(externalBuffer, localBuffer, shape, strideExt, strideLoc,
+                                                        direction, future)
+        operatorRepresentation.update({
+            "ext2loc": 1 if direction == "ExternalToLocal" else 0,
+            "transfer_size": math.prod(shape),
+            "length": shape[1],
+            "stride": strideExt[0],
+        })
+        return operatorRepresentation
+
+
+# LMACAN: It's a hack because the driver is not working correctly
+l3DmaHack = BlockingDmaFromAsyncDmaAdapter(L3Dma())
diff --git a/Deeploy/Targets/PULPOpen/DMA/MchanDma.py b/Deeploy/Targets/PULPOpen/DMA/MchanDma.py
new file mode 100644
index 0000000000..0f2b77a03d
--- /dev/null
+++ b/Deeploy/Targets/PULPOpen/DMA/MchanDma.py
@@ -0,0 +1,66 @@
+import math
+from typing import Dict, Tuple
+
+from Deeploy.DeeployTypes import NetworkContext, NodeTemplate, OperatorRepresentation, VariableBuffer
+from Deeploy.TilingExtension.AsyncDma import AsyncDma, DmaDirection, Future, TensorGroupWaitingStrategy
+
+
+class MchanChannelFuture(Future):
+
+    _initTemplate = NodeTemplate("uint32_t ${name} = mchan_channel_alloc();")
+    _deinitTemplate = NodeTemplate("mchan_channel_free(${name});")
+    _waitTemplate = NodeTemplate("mchan_channel_wait(${name});")
+
+
+class MchanDma(AsyncDma):
+
+    _transferTemplates = {
+        1: NodeTemplate("mchan_transfer_1d(${cmd}, ${loc}, ${ext});"),
+        2: NodeTemplate("mchan_transfer_2d_ext_strided(${cmd}, ${loc}, ${ext}, ${size_1d}, ${stride_2d});"),
+    }
+    _waitingStrategy = TensorGroupWaitingStrategy(MchanChannelFuture, "channel_id")
+
+    def __init__(self, transferTemplates: Dict[int, NodeTemplate] = _transferTemplates) -> None:
+        super().__init__(transferTemplates)
+
+    def checkTransfer(self, ctxt: NetworkContext, 
externalBuffer: VariableBuffer, localBuffer: VariableBuffer, + shape: Tuple[int, ...], strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], + direction: DmaDirection) -> None: + super().checkTransfer(ctxt, externalBuffer, localBuffer, shape, strideExt, strideLoc, direction) + + transferRank = len(shape) + assert strideExt[ + -1] == 1, "Mchan supports only contigous transfers of the innermost dimension for external memory" + if transferRank == 1: + assert strideLoc[0] == 1, "Mchan supports only contigous transfers for local memory" + else: + assert strideLoc[0] == shape[1] and strideLoc[ + 1] == 1, "Mchan supports only contigous transfers for local memory" + + def transferOpRepr(self, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, shape: Tuple[int, ...], + strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], direction: DmaDirection, + future: Future) -> OperatorRepresentation: + operatorRepresentation = super().transferOpRepr(externalBuffer, localBuffer, shape, strideExt, strideLoc, + direction, future) + + transferRank = len(shape) + + mchanFlags = 0 + mchanFlags += (1 << 0) if direction == "ExternalToLocal" else 0 # direction + mchanFlags += (1 << 1) # increment addresses + mchanFlags += (1 << 2) if transferRank == 2 else 0 # 2d transfer + mchanFlags += (1 << 3) # event enable + + mchanTransferSize = math.prod(shape) + mchanTransferSizeBits = math.ceil(math.log2(mchanTransferSize)) + assert mchanTransferSizeBits <= 17, ( + "The transfer size is not representable with 17 bits. 
" + f"Received transfer size {mchanTransferSize} that requires {mchanTransferSizeBits}") + + operatorRepresentation["cmd"] = (mchanFlags << 17) + mchanTransferSize + + if transferRank == 2: + operatorRepresentation["size_1d"] = shape[1] + operatorRepresentation["stride_2d"] = strideExt[0] + + return operatorRepresentation diff --git a/Deeploy/Targets/PULPOpen/Platform.py b/Deeploy/Targets/PULPOpen/Platform.py index 93e42b77d0..2fded86717 100644 --- a/Deeploy/Targets/PULPOpen/Platform.py +++ b/Deeploy/Targets/PULPOpen/Platform.py @@ -253,8 +253,8 @@ class PULPStructBuffer(StructBuffer): # SCHEREMO: stdint is included before pulp_nn_kernels.h because it is supposed to be included in there, but isn't... _includeList = [ - "pmsis.h", "stdint.h", "pulp_nn_kernels.h", "DeeployBasicMath.h", "DeeployPULPMath.h", "dory_dma.h", "dory_mem.h", - "bsp/ram.h", "pulp_core.h" + "pmsis.h", "stdint.h", "pulp_nn_kernels.h", "DeeployBasicMath.h", "DeeployPULPMath.h", "mchan_siracusa.h", + "dory_mem.h", "bsp/ram.h", "pulp_core.h" ] diff --git a/Deeploy/Targets/PULPOpen/TileConstraints/GEMMTileConstraint.py b/Deeploy/Targets/PULPOpen/TileConstraints/GEMMTileConstraint.py index 7f8a456265..4206be3390 100644 --- a/Deeploy/Targets/PULPOpen/TileConstraints/GEMMTileConstraint.py +++ b/Deeploy/Targets/PULPOpen/TileConstraints/GEMMTileConstraint.py @@ -117,9 +117,13 @@ def serializeTilingSolution( addrNames = ['A', 'B', 'mul', 'C', 'data_out'] inputBaseOffsets, outputBaseOffsets = cls.extractBaseAddr(tilingSolution, targetMemLevel, operatorRepresentation, addrNames) - varA = operatorRepresentation['A'] + transA = operatorRepresentation['transA'] + transB = operatorRepresentation['transB'] - NSize = ctxt.lookup(varA).shape[-1] + buffA = ctxt.lookup(operatorRepresentation['A']) + buffB = ctxt.lookup(operatorRepresentation['B']) + + NSize = buffA.shape[-1] NOffset = 0 inputACubes = [] @@ -151,8 +155,51 @@ def serializeTilingSolution( replacements["O"].append(OSize) 
replacements["batch"].append(BSize) - ACube = HyperRectangle((BatchOffset, BOffset, MOffset, NOffset), (BatchSize, BSize, MSize, NSize)) - BCube = HyperRectangle((BatchOffset, BOffset, OOffset, NOffset), (BatchSize, BSize, OSize, NSize)) + if transA == 0: + AMatrixOffsets = (MOffset, NOffset) + AMatrixShape = (MSize, NSize) + else: + AMatrixOffsets = (NOffset, MOffset) + AMatrixShape = (NSize, MSize) + + if transB == 0: + BMatrixOffsets = (NOffset, OOffset) + BMatrixShape = (NSize, OSize) + else: + BMatrixOffsets = (OOffset, NOffset) + BMatrixShape = (OSize, NSize) + + if len(buffA.shape) == 2: + ACube = HyperRectangle(AMatrixOffsets, AMatrixShape) + elif len(buffA.shape) == 3: + ACube = HyperRectangle((BatchOffset,) + AMatrixOffsets, (BatchSize,) + AMatrixShape) + else: + ACube = HyperRectangle( + ( + BatchOffset, + BOffset, + ) + AMatrixOffsets, + ( + BatchSize, + BSize, + ) + AMatrixShape, + ) + + if len(buffB.shape) == 2: + BCube = HyperRectangle(BMatrixOffsets, BMatrixShape) + elif len(buffB.shape) == 3: + BCube = HyperRectangle((BatchOffset,) + BMatrixOffsets, (BatchSize,) + BMatrixShape) + else: + BCube = HyperRectangle( + ( + BatchOffset, + BOffset, + ) + BMatrixOffsets, + ( + BatchSize, + BSize, + ) + BMatrixShape, + ) RequantCube = HyperRectangle((OOffset,), (OSize,)) @@ -301,13 +348,14 @@ def serializeTilingSolution( transA = operatorRepresentation['transA'] transB = operatorRepresentation['transB'] - varA = operatorRepresentation['A'] - varB = operatorRepresentation['B'] + buffA = ctxt.lookup(operatorRepresentation['A']) + buffB = ctxt.lookup(operatorRepresentation['B']) + buffC = ctxt.lookup(operatorRepresentation['C']) if transA == 0: - NSize = ctxt.lookup(varA).shape[-1] + NSize = buffA.shape[-1] else: - NSize = ctxt.lookup(varA).shape[-2] + NSize = buffA.shape[-2] NOffset = 0 @@ -340,16 +388,60 @@ def serializeTilingSolution( replacements["batch"].append(BSize) if transA == 0: - ACube = HyperRectangle((BatchOffset, BOffset, MOffset, NOffset), 
(BatchSize, BSize, MSize, NSize)) + AMatrixOffsets = (MOffset, NOffset) + AMatrixShape = (MSize, NSize) else: - ACube = HyperRectangle((BatchOffset, BOffset, NOffset, MOffset), (BatchSize, BSize, NSize, MSize)) + AMatrixOffsets = (NOffset, MOffset) + AMatrixShape = (NSize, MSize) if transB == 0: - BCube = HyperRectangle((BatchOffset, BOffset, NOffset, OOffset), (BatchSize, BSize, NSize, OSize)) + BMatrixOffsets = (NOffset, OOffset) + BMatrixShape = (NSize, OSize) else: - BCube = HyperRectangle((BatchOffset, BOffset, OOffset, NOffset), (BatchSize, BSize, OSize, NSize)) + BMatrixOffsets = (OOffset, NOffset) + BMatrixShape = (OSize, NSize) - CCube = HyperRectangle(cube.offset, cube.dims) + if len(buffA.shape) == 2: + ACube = HyperRectangle(AMatrixOffsets, AMatrixShape) + elif len(buffA.shape) == 3: + ACube = HyperRectangle((BatchOffset,) + AMatrixOffsets, (BatchSize,) + AMatrixShape) + else: + ACube = HyperRectangle( + ( + BatchOffset, + BOffset, + ) + AMatrixOffsets, + ( + BatchSize, + BSize, + ) + AMatrixShape, + ) + + if len(buffB.shape) == 2: + BCube = HyperRectangle(BMatrixOffsets, BMatrixShape) + elif len(buffB.shape) == 3: + BCube = HyperRectangle((BatchOffset,) + BMatrixOffsets, (BatchSize,) + BMatrixShape) + else: + BCube = HyperRectangle( + ( + BatchOffset, + BOffset, + ) + BMatrixOffsets, + ( + BatchSize, + BSize, + ) + BMatrixShape, + ) + + CMatrixOffsets = (MOffset, OOffset) + CMatrixShape = (MSize, OSize) + + if len(buffC.shape) == 2: + CCube = HyperRectangle(CMatrixOffsets, CMatrixShape) + elif len(buffC.shape) == 3: + CCube = HyperRectangle((BatchOffset,) + CMatrixOffsets, (BatchSize,) + CMatrixShape) + else: + CCube = HyperRectangle((BatchOffset, BOffset) + CMatrixOffsets, (BatchSize, BSize) + CMatrixShape) inputACubes.append(ACube) inputBCubes.append(BCube) diff --git a/Deeploy/Targets/PULPOpen/TileConstraints/MatMulTileConstraint.py b/Deeploy/Targets/PULPOpen/TileConstraints/MatMulTileConstraint.py index 2b5d284159..cae635b5fc 100644 --- 
a/Deeploy/Targets/PULPOpen/TileConstraints/MatMulTileConstraint.py +++ b/Deeploy/Targets/PULPOpen/TileConstraints/MatMulTileConstraint.py @@ -112,9 +112,10 @@ def serializeTilingSolution( inputBaseOffsets, outputBaseOffsets = cls.extractBaseAddr(tilingSolution, targetMemLevel, operatorRepresentation, addrNames) - varA = operatorRepresentation['A'] + buffA = ctxt.lookup(operatorRepresentation['A']) + buffB = ctxt.lookup(operatorRepresentation['B']) - NSize = ctxt.lookup(varA).shape[-1] + NSize = buffA.shape[-1] NOffset = 0 inputACubes = [] @@ -144,8 +145,43 @@ def serializeTilingSolution( replacements["O"].append(OSize) replacements["batch"].append(BSize) - ACube = HyperRectangle((BatchOffset, BOffset, MOffset, NOffset), (BatchSize, BSize, MSize, NSize)) - BCube = HyperRectangle((BatchOffset, BOffset, NOffset, OOffset), (BatchSize, BSize, NSize, OSize)) + AMatrixOffsets = (MOffset, NOffset) + AMatrixShape = (MSize, NSize) + + BMatrixOffsets = (NOffset, OOffset) + BMatrixShape = (NSize, OSize) + + if len(buffA.shape) == 2: + ACube = HyperRectangle(AMatrixOffsets, AMatrixShape) + elif len(buffA.shape) == 3: + ACube = HyperRectangle((BatchOffset,) + AMatrixOffsets, (BatchSize,) + AMatrixShape) + else: + ACube = HyperRectangle( + ( + BatchOffset, + BOffset, + ) + AMatrixOffsets, + ( + BatchSize, + BSize, + ) + AMatrixShape, + ) + + if len(buffB.shape) == 2: + BCube = HyperRectangle(BMatrixOffsets, BMatrixShape) + elif len(buffB.shape) == 3: + BCube = HyperRectangle((BatchOffset,) + BMatrixOffsets, (BatchSize,) + BMatrixShape) + else: + BCube = HyperRectangle( + ( + BatchOffset, + BOffset, + ) + BMatrixOffsets, + ( + BatchSize, + BSize, + ) + BMatrixShape, + ) inputACubes.append(ACube) inputBCubes.append(BCube) diff --git a/Deeploy/Targets/PULPOpen/TileConstraints/SoftmaxCrossEntropyTileConstraint.py b/Deeploy/Targets/PULPOpen/TileConstraints/SoftmaxCrossEntropyTileConstraint.py index 343c4970eb..cec95f671d 100644 --- 
a/Deeploy/Targets/PULPOpen/TileConstraints/SoftmaxCrossEntropyTileConstraint.py +++ b/Deeploy/Targets/PULPOpen/TileConstraints/SoftmaxCrossEntropyTileConstraint.py @@ -114,7 +114,7 @@ def serializeTilingSolution( replacements['num_classes'].append(num_classes) replacements['batch'].append(batch) - labelCube = HyperRectangle((0, cube.offset[0]), (1, batch)) + labelCube = HyperRectangle((cube.offset[0],), (batch,)) inputlabelCubes.append(labelCube) inputLoadSchedule = [] diff --git a/Deeploy/Targets/Snitch/Bindings.py b/Deeploy/Targets/Snitch/Bindings.py index 37f7800d6b..391c64af81 100644 --- a/Deeploy/Targets/Snitch/Bindings.py +++ b/Deeploy/Targets/Snitch/Bindings.py @@ -36,11 +36,13 @@ from Deeploy.Targets.Generic.TypeCheckers import AddChecker, GEMMChecker, RQAddChecker, SoftmaxChecker, iNoNormChecker from Deeploy.Targets.Snitch.CodeTransformationPasses import SnitchClusterTiling, SnitchCoreFilterPass, \ SnitchProfileExecutionBlockPass, SnitchSynchCoresPass +from Deeploy.Targets.Snitch.DMA.SnitchDma import SnitchDma from Deeploy.Targets.Snitch.Templates import AddTemplate, FloatGemmTemplate, RQAddTemplate, iSoftmaxTemplate from Deeploy.Targets.Snitch.Templates.FloatSoftmaxTemplate import FloatSoftmax_Template from Deeploy.Targets.Snitch.Templates.GemmTemplate import SnitchGemm_Template from Deeploy.Targets.Snitch.Templates.RqGemmTemplate import SnitchRqGemm_Template -from Deeploy.TilingExtension.CodeTransformationPasses.TilingVariableReplacement import TilingVariableReplacement +from Deeploy.TilingExtension.CodeTransformationPasses.TilingVariableReplacement import TilingVariableReplacement, \ + TilingVariableReplacementUpdate TilingCallClosure = partial(ClosureGeneration, closureSuffix = "_tiling_closure") MemoryAwareFunctionCallClosure = partial(MemoryAwareClosureGeneration, @@ -60,7 +62,8 @@ TilingVariableReplacement("L1"), TilingCallClosure(writeback = False), SnitchSynchCoresPass(), - SnitchClusterTiling("L1"), + TilingVariableReplacementUpdate("L1"), + 
SnitchClusterTiling("L2", "L1", SnitchDma()), ArgumentStructGeneration(), MemoryManagementGeneration("L1"), MemoryAwareFunctionCallClosure(writeback = False, generateStruct = True), diff --git a/Deeploy/Targets/Snitch/CodeTransformationPasses/SnitchClusterTiling.py b/Deeploy/Targets/Snitch/CodeTransformationPasses/SnitchClusterTiling.py index ce513a5355..71268d0a6d 100644 --- a/Deeploy/Targets/Snitch/CodeTransformationPasses/SnitchClusterTiling.py +++ b/Deeploy/Targets/Snitch/CodeTransformationPasses/SnitchClusterTiling.py @@ -26,24 +26,37 @@ from typing import Tuple from Deeploy.DeeployTypes import CodeGenVerbosity, CodeTransformationPass, ExecutionBlock, NetworkContext, _NoVerbosity +from Deeploy.TilingExtension.AsyncDma import AsyncDma +from Deeploy.TilingExtension.CodeTransformationPasses.DoubleBufferingTilingCodeGeneration import \ + DoubleBufferingTilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.SingleBufferingTilingCodeGeneration import \ + SingleBufferingTilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import DoubleBufferingTilingMixIn, \ + SingleBufferingTilingMixIn -from .SnitchClusterTilingSB import SnitchClusterTilingGenerationSB + +class SnitchClusterTilingSB(SingleBufferingTilingCodeGeneration, SingleBufferingTilingMixIn): + pass + + +class SnitchClusterTilingDB(DoubleBufferingTilingCodeGeneration, DoubleBufferingTilingMixIn): + pass class SnitchClusterTiling(CodeTransformationPass): - def __init__(self, targetMemLevel: str): - self.SB = SnitchClusterTilingGenerationSB(targetMemLevel) + def __init__(self, externalMemory: str, localMemory: str, dma: AsyncDma): + self.SB = SnitchClusterTilingSB(externalMemory, localMemory, dma) + self.DB = SnitchClusterTilingDB(externalMemory, localMemory, dma) def apply(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, name: str, verbose: CodeGenVerbosity = _NoVerbosity) -> Tuple[NetworkContext, ExecutionBlock]: - if 
verbose.tilingProfiling: raise NotImplementedError("Profiling not implemented for L2") - # ctxt, executionBlock = self.profilingSB.apply(ctxt, executionBlock, name) - else: - ctxt, executionBlock = self.SB.apply(ctxt, executionBlock, name) + + ctxt, executionBlock = self.SB.apply(ctxt, executionBlock, name) + ctxt, executionBlock = self.DB.apply(ctxt, executionBlock, name) return ctxt, executionBlock diff --git a/Deeploy/Targets/Snitch/CodeTransformationPasses/SnitchClusterTilingSB.py b/Deeploy/Targets/Snitch/CodeTransformationPasses/SnitchClusterTilingSB.py deleted file mode 100644 index 8e31ee2627..0000000000 --- a/Deeploy/Targets/Snitch/CodeTransformationPasses/SnitchClusterTilingSB.py +++ /dev/null @@ -1,520 +0,0 @@ -# ---------------------------------------------------------------------- -# -# File: SnitchClusterTilingSB.py -# -# Last edited: 03.06.2024 -# -# Copyright (C) 2024, ETH Zurich and University of Bologna. -# -# Author: -# - Victor Jung, jungvi@iis.ee.ethz.ch, ETH Zurich -# -# ---------------------------------------------------------------------- -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the License); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an AS IS BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -from collections import namedtuple -from typing import Dict, List, Literal, Tuple - -from Deeploy.DeeployTypes import CodeSnippet, ExecutionBlock, NetworkContext, NodeTemplate, OperatorRepresentation -from Deeploy.Targets.Snitch.DataTypes import Snitch_DMA_copy -from Deeploy.TilingExtension.CodeTransformationPasses.TilingCodeGeneration import TilingCodeGeneration -from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import SingleBufferingTilingMixIn, TilingMetaInfo -from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint -from Deeploy.TilingExtension.TilingCodegen import HyperRectangle, TilingSchedule, VariableReplacementScheme, \ - calculateRectangleOffset, minimizeRectangleDims - -_openTileLoopTemplate = NodeTemplate(""" - -// TILING LOOP -for (int TILING_I=${numTiles}[*${tileIdxPtr}]; TILING_I<${numTiles}[(*${tileIdxPtr})+1]; TILING_I++){ -""") - -_closeTileLoopTemplate = NodeTemplate(""" - -// CLOSE TILING LOOP -} -*${tileIdxPtr} += 1; - -""") - -_moveTileInTemplate = NodeTemplate(""" - -// IMPORT TILE ${innerTilePtr} from ${outerTilePtr} -if(snrt_is_dm_core()){ - ${stateReference}.tid = snrt_dma_start_2d(${stateReference}.dst, - ${stateReference}.src, - ${stateReference}.size, - ${stateReference}.dst_stride, - ${stateReference}.src_stride, - ${stateReference}.repeat); -} -""") - -_iteratedMoveTileInTemplate = NodeTemplate(""" - -""") - -_blockTileInTemplate = NodeTemplate(""" - -// BLOCKING IMPORT TILE ${innerTilePtr} -if(snrt_is_dm_core()){ - // snrt_dma_wait(${stateReference}.tid); - snrt_dma_wait_all(); -} -""") - -_moveTileOutTemplate = NodeTemplate(""" - -// EXPORT TILE ${innerTilePtr} to ${outerTilePtr} -if(snrt_is_dm_core()){ - ${stateReference}.tid = snrt_dma_start_2d(${stateReference}.dst, - ${stateReference}.src, - ${stateReference}.size, - ${stateReference}.dst_stride, - ${stateReference}.src_stride, - ${stateReference}.repeat); -} -""") - -_blockTileOutTemplate = NodeTemplate(""" - -// BLOCKING 
EXPORT TILE ${innerTilePtr} -if(snrt_is_dm_core()){ - //snrt_dma_wait(${stateReference}.tid); - snrt_dma_wait_all(); -} -""") - -_updateDMATransferStructTemplate = NodeTemplate(""" - -// UPDATE DMA STRUCT ${stateReference} -${stateReference}.dst = ((char*)${dstPtr}) + ${dstOffsetPtr}[${tileNum}]; -${stateReference}.src = ((char*)${srcPtr}) + ${srcOffsetPtr}[${tileNum}]; -${stateReference}.size = ${sizePtr}[${tileNum}]; -${stateReference}.dst_stride = ${dstStridePtr}[${tileNum}]; -${stateReference}.src_stride = ${srcStridePtr}[${tileNum}]; -${stateReference}.repeat = ${repeatPtr}[${tileNum}]; -""") - -_updateReferenceTemplate = NodeTemplate(""" - -// UPDATE VARIABLE ${reference} -*${reference} = ${baseReference}[${tileNum}]; -""") - -_DMAUpdate = namedtuple("_DMAUpdate", "dst src size dst_stride src_stride repeat tid direction") - - -class SnitchClusterTilingSB(TilingCodeGeneration): - - _prefix = "TILING_REPLACED_" - - _openTileLoopTemplate = _openTileLoopTemplate - _closeTileLoopTemplate = _closeTileLoopTemplate - - _moveTileInTemplate = _moveTileInTemplate - _iteratedMoveTileInTemplate = _iteratedMoveTileInTemplate - _blockTileInTemplate = _blockTileInTemplate - - _moveTileOutTemplate = _moveTileOutTemplate - _blockTileOutTemplate = _blockTileOutTemplate - - _updateDMATransferStructTemplate = _updateDMATransferStructTemplate - _updateReferenceTemplate = _updateReferenceTemplate - - @property - def prefix(self): - return self._prefix + self.targetMemLevel + "_" - - def _DMAStructName(self, tensorName: str, nodeName: str) -> str: - return f"{self.prefix}_DMA_{nodeName}_{tensorName}" - - @classmethod - def _generatePointerUpdates(cls, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - loadSchedule: List[Dict[str, - HyperRectangle]], nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedule: TilingSchedule) -> Dict[str, _DMAUpdate]: - updateDict = {} - deltaOffsets = {} - - for idx, loadStep in enumerate(loadSchedule): - for _, (key, rect) in 
enumerate(loadStep.items()): - - if key in tilingSchedule.outputBaseOffsets.keys(): - baseOffsets = tilingSchedule.outputBaseOffsets[key] - direction = "FromL1" - else: - baseOffsets = tilingSchedule.inputBaseOffsets[key] - direction = "ToL1" - - if key not in updateDict.keys(): - updateDict[key] = [] - if key not in deltaOffsets.keys(): - deltaOffsets[key] = 0 - - referenceBuffer = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - l1Buffer = ctxt.lookup(operatorRepresentation[key]) - - finalMemoryLevel = TilingCodeGeneration.isFinalMemoryLevel(nodeMemoryConstraint, l1Buffer) - - struct = cls._rectToDMAStruct(ctxt, rect, direction, l1Buffer.name, l1Buffer._referenceName, - finalMemoryLevel) - accOffset = calculateRectangleOffset(rect, referenceBuffer) - - lIdx = idx % len(baseOffsets) - - if direction == "ToL1": - src = accOffset - dst = baseOffsets[lIdx] - else: - src = baseOffsets[lIdx] - dst = accOffset - - size = struct.value['size'].value - dst_stride = struct.value['dst_stride'].value - src_stride = struct.value['src_stride'].value - repeat = struct.value['repeat'].value - tid = struct.value['tid'].value - - sol = _DMAUpdate(dst, src, size, dst_stride, src_stride, repeat, tid, direction) - - deltaOffsets[key] = accOffset - updateDict[key].append(sol) - - return updateDict - - @classmethod - def _rectToDMAStruct(cls, ctxt: NetworkContext, rectangle: HyperRectangle, direction: Literal["ToL1", "FromL1"], - L1Name: str, L2Name: str, finalMemoryLevel: bool) -> Snitch_DMA_copy: - - referenceBuffer = ctxt.lookup(L2Name) - - rect, referenceRect = minimizeRectangleDims(rectangle, referenceBuffer) - assert len(rect.dims) <= 3, "Snitch's iDMA only 2D transfers are supported!" 
- - if direction == "FromL1": - _src = L1Name - _dst = referenceBuffer.name - else: - _src = referenceBuffer.name - _dst = L1Name - - transfer_size = rect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - - src_stride = 0 - dst_stride = 0 - repeat = 1 - if len(rect.dims) > 1: - repeat = rect.dims[-2] - if direction == "ToL1": - dst_stride = rect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - src_stride = referenceRect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - else: - dst_stride = referenceRect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - src_stride = rect.dims[-1] * (referenceBuffer._type.referencedType.typeWidth // 8) - - struct = Snitch_DMA_copy( - { - "dst": _dst, - "src": _src, - "size": transfer_size, - "dst_stride": dst_stride, - "src_stride": src_stride, - "repeat": repeat, - "tid": 0 - }, ctxt) - - return struct - - def _hoistDMAUpdates(self, ctxt: NetworkContext, tensorName: str, updateList: List[_DMAUpdate], - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, Dict]: - - operatorRepresentation = operatorRepresentation.copy() - - nodeName = operatorRepresentation['nodeName'] - - dstList = [] - srcList = [] - sizeList = [] - dstStrideList = [] - srcStideList = [] - repeatList = [] - for update in updateList: - dstList.append(int(update.dst)) - srcList.append(int(update.src)) - sizeList.append(int(update.size)) - dstStrideList.append(int(update.dst_stride)) - srcStideList.append(int(update.src_stride)) - repeatList.append(int(update.repeat)) - - dmaName = self._DMAStructName(tensorName, nodeName) - - operatorRepresentation['stateReference'] = dmaName - operatorRepresentation['tileNum'] = "TILING_I" - - if updateList[0].direction == "ToL1": - operatorRepresentation['dstPtr'] = ctxt.lookup(operatorRepresentation[tensorName]).name - operatorRepresentation['srcPtr'] = ctxt.lookup(operatorRepresentation[tensorName])._referenceName - - dstOffsetList = [0] * 
len(updateList) - srcOffsetList = [srcList[i] - srcList[0] for i in range(0, len(srcList))] - # srcOffsetList = [0] + [sum(sizeList[:i+1]) for i in range(0, len(sizeList)-1)] - else: - operatorRepresentation['dstPtr'] = ctxt.lookup(operatorRepresentation[tensorName])._referenceName - operatorRepresentation['srcPtr'] = ctxt.lookup(operatorRepresentation[tensorName]).name - - dstOffsetList = [dstList[i] - dstList[0] for i in range(0, len(dstList))] - # dstOffsetList = [0] + [sum(sizeList[:i+1]) for i in range(0, len(sizeList)-1)] - srcOffsetList = [0] * len(updateList) - - namePrefix = self.prefix + f"{nodeName}_{tensorName}" - - name = namePrefix + "_dst_offset" - cb = ctxt.ConstantBuffer(name, [len(updateList)], dstOffsetList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'dstOffsetPtr') - - name = namePrefix + "_src_offset" - cb = ctxt.ConstantBuffer(name, [len(updateList)], srcOffsetList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'srcOffsetPtr') - - name = namePrefix + "_size" - cb = ctxt.ConstantBuffer(name, [len(updateList)], sizeList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'sizePtr', - Snitch_DMA_copy.structTypeDict['size']) - - name = namePrefix + "_dst_stride" - cb = ctxt.ConstantBuffer(name, [len(updateList)], dstStrideList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'dstStridePtr', - Snitch_DMA_copy.structTypeDict['dst_stride']) - - name = namePrefix + "_src_stride" - cb = ctxt.ConstantBuffer(name, [len(updateList)], srcStideList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'srcStridePtr', - Snitch_DMA_copy.structTypeDict['src_stride']) - - name = namePrefix + "_repeat" - cb = ctxt.ConstantBuffer(name, [len(updateList)], 
repeatList) - ctxt, operatorRepresentation = self._hoistConstantAndReference(ctxt, cb, operatorRepresentation, nodeName, - 'repeatPtr', - Snitch_DMA_copy.structTypeDict['repeat']) - - return ctxt, operatorRepresentation - - def _generateEgressPointerUpdates( - self, nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, tilingSchedule.outputLoadSchedule, - nodeMemoryConstraint, tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(self._updateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateIngressPointerUpdates( - self, nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, List[CodeSnippet]]: - - updates = [] - newCtxt = ctxt.copy() - - updateDict = self._generatePointerUpdates(ctxt, operatorRepresentation, tilingSchedule.inputLoadSchedule, - nodeMemoryConstraint, tilingSchedule) - - for key, updateList in updateDict.items(): - - newCtxt, newNodeRep = self._hoistDMAUpdates(newCtxt, key, updateList, operatorRepresentation) - updates.append(CodeSnippet(self._updateDMATransferStructTemplate, newNodeRep)) - - return newCtxt, updates - - def _generateVariableUpdates(self, tilingSchedule: TilingSchedule, variableReplacement: VariableReplacementScheme, - ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> List[CodeSnippet]: - - updates = [] - - for key in variableReplacement.perTileReplacements.keys(): - - buf = ctxt.lookup(operatorRepresentation[key]) - reference = str(buf._instance) - - updates.append( - 
CodeSnippet(self._updateReferenceTemplate, { - "reference": reference, - "tileNum": "TILING_I", - "baseReference": buf._referenceName - })) - - return updates - - def _generateDMACode(self, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation, loadSchedule: List[Dict[str, HyperRectangle]], - direction: Literal["ToL1", "FromL1"]) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - DMATransferCalls = [] - DMAWaitStatements = [] - transferNodeRep = {} - - loadStep = loadSchedule[0] - - for idx, (key, rectangle) in enumerate(loadStep.items()): - - permName = f"in{idx}_perm" - - externalPtr = ctxt.lookup(ctxt.lookup(operatorRepresentation[key])._referenceName) - internalPtr = ctxt.lookup(operatorRepresentation[key]) - - tensorName = key - nodeName = operatorRepresentation['nodeName'] - dmaName = self._DMAStructName(tensorName, nodeName) - - transferNodeRep = { - **transferNodeRep, - **{ - 'innerTilePtr': str(internalPtr._instance), - "outerTilePtr": str(externalPtr._instance), - "stateReference": dmaName - } - } - - finalMemoryLevel = TilingCodeGeneration.isFinalMemoryLevel(nodeMemoryConstraint, internalPtr) - struct = self._rectToDMAStruct(ctxt, rectangle, direction, internalPtr.name, externalPtr.name, - finalMemoryLevel) - - transferNodeRep["stateStruct"] = struct - _ = ctxt.hoistStruct(struct, dmaName, Snitch_DMA_copy) - ctxt.lookup(dmaName)._users += [operatorRepresentation['nodeName']] - - if permName in operatorRepresentation and direction == "ToL1": - - DMATransferCalls.append(CodeSnippet(self._iteratedMoveTileInTemplate, transferNodeRep)) - else: - DMATransferCalls.append(CodeSnippet(self._moveTileInTemplate, transferNodeRep)) - - DMAWaitStatements.append(CodeSnippet(self._blockTileInTemplate, transferNodeRep)) - - return DMATransferCalls, DMAWaitStatements - - def _generateIngressDMACode( - self, tilingSchedule: TilingSchedule, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - 
operatorRepresentation: OperatorRepresentation) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - importLoadStep = tilingSchedule.inputLoadSchedule - ingressDMATransferCalls, ingressDMAWaitStatements = self._generateDMACode(nodeMemoryConstraint, ctxt, - operatorRepresentation, - importLoadStep, "ToL1") - return ingressDMATransferCalls, ingressDMAWaitStatements - - def _generateEgressDMACode( - self, tilingSchedule: TilingSchedule, nodeMemoryConstraint: NodeMemoryConstraint, ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation) -> Tuple[List[CodeSnippet], List[CodeSnippet]]: - - exportLoadStep = tilingSchedule.outputLoadSchedule - egressDMATransferCalls, egressDMAWaitStatements = self._generateDMACode(nodeMemoryConstraint, ctxt, - operatorRepresentation, exportLoadStep, - "FromL1") - - return egressDMATransferCalls, egressDMAWaitStatements - - def _tilingLoop(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, - nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, - variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - tileIdxPtr = self._hoistTileIdxPtr(ctxt, operatorRepresentation) - - ingressDMATransferCalls, ingressDMAWaitStatements = self._generateIngressDMACode( - tilingSchedule, nodeMemoryConstraint, ctxt, operatorRepresentation) - - egressDMATransferCalls, egressDMAWaitStatements = self._generateEgressDMACode( - tilingSchedule, nodeMemoryConstraint, ctxt, operatorRepresentation) - - ctxt, ingressDMAUpdates = self._generateIngressPointerUpdates(nodeMemoryConstraint, tilingSchedule, ctxt, - operatorRepresentation) - ctxt, egressDMAUpdates = self._generateEgressPointerUpdates(nodeMemoryConstraint, tilingSchedule, ctxt, - operatorRepresentation) - - openLoopStatement = [ - CodeSnippet(self._openTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - 
closeLoopStatement = [ - CodeSnippet(self._closeTileLoopTemplate, { - "numTiles": operatorRepresentation["numTiles"], - "tileIdxPtr": tileIdxPtr - }) - ] - - variableUpdates = self._generateVariableUpdates(tilingSchedule, variableReplacement, ctxt, - operatorRepresentation) - - metaInfo = TilingMetaInfo(nodeName = operatorRepresentation['nodeName'] + "_L2", - nodeOps = operatorRepresentation['nodeOps'], - numTiles = len(tilingSchedule.outputLoadSchedule), - tileIdxVar = "TILING_I", - kernelLevelTiling = True) - - newExecutionBlock = self.generateAllTilingCode(executionBlock, metaInfo, ingressDMATransferCalls, - ingressDMAWaitStatements, ingressDMAUpdates, - egressDMATransferCalls, egressDMAWaitStatements, - egressDMAUpdates, variableUpdates, openLoopStatement, - closeLoopStatement, [], []) - - return ctxt, newExecutionBlock, True - - def generateTilingLoop( - self, ctxt: NetworkContext, executionBlock: ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, - operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: - - flatTilingSchedule = copy.copy(tilingSchedules[0]) - for tilingSchedule in tilingSchedules[1:]: - flatTilingSchedule += tilingSchedule - - # SCHEREMO: hoist numTiles - - offsetLists = list({**flatTilingSchedule.inputBaseOffsets, **flatTilingSchedule.outputBaseOffsets}.values()) - - if len(offsetLists) == 0: - return ctxt, executionBlock, False - - for offsetList in offsetLists: - if not len(offsetList) == 1: - return ctxt, executionBlock, False - - operatorRepresentation["numTiles"] = self._hoistNumTiles(ctxt, operatorRepresentation['nodeName'], - tilingSchedules) - - return self._tilingLoop(ctxt, executionBlock, nodeMemoryConstraint, flatTilingSchedule, variableReplacement, - operatorRepresentation) - - -class SnitchClusterTilingGenerationSB(SnitchClusterTilingSB, SingleBufferingTilingMixIn): - pass diff --git 
a/Deeploy/Targets/Snitch/DMA/SnitchDma.py b/Deeploy/Targets/Snitch/DMA/SnitchDma.py
new file mode 100644
index 0000000000..6e2533697d
--- /dev/null
+++ b/Deeploy/Targets/Snitch/DMA/SnitchDma.py
@@ -0,0 +1,51 @@
+from typing import Dict, Tuple
+
+from Deeploy.DeeployTypes import NetworkContext, NodeTemplate, OperatorRepresentation, VariableBuffer
+from Deeploy.TilingExtension.AsyncDma import AsyncDma, DmaDirection, Future, TensorGroupWaitingStrategy
+
+
+class SnitchBarrierFuture(Future):
+    _initTemplate = NodeTemplate("")
+    _deinitTemplate = NodeTemplate("")
+    _waitTemplate = NodeTemplate("if (snrt_is_dm_core()) snrt_dma_wait_all();")
+
+
+# LMACAN: TODO: Add single transfer waiting
+class SnitchFuture(Future):
+    _initTemplate = NodeTemplate("uint16_t ${name};")
+    _deinitTemplate = NodeTemplate("")
+    _waitTemplate = NodeTemplate("if (snrt_is_dm_core()) snrt_dma_wait(${name});")
+
+
+class SnitchDma(AsyncDma):
+
+    _transferTemplates = {
+        2:
+            NodeTemplate(
+                "if (snrt_is_dm_core()) snrt_dma_start_2d(${dest}, ${src}, ${size}, ${stride_dest}, ${stride_src}, ${repeat});"
+            ),
+    }
+    _waitingStrategy = TensorGroupWaitingStrategy(SnitchBarrierFuture, "")
+
+    def __init__(self, transferTemplates: Dict[int, NodeTemplate] = _transferTemplates) -> None:
+        super().__init__(transferTemplates)
+
+    def checkTransfer(self, ctxt: NetworkContext, externalBuffer: VariableBuffer, localBuffer: VariableBuffer,
+                      shape: Tuple[int, ...], strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...],
+                      direction: DmaDirection) -> None:
+        super().checkTransfer(ctxt, externalBuffer, localBuffer, shape, strideExt, strideLoc, direction)
+        assert strideLoc[1] == 1 and strideExt[1] == 1, f"Supports only contiguous transfers in the innermost dimension"
+
+    def transferOpRepr(self, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, shape: Tuple[int, ...],
+                      strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], direction: DmaDirection,
+                      future: Future) -> OperatorRepresentation:
+        _ = future
+ operatorRepresentation: OperatorRepresentation = { + "dest": localBuffer.name if direction == "ExternalToLocal" else externalBuffer.name, + "src": externalBuffer.name if direction == "ExternalToLocal" else localBuffer.name, + "repeat": shape[0], + "size": shape[1], + "stride_dest": strideLoc[0] if direction == "ExternalToLocal" else strideExt[0], + "stride_src": strideExt[0] if direction == "ExternalToLocal" else strideLoc[0], + } + return operatorRepresentation diff --git a/Deeploy/TilingExtension/AsyncDma.py b/Deeploy/TilingExtension/AsyncDma.py new file mode 100644 index 0000000000..ea1dd99edf --- /dev/null +++ b/Deeploy/TilingExtension/AsyncDma.py @@ -0,0 +1,247 @@ +import math +from abc import ABC, abstractmethod +from typing import Dict, List, Literal, Set, Tuple, Type + +from Deeploy.DeeployTypes import CodeSnippet, NetworkContext, NodeTemplate, OperatorRepresentation, VariableBuffer, \ + _ReferenceBuffer +from Deeploy.TilingExtension.TilingCodegen import padShape, padStride + +DmaDirection = Literal["ExternalToLocal", "LocalToExternal"] + + +class Future: + + _initTemplate: NodeTemplate + _deinitTemplate: NodeTemplate + _waitTemplate: NodeTemplate + + def __init__(self, name: str): + self.name = name + + def _operatorRepresentation(self) -> OperatorRepresentation: + return {"name": self.name} + + def init(self) -> CodeSnippet: + return CodeSnippet(self._initTemplate, self._operatorRepresentation()) + + def deinit(self) -> CodeSnippet: + return CodeSnippet(self._deinitTemplate, self._operatorRepresentation()) + + def wait(self) -> CodeSnippet: + return CodeSnippet(self._waitTemplate, self._operatorRepresentation()) + + +class AsyncDmaWaitingStrategy(ABC): + + def __init__(self, FutureCls: Type[Future]) -> None: + self.FutureCls = FutureCls + + @abstractmethod + def getFuture(self, tensorName: str) -> Future: + pass + + +class PerTensorWaitingStrategy(AsyncDmaWaitingStrategy): + + def getFuture(self, tensorName: str) -> Future: + return 
self.FutureCls(tensorName + "_future") + + +class TensorGroupWaitingStrategy(AsyncDmaWaitingStrategy): + + def __init__(self, FutureCls: Type[Future], asyncGroupName: str) -> None: + super().__init__(FutureCls) + self.asyncGroupFuture = FutureCls(f"{asyncGroupName}_future") + + def getFuture(self, tensorName: str) -> Future: + _ = tensorName + return self.asyncGroupFuture + + +class AsyncDma(ABC): + + _waitingStrategy: AsyncDmaWaitingStrategy + + def __init__(self, transferTemplates: Dict[int, NodeTemplate]) -> None: + self._transferTemplates = transferTemplates + + def getFuture(self, tensorName: str) -> Future: + return self._waitingStrategy.getFuture(tensorName) + + def supportedTransferRanks(self) -> Set[int]: + return set(self._transferTemplates.keys()) + + def checkTransfer(self, ctxt: NetworkContext, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, + shape: Tuple[int, ...], strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], + direction: DmaDirection) -> None: + transferRank = len(shape) + assert transferRank == len(strideLoc) and transferRank == len( + strideExt), f"The shape and stride rank should match" + assert transferRank in self.supportedTransferRanks( + ), f"Unsupported transfer rank {transferRank}. 
Supported ranks are {self.supportedTransferRanks()}" + + @abstractmethod + def transferOpRepr(self, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, shape: Tuple[int, ...], + strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], direction: DmaDirection, + future: Future) -> OperatorRepresentation: + return {"loc": localBuffer.name, "ext": externalBuffer.name, "future": future.name} + + def transfer(self, ctxt: NetworkContext, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, + shape: Tuple[int, ...], strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], + direction: DmaDirection, future: Future) -> List[CodeSnippet]: + self.checkTransfer(ctxt, externalBuffer, localBuffer, shape, strideExt, strideLoc, direction) + opRepr = self.transferOpRepr(externalBuffer, localBuffer, shape, strideExt, strideLoc, direction, future) + template = self._transferTemplates[len(shape)] + return [CodeSnippet(template, opRepr)] + + def setup(self) -> List[CodeSnippet]: + return [] + + def teardown(self) -> List[CodeSnippet]: + return [] + + +class EmptyFuture(Future): + + _initTemplate = NodeTemplate("") + _deinitTemplate = NodeTemplate("") + _waitTemplate = NodeTemplate("") + + +class BlockingDmaFromAsyncDmaAdapter(AsyncDma): + + _waitingStrategy = PerTensorWaitingStrategy(EmptyFuture) + + def __init__(self, dma: AsyncDma) -> None: + self.dma = dma + + @property + def _transferTemplates(self) -> Dict[int, NodeTemplate]: + return self.dma._transferTemplates + + def transferOpRepr(self, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, shape: Tuple[int, ...], + strideExt: Tuple[int, ...], strideLoc: Tuple[int, ...], direction: DmaDirection, + future: Future) -> OperatorRepresentation: + return self.dma.transferOpRepr(externalBuffer, localBuffer, shape, strideExt, strideLoc, direction, future) + + def transfer(self, ctxt: NetworkContext, externalBuffer: VariableBuffer, localBuffer: VariableBuffer, + shape: Tuple[int, ...], strideExt: Tuple[int, ...], 
strideLoc: Tuple[int, ...], + direction: DmaDirection, future: Future) -> List[CodeSnippet]: + tmpFuture = self.dma.getFuture(future.name.removesuffix("_future")) + callStack = [] + callStack.append(tmpFuture.init()) + callStack.extend( + self.dma.transfer(ctxt, externalBuffer, localBuffer, shape, strideExt, strideLoc, direction, tmpFuture)) + callStack.append(tmpFuture.wait()) + callStack.append(tmpFuture.deinit()) + return callStack + + def setup(self) -> List[CodeSnippet]: + return self.dma.setup() + + def teardown(self) -> List[CodeSnippet]: + return self.dma.teardown() + + +class AnydimAsyncDmaTransferAdapter: + + class NestedForLoopOpenTemplate(NodeTemplate): + + def __init__(self, depth: int): + templateStr = "" + for level in range(depth): + iter = f"i_{level}" + templateStr += f"for (uint32_t {iter} = 0; {iter} < ${{end_{level}}}; {iter}++) {{" + super().__init__(templateStr) + + class NestedForLoopCloseTemplate(NodeTemplate): + + def __init__(self, depth: int): + templateStr = "" + for _ in range(depth): + templateStr += "}" + super().__init__(templateStr) + + class OffsetCalculationTemplate(NodeTemplate): + + def __init__(self, name: str, depth: int): + templateStr = f"const uint32_t {name} = " + for i in range(depth): + templateStr += f"i_{i} * ${{stride_{i}}}" + if i < depth - 1: + templateStr += " + " + templateStr += ";" + super().__init__(templateStr) + + offsetPtrTemplate = NodeTemplate("void * const ${resultPtr} = (void *)${basePtr} + ${offset};") + + def __init__(self, dma: AsyncDma) -> None: + self.dma = dma + + def nearestSupportedTransferRank(self, transfer_rank: int) -> int: + sortedRanks = sorted(self.dma.supportedTransferRanks()) + + # Find nearest smaller + for rank in reversed(sortedRanks): + if rank <= transfer_rank: + return rank + + # All supported ranks are bigger so return the smallest one + return sortedRanks[0] + + def transfer(self, + ctxt: NetworkContext, + externalBuffer: VariableBuffer, + localBuffer: VariableBuffer, + shape: 
Tuple[int, ...], + strideExt: Tuple[int, ...], + strideLoc: Tuple[int, ...], + direction: DmaDirection, + future: Future, + strideExtPad: int = 0) -> List[CodeSnippet]: + transferRank = len(shape) + kernelRank = self.nearestSupportedTransferRank(transferRank) + + if kernelRank < transferRank: + nestedLoopDepth = transferRank - kernelRank + + nestedLoopOpRepr = {f"end_{level}": shape[level] for level in range(nestedLoopDepth)} + locOffsetCalculationOpRepr = {f"stride_{level}": strideLoc[level] for level in range(nestedLoopDepth)} + extOffsetCalculationOpRepr = {f"stride_{level}": strideExt[level] for level in range(nestedLoopDepth)} + + callStack = [] + callStack.append(CodeSnippet(self.NestedForLoopOpenTemplate(nestedLoopDepth), nestedLoopOpRepr)) + callStack.append( + CodeSnippet(self.OffsetCalculationTemplate("ext_offset", nestedLoopDepth), extOffsetCalculationOpRepr)) + callStack.append( + CodeSnippet(self.OffsetCalculationTemplate("loc_offset", nestedLoopDepth), locOffsetCalculationOpRepr)) + + localBufferOffseted = _ReferenceBuffer("local_buffer_offsetted", localBuffer) + localBufferOffseted._memoryLevel = localBuffer._memoryLevel + callStack.append( + CodeSnippet(self.offsetPtrTemplate, { + "resultPtr": "local_buffer_offsetted", + "basePtr": localBuffer.name, + "offset": "loc_offset" + })) + + externalBufferOffseted = _ReferenceBuffer("external_buffer_offsetted", externalBuffer) + externalBufferOffseted._memoryLevel = externalBuffer._memoryLevel + callStack.append( + CodeSnippet(self.offsetPtrTemplate, { + "resultPtr": externalBufferOffseted.name, + "basePtr": externalBuffer.name, + "offset": "ext_offset" + })) + + callStack.extend( + self.dma.transfer(ctxt, externalBufferOffseted, localBufferOffseted, shape[-kernelRank:], + strideExt[-kernelRank:], strideLoc[-kernelRank:], direction, future)) + callStack.append(CodeSnippet(self.NestedForLoopCloseTemplate(nestedLoopDepth), {})) + return callStack + elif kernelRank == transferRank: + return 
self.dma.transfer(ctxt, externalBuffer, localBuffer, shape, strideExt, strideLoc, direction, future) + else: + return self.dma.transfer(ctxt, externalBuffer, localBuffer, padShape(shape, kernelRank), + padStride(strideExt, kernelRank, strideExtPad), + padStride(strideLoc, kernelRank, math.prod(shape)), direction, future) diff --git a/Deeploy/TilingExtension/CodeTransformationPasses/DoubleBufferingTilingCodeGeneration.py b/Deeploy/TilingExtension/CodeTransformationPasses/DoubleBufferingTilingCodeGeneration.py new file mode 100644 index 0000000000..dc7a790604 --- /dev/null +++ b/Deeploy/TilingExtension/CodeTransformationPasses/DoubleBufferingTilingCodeGeneration.py @@ -0,0 +1,249 @@ +# ---------------------------------------------------------------------- +# +# File: PULPClusterTilingDB.py +# +# Last edited: 25.10.2023 +# +# Copyright (C) 2023, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import math +from typing import List, Set, Tuple + +from Deeploy.AbstractDataTypes import VoidType +from Deeploy.DeeployTypes import CodeSnippet, ExecutionBlock, NetworkContext, NodeTemplate, OperatorRepresentation, \ + VariableBuffer, _ReferenceBuffer +from Deeploy.TilingExtension.AsyncDma import AnydimAsyncDmaTransferAdapter, AsyncDma, Future +from Deeploy.TilingExtension.CodeTransformationPasses.TilingCodeGeneration import TilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.TilingHoistingMixIn import dictOfArrays +from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import TilingMetaInfo +from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint +from Deeploy.TilingExtension.TilingCodegen import TilingSchedule, VariableReplacementScheme, stridesFromShape + + +class DoubleBufferingTilingCodeGeneration(TilingCodeGeneration): + + _moveTileInCheckOpenStatement = NodeTemplate(""" + // DOUBLE BUFFERING CHECK TILE LOAD + if ((${tileIdxVar}) < ${numTiles}[*${tileIdxPtr}+1]) { + """) + + _moveTileInCheckCloseStatement = NodeTemplate(""" + } + """) + + # LMACAN: The brackets around ${tileIdxVar} are important to ensure correct order + # of the modulo operation. Breaking case without the brackets is when we + # put "TILING_I + 1" for tileIdxVar. + _chooseBufferTemplate = NodeTemplate(""" + switch((${tileIdxVar}) % 2) { + case 0: ${reference} = (${type})${buffer_0}; break; + case 1: ${reference} = (${type})${buffer_1}; break; + } + """) + + def __init__(self, externalMemory: str, localMemory: str, dma: AsyncDma): + super().__init__(externalMemory, localMemory, dma, 2) + + def _generateBufferChoice(self, reference: VariableBuffer, buffers: List[_ReferenceBuffer], + tileIdxVar: str) -> CodeSnippet: + assert len(buffers) == 2, f"Only double buffering supported. Received {len(buffers)} buffers." 
+ operatorRepresentation = { + "tileIdxVar": tileIdxVar, + "reference": reference.name, + "type": reference._type.typeName, + "buffer_0": buffers[0].name, + "buffer_1": buffers[1].name, + } + template = self._chooseBufferTemplate + return CodeSnippet(template, operatorRepresentation) + + def _tilingLoop(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, + nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, + variableReplacement: VariableReplacementScheme, + operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: + + setupStatements: List[CodeSnippet] = [] + teardownStatements: List[CodeSnippet] = [] + + openLoopStatements: List[CodeSnippet] = [CodeSnippet(self._openTileLoopTemplate, {**operatorRepresentation})] + + ingressDmaTransferCalls: List[CodeSnippet] = [ + CodeSnippet(self._moveTileInCheckOpenStatement, { + **operatorRepresentation, "tileIdxVar": "TILING_I+1" + }) + ] + + ingressFutures: Set[Future] = set() + initialFutures: Set[Future] = set() + + for tensorName, rectangles in dictOfArrays(tilingSchedule.inputLoadSchedule).items(): + localBuffer = ctxt.lookup(operatorRepresentation[tensorName]) + assert localBuffer._memoryLevel == self.localMemory + assert isinstance(localBuffer, _ReferenceBuffer) + externalBuffer = ctxt.lookup(localBuffer._referenceName) + assert isinstance(externalBuffer, VariableBuffer) + tensorMemoryConstraint = nodeMemoryConstraint.inputTensorMemoryConstraints[externalBuffer.name] + externalBufferShape = tensorMemoryConstraint.memoryConstraints[self.externalMemory].shape + assert externalBufferShape is not None + + rectangles, externalBufferShape = self._legalizeTransfers(rectangles, tuple(externalBufferShape), + localBuffer._type.referencedType.typeWidth, + self.isFinalMemoryLevel(tensorMemoryConstraint)) + + externalBufferRef = self._hoistReference(ctxt, + externalBuffer.name + "_ref", + externalBuffer, + externalBufferShape, + override_type = VoidType) + + 
tensorMemoryConstraint = nodeMemoryConstraint.inputTensorMemoryConstraints[externalBuffer.name] + l1BuffersReferences = self._hoistMultibufferReferences(ctxt, localBuffer, tensorMemoryConstraint) + + nextLocalBufferReference = self._hoistReference(ctxt, f"{tensorName}_next", l1BuffersReferences[1]) + + openLoopStatements.append(self._generateBufferChoice(localBuffer, l1BuffersReferences, "TILING_I")) + + future = self.dma.getFuture(tensorName) + ingressFutures.add(future) + + ingressDmaTransferCalls.append( + self._generateBufferChoice(nextLocalBufferReference, l1BuffersReferences, "TILING_I+1")) + ingressDmaTransferCalls.extend( + self._generateDmaTransferCalls(ctxt, tensorName, rectangles, "TILING_I+1", nextLocalBufferReference, + externalBufferRef, "ExternalToLocal", future)) + + anydimAdapter = AnydimAsyncDmaTransferAdapter(self.dma) + + initialFuture = self.dma.getFuture(tensorName + "_init") + initialFutures.add(initialFuture) + initialDmaTransferCalls = anydimAdapter.transfer(ctxt, externalBufferRef, localBuffer, rectangles[0].dims, + stridesFromShape(externalBufferShape), + stridesFromShape(rectangles[0].dims), "ExternalToLocal", + initialFuture, math.prod(externalBufferShape)) + setupStatements.extend(initialDmaTransferCalls) + setupStatements.append(initialFuture.wait()) + + referenceUpdate = self._generateExternalReferenceUpdate(ctxt, tensorName, rectangles, "TILING_I+1", + externalBufferRef) + if referenceUpdate is not None: + ingressDmaTransferCalls.append(referenceUpdate) + initialReferenceUpdate = CodeSnippet(referenceUpdate.template, + operatorRepresentation = { + **referenceUpdate.operatorRepresentation, + "tileIdxVar": 0, + }) + setupStatements.append(initialReferenceUpdate) + + ingressDmaTransferCalls.append(CodeSnippet(self._moveTileInCheckCloseStatement, {})) + ingressDmaWaitStatements = [f.wait() for f in ingressFutures] + + egressDmaTransferCalls: List[CodeSnippet] = [] + egressFutures: Set[Future] = set() + + for tensorName, rectangles in 
dictOfArrays(tilingSchedule.outputLoadSchedule).items(): + localBuffer = ctxt.lookup(operatorRepresentation[tensorName]) + assert localBuffer._memoryLevel == self.localMemory + assert isinstance(localBuffer, _ReferenceBuffer) + externalBuffer = ctxt.lookup(localBuffer._referenceName) + assert isinstance(externalBuffer, VariableBuffer) + tensorMemoryConstraint = nodeMemoryConstraint.outputTensorMemoryConstraints[externalBuffer.name] + externalBufferShape = tensorMemoryConstraint.memoryConstraints[self.externalMemory].shape + assert externalBufferShape is not None + + rectangles, externalBufferShape = self._legalizeTransfers(rectangles, tuple(externalBufferShape), + localBuffer._type.referencedType.typeWidth, + self.isFinalMemoryLevel(tensorMemoryConstraint)) + + externalBufferRef = self._hoistReference(ctxt, + externalBuffer.name + "_ref", + externalBuffer, + externalBufferShape, + override_type = VoidType) + + tensorMemoryConstraint = nodeMemoryConstraint.outputTensorMemoryConstraints[externalBuffer.name] + l1BuffersReferences = self._hoistMultibufferReferences(ctxt, localBuffer, tensorMemoryConstraint) + + openLoopStatements.append(self._generateBufferChoice(localBuffer, l1BuffersReferences, "TILING_I")) + + future = self.dma.getFuture(tensorName) + egressFutures.add(future) + + dmaTransferCalls = self._generateDmaTransferCalls(ctxt, tensorName, rectangles, "TILING_I", localBuffer, + externalBufferRef, "LocalToExternal", future) + egressDmaTransferCalls.extend(dmaTransferCalls) + + referenceUpdate = self._generateExternalReferenceUpdate(ctxt, tensorName, rectangles, "TILING_I", + externalBufferRef) + if referenceUpdate is not None: + egressDmaTransferCalls.append(referenceUpdate) + + egressDmaWaitStatements = [f.wait() for f in egressFutures] + + teardownStatements.extend([f.wait() for f in egressFutures]) + + setupStatements = [f.init() for f in ingressFutures | initialFutures | egressFutures] + setupStatements + teardownStatements.extend(f.deinit() for f in 
ingressFutures | initialFutures | egressFutures) + + closeLoopStatements = [CodeSnippet(self._closeTileLoopTemplate, {**operatorRepresentation})] + + metaInfo = TilingMetaInfo( + nodeName = operatorRepresentation['nodeName'] + f"_{self.externalMemory}", + nodeOps = operatorRepresentation['nodeOps'], + numTiles = operatorRepresentation['numTiles'], + totalNumTiles = len(tilingSchedule.outputLoadSchedule), + tileIdxPtr = operatorRepresentation['tileIdxPtr'], + tileIdxVar = "TILING_I", + # TODO: The kernelLevelTiling field is used in profiling to know we are generating code around the kernel. + # The current implementation does this by checking whether we are at the lowest memory level, + # which is hardcoded by the value "L1". Change this to be memory level agnostic. + kernelLevelTiling = self.localMemory == "L1") + + executionBlock = self.generateAllTilingCode(executionBlock, metaInfo, ingressDmaTransferCalls, + ingressDmaWaitStatements, [], egressDmaTransferCalls, + egressDmaWaitStatements, [], [], openLoopStatements, + closeLoopStatements, setupStatements, teardownStatements) + + return ctxt, executionBlock, True + + def generateTilingLoop( + self, ctxt: NetworkContext, executionBlock: ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, + tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, + operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: + + flatTilingSchedule = copy.copy(tilingSchedules[0]) + for tilingSchedule in tilingSchedules[1:]: + flatTilingSchedule += tilingSchedule + + offsetLists = list({**flatTilingSchedule.inputBaseOffsets, **flatTilingSchedule.outputBaseOffsets}.values()) + + if len(offsetLists) == 0: + return ctxt, executionBlock, False + + for offsetList in offsetLists: + if not len(offsetList) == self.bufferCount: + return ctxt, executionBlock, False + + numTiles, tileIdxPtr = self._hoistTileNumAndIdxPtr(ctxt, tilingSchedules) + 
operatorRepresentation["numTiles"] = numTiles.name + operatorRepresentation["tileIdxPtr"] = tileIdxPtr.name + + return self._tilingLoop(ctxt, executionBlock, nodeMemoryConstraint, flatTilingSchedule, variableReplacement, + operatorRepresentation) diff --git a/Deeploy/TilingExtension/CodeTransformationPasses/SingleBufferingTilingCodeGeneration.py b/Deeploy/TilingExtension/CodeTransformationPasses/SingleBufferingTilingCodeGeneration.py new file mode 100644 index 0000000000..52c8568efc --- /dev/null +++ b/Deeploy/TilingExtension/CodeTransformationPasses/SingleBufferingTilingCodeGeneration.py @@ -0,0 +1,153 @@ +# ---------------------------------------------------------------------- +# +# File: PULPL3TilingSB.py +# +# Last edited: 19.04.2024 +# +# Copyright (C) 2024, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +from typing import Dict, List, Set, Tuple + +from Deeploy.AbstractDataTypes import VoidType +from Deeploy.DeeployTypes import CodeSnippet, ExecutionBlock, NetworkContext, OperatorRepresentation, VariableBuffer, \ + _ReferenceBuffer +from Deeploy.TilingExtension.AsyncDma import AsyncDma, DmaDirection, Future +from Deeploy.TilingExtension.CodeTransformationPasses.TilingCodeGeneration import TilingCodeGeneration +from Deeploy.TilingExtension.CodeTransformationPasses.TilingHoistingMixIn import dictOfArrays +from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes import TilingMetaInfo +from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint, TensorMemoryConstraint +from Deeploy.TilingExtension.TilingCodegen import HyperRectangle, TilingSchedule, VariableReplacementScheme + + +class SingleBufferingTilingCodeGeneration(TilingCodeGeneration): + + def __init__(self, externalMemory: str, localMemory: str, dma: AsyncDma): + super().__init__(externalMemory, localMemory, dma, 1) + + def _generateTransferScheduleCalls( + self, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, + transferSchedule: List[Dict[str, HyperRectangle]], tensorMemoryConstraintDict: Dict[str, + TensorMemoryConstraint], + tileIdxVar: str, direction: DmaDirection) -> Tuple[NetworkContext, List[CodeSnippet], Set[Future]]: + callStack: List[CodeSnippet] = [] + referenceUpdates: List[CodeSnippet] = [] + futures: Set[Future] = set() + + for tensorName, rectangles in dictOfArrays(transferSchedule).items(): + localBuffer = ctxt.lookup(operatorRepresentation[tensorName]) + assert localBuffer._memoryLevel == self.localMemory + assert isinstance(localBuffer, _ReferenceBuffer) + externalBuffer = ctxt.lookup(localBuffer._referenceName) + assert isinstance(externalBuffer, VariableBuffer) + tensorMemoryConstraint = tensorMemoryConstraintDict[externalBuffer.name] + externalBufferShape = tensorMemoryConstraint.memoryConstraints[self.externalMemory].shape 
+ assert externalBufferShape is not None + + rectangles, externalBufferShape = self._legalizeTransfers(rectangles, tuple(externalBufferShape), + localBuffer._type.referencedType.typeWidth, + self.isFinalMemoryLevel(tensorMemoryConstraint)) + + externalBufferRef = self._hoistReference(ctxt, + externalBuffer.name + "_ref", + externalBuffer, + shape = externalBufferShape, + override_type = VoidType) + + future = self.dma.getFuture(tensorName) + futures.add(future) + + callStack.extend( + self._generateDmaTransferCalls(ctxt, tensorName, rectangles, tileIdxVar, localBuffer, externalBufferRef, + direction, future)) + + referenceUpdate = self._generateExternalReferenceUpdate(ctxt, tensorName, rectangles, tileIdxVar, + externalBufferRef) + if referenceUpdate is not None: + callStack.append(referenceUpdate) + + return ctxt, callStack, futures + + def _tilingLoop(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, + nodeMemoryConstraint: NodeMemoryConstraint, tilingSchedule: TilingSchedule, + variableReplacement: VariableReplacementScheme, + operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: + ctxt, ingressDmaTransferCalls, ingressFutures = self._generateTransferScheduleCalls( + ctxt, operatorRepresentation, tilingSchedule.inputLoadSchedule, + nodeMemoryConstraint.inputTensorMemoryConstraints, "TILING_I", "ExternalToLocal") + ctxt, egressDmaTransferCalls, egressFutures = self._generateTransferScheduleCalls( + ctxt, operatorRepresentation, tilingSchedule.outputLoadSchedule, + nodeMemoryConstraint.outputTensorMemoryConstraints, "TILING_I", "LocalToExternal") + + ingressDmaWaitStatements = [future.wait() for future in ingressFutures] + egressDmaWaitStatements = [future.wait() for future in egressFutures] + + setupStatements = self.dma.setup() + setupStatements += [f.init() for f in ingressFutures | egressFutures] + + teardownStatements = self.dma.teardown() + teardownStatements.extend(f.deinit() for f in ingressFutures | 
egressFutures) + + openLoopStatements = [CodeSnippet(self._openTileLoopTemplate, {**operatorRepresentation})] + closeLoopStatements = [CodeSnippet(self._closeTileLoopTemplate, {**operatorRepresentation})] + + metaInfo = TilingMetaInfo( + nodeName = operatorRepresentation['nodeName'] + f"_{self.externalMemory}", + nodeOps = operatorRepresentation['nodeOps'], + numTiles = operatorRepresentation['numTiles'], + totalNumTiles = len(tilingSchedule.outputLoadSchedule), + tileIdxPtr = operatorRepresentation['tileIdxPtr'], + tileIdxVar = "TILING_I", + # TODO: The kernelLevelTiling field is used in profiling to know we are generating code around the kernel. + # The current implementation does this by checking whether we are at the lowest memory level, + # which is hardcoded by the value "L1". Change this to be memory level agnostic. + kernelLevelTiling = self.localMemory == "L1") + + executionBlock = self.generateAllTilingCode(executionBlock, metaInfo, ingressDmaTransferCalls, + ingressDmaWaitStatements, [], egressDmaTransferCalls, + egressDmaWaitStatements, [], [], openLoopStatements, + closeLoopStatements, setupStatements, teardownStatements) + + return ctxt, executionBlock, True + + def generateTilingLoop( + self, ctxt: NetworkContext, executionBlock: ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, + tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, + operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: + + flatTilingSchedule = copy.copy(tilingSchedules[0]) + for tilingSchedule in tilingSchedules[1:]: + flatTilingSchedule += tilingSchedule + + offsetLists = list({**flatTilingSchedule.inputBaseOffsets, **flatTilingSchedule.outputBaseOffsets}.values()) + + if len(offsetLists) == 0: + return ctxt, executionBlock, False + + for offsetList in offsetLists: + if not len(offsetList) == self.bufferCount: + return ctxt, executionBlock, False + + numTiles, tileIdxPtr = 
self._hoistTileNumAndIdxPtr(ctxt, tilingSchedules) + operatorRepresentation["numTiles"] = numTiles.name + operatorRepresentation["tileIdxPtr"] = tileIdxPtr.name + + return self._tilingLoop(ctxt, executionBlock, nodeMemoryConstraint, flatTilingSchedule, variableReplacement, + operatorRepresentation) diff --git a/Deeploy/TilingExtension/CodeTransformationPasses/TilingCodeGeneration.py b/Deeploy/TilingExtension/CodeTransformationPasses/TilingCodeGeneration.py index f4e4d9aae9..db5a1a3fce 100644 --- a/Deeploy/TilingExtension/CodeTransformationPasses/TilingCodeGeneration.py +++ b/Deeploy/TilingExtension/CodeTransformationPasses/TilingCodeGeneration.py @@ -23,150 +23,210 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy +import math from abc import abstractmethod -from typing import Dict, List, Optional, Tuple, Type +from typing import List, Optional, Tuple, TypeVar + +import numpy as np -import Deeploy.CommonExtensions.DataTypes as BasicDataTypes -from Deeploy.AbstractDataTypes import Immediate, PointerClass from Deeploy.CommonExtensions.CodeTransformationPasses.Closure import ClosureExecutionBlock from Deeploy.CommonExtensions.CodeTransformationPasses.IntrospectiveCodeTransformation import \ IntrospectiveCodeTransformationMixIn from Deeploy.CommonExtensions.CodeTransformationPasses.MemoryAllocation import ArgumentStructGeneration -from Deeploy.DeeployTypes import CodeGenVerbosity, CodeTransformationPass, ConstantBuffer, ExecutionBlock, \ +from Deeploy.DeeployTypes import CodeGenVerbosity, CodeSnippet, CodeTransformationPass, ExecutionBlock, \ NetworkContext, NodeTemplate, OperatorRepresentation, VariableBuffer, _NoVerbosity +from Deeploy.TilingExtension.AsyncDma import AnydimAsyncDmaTransferAdapter, AsyncDma, DmaDirection, Future +from Deeploy.TilingExtension.CodeTransformationPasses.TilingHoistingMixIn import TilingHoistingMixIn from Deeploy.TilingExtension.CodeTransformationPasses.TilingPrototypes 
import PrototypeTilingMixIn -from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint -from Deeploy.TilingExtension.TilingCodegen import TilingSchedule, VariableReplacementScheme, minimizeVariableReplacement +from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint, TensorMemoryConstraint +from Deeploy.TilingExtension.TilingCodegen import HyperRectangle, TilingSchedule, VariableReplacementScheme, \ + calculateFlatOffset, minimizeRectangle, minimizeVariableReplacement, padOffset, padShape, stridesFromShape +T = TypeVar('T') -class TilingCodeGeneration(CodeTransformationPass, IntrospectiveCodeTransformationMixIn, PrototypeTilingMixIn): - def __init__(self, targetMemLevel: str): - self.targetMemLevel = targetMemLevel - self.argStructGeneration = ArgumentStructGeneration() +def transposeListOfLists(listOfLists: List[List[T]]) -> List[List[T]]: + transposedListOfLists = [] + for _list in listOfLists: + for i, element in enumerate(_list): + if i >= len(transposedListOfLists): + assert i == len(transposedListOfLists) + transposedListOfLists.append([element]) + else: + transposedListOfLists[i].append(element) + return transposedListOfLists + + +class TilingCodeGeneration(CodeTransformationPass, IntrospectiveCodeTransformationMixIn, PrototypeTilingMixIn, + TilingHoistingMixIn): + + _relativeOffsetReferenceUpdateTemplate = NodeTemplate(""" + // UPDATE VARIABLE ${reference} + ${reference} += ${relativeOffset}; + """) + + _relativeOffsetReferenceUpdateTiledTemplate = NodeTemplate(""" + // UPDATE VARIABLE ${reference} + ${reference} += ${relativeOffset}[${tileIdxVar}]; + """) + + _openTileLoopTemplate = NodeTemplate(""" + // TILING LOOP + for (int TILING_I=${numTiles}[*${tileIdxPtr}]; TILING_I<${numTiles}[(*${tileIdxPtr})+1]; TILING_I++){ + """) + + _closeTileLoopTemplate = NodeTemplate(""" + // CLOSE TILING LOOP + } + *${tileIdxPtr} += 1; + """) @abstractmethod def generateTilingLoop( self, ctxt: NetworkContext, executionBlock: 
ExecutionBlock, nodeMemoryConstraint: NodeMemoryConstraint, - tilingSchedule: TilingSchedule, variableReplacement: VariableReplacementScheme, + tilingSchedules: List[TilingSchedule], variableReplacement: VariableReplacementScheme, operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, ExecutionBlock, bool]: return ctxt, executionBlock, False + def __init__(self, externalMemory: str, localMemory: str, dma: AsyncDma, bufferCount: int): + self.externalMemory = externalMemory + self.localMemory = localMemory + self.dma = dma + self.bufferCount = bufferCount + TilingHoistingMixIn.__init__(self, localMemory) + self.argStructGeneration = ArgumentStructGeneration() + # SCHEREMO: internalPtr refers to the HIGHER memory level of a transfer, # e.g. in both an L2 -> L1 and L1 -> L2 transfer, the internalPtr is in L1. - @staticmethod - def isFinalMemoryLevel(nodeMemoryConstraint: NodeMemoryConstraint, internalPtr: VariableBuffer) -> bool: - externalName = internalPtr._referenceName - tensorMemoryConstraint = nodeMemoryConstraint.tensorMemoryConstraints[externalName] - if len(tensorMemoryConstraint.memoryConstraints.keys()) <= 2: + def isFinalMemoryLevel(self, tensorMemoryConstraint: TensorMemoryConstraint) -> bool: + memoryOrder = list(tensorMemoryConstraint.memoryConstraints.keys()) + assert self.localMemory in memoryOrder, f"Memory {self.localMemory} does not exist in the tensor memory constraint {tensorMemoryConstraint}" + if len(memoryOrder) < 2: return True + return self.localMemory in memoryOrder[:2] - finalMemoryLevels = list(tensorMemoryConstraint.memoryConstraints.keys())[:2] - memoryLevel = internalPtr._memoryLevel + def _generateDmaTransferCalls(self, ctxt: NetworkContext, tensorName: str, transfers: List[HyperRectangle], + tileIdxVar: str, localBuffer: VariableBuffer, externalBuffer: VariableBuffer, + direction: DmaDirection, future: Future) -> List[CodeSnippet]: + assert all(len(transfers[0].dims) == len(rect.dims) for rect in transfers), \ + 
"Currently supporting only rectangles of same rank" - return memoryLevel in finalMemoryLevels + assert len(transfers[0].dims) > 0, "Expecting transfers of rank greater than 0" - def _hoistTileIdxPtr(self, - ctxt: NetworkContext, - operatorRepresentation: OperatorRepresentation, - sourceMemoryLevel: str = "L2") -> str: + assert len(transfers[0].dims) == len(externalBuffer.shape), \ + "External buffer's rank should be equal to the internal buffer's" - newPtrName = self.prefix + operatorRepresentation['nodeName'] + "_tileIdxPtr" + anydimAdapter = AnydimAsyncDmaTransferAdapter(self.dma) - tilePtrBuffer = ctxt.VariableBuffer(newPtrName, shape = [1]) - ctxt.add(tilePtrBuffer, "local") + initSnippets = anydimAdapter.transfer(ctxt, externalBuffer, localBuffer, transfers[0].dims, + stridesFromShape(externalBuffer.shape), + stridesFromShape(transfers[0].dims), direction, future, + math.prod(externalBuffer.shape)) - _type = ctxt.lookup(self.prefix + operatorRepresentation['nodeName'] + "_numTiles")._type + templates = [snippet.template for snippet in initSnippets] + opReprUpdates = [[] for _ in range(len(initSnippets))] - tilePtrBuffer._type = _type - tilePtrBuffer._instance = tilePtrBuffer._type(newPtrName, ctxt) - tilePtrBuffer._memoryLevel = sourceMemoryLevel + for rect in transfers: + snippets = anydimAdapter.transfer(ctxt, externalBuffer, localBuffer, rect.dims, + stridesFromShape(externalBuffer.shape), stridesFromShape(rect.dims), + direction, future, math.prod(externalBuffer.shape)) + for i, snippet in enumerate(snippets): + opReprUpdates[i].append(snippet.operatorRepresentation) - tilePtrBuffer.allocTemplate = NodeTemplate("") - tilePtrBuffer.deallocTemplate = NodeTemplate("") - tilePtrBuffer.initTemplate = NodeTemplate(""" - ${type.referencedType.typeName} bu_${name} = 0; - ${type.referencedType.typeName}* ${name} = &bu_${name};""") - - return newPtrName + tiledSnippets: List[CodeSnippet] = [ + CodeSnippet(*self._tileTemplate(ctxt, opReprUpdate, template, tileIdxVar, 
f"{tensorName}_")) + for template, opReprUpdate in zip(templates, opReprUpdates) + ] - def _hoistNumTiles(self, - ctxt: NetworkContext, - nodeName: str, - tilingSchedules: List[TilingSchedule], - sourceMemoryLevel: str = "L2") -> str: + return tiledSnippets - newPtrName = self.prefix + nodeName + "_numTiles" + def _generateExternalReferenceUpdate(self, ctxt: NetworkContext, tensorName: str, transfers: List[HyperRectangle], + tileIdxVar: str, externalBuffer: VariableBuffer) -> Optional[CodeSnippet]: + externalBufferStrides = stridesFromShape(externalBuffer.shape) + offsets = [calculateFlatOffset(rect.offset, externalBufferStrides) for rect in transfers] + relativeOffsets = [_next - _prev for _prev, _next in zip(offsets[:-1], offsets[1:])] - numTiles = [len(tilingSchedule.outputLoadSchedule) for tilingSchedule in tilingSchedules] - cumNumTiles = [0] - for idx in list(range(len(numTiles))): - cumNumTiles.append(cumNumTiles[-1] + numTiles[idx]) + if len(relativeOffsets) == 0 or all(offset == 0 for offset in relativeOffsets): + return None - cb = ctxt.ConstantBuffer(newPtrName, [len(cumNumTiles)], values = cumNumTiles) - ctxt.add(cb, "global") + operatorRepresentation: OperatorRepresentation = {"reference": externalBuffer.name, "tileIdxVar": tileIdxVar} - minType = None - if BasicDataTypes.uint8_t.checkValue(cumNumTiles): - minType = BasicDataTypes.uint8_t - elif BasicDataTypes.uint16_t.checkValue(cumNumTiles): - minType = BasicDataTypes.uint16_t + if all(relativeOffsets[0] == offset for offset in relativeOffsets): + operatorRepresentation["relativeOffset"] = relativeOffsets[0] + template = self._relativeOffsetReferenceUpdateTemplate else: - minType = BasicDataTypes.uint32_t - - cb._type = PointerClass(minType) - cb._instance = cb._type(newPtrName, ctxt) - cb._memoryLevel = sourceMemoryLevel - - return newPtrName - - def _hoistConstantAndReference(self, - ctxt: NetworkContext, - constBuf: ConstantBuffer, - operatorRepresentation: OperatorRepresentation, - nodeName: str, 
- operatorRepresentationName: str, - immediateType: Optional[Type[Immediate]] = None) -> Tuple[NetworkContext, Dict]: - - if immediateType is None: - _type = PointerClass(BasicDataTypes.int32_t) + relativeOffsets.append(0) # To have the same length as the number of tiles + buffer = self._hoistValues(ctxt, f'{tensorName}_relativeOffset', relativeOffsets) + operatorRepresentation["relativeOffset"] = buffer.name + operatorRepresentation["tileIdxVar"] = tileIdxVar + template = self._relativeOffsetReferenceUpdateTiledTemplate + + return CodeSnippet(template, operatorRepresentation) + + # TODO: Not super sure this should go here. It could be shared, but it seems a little bit too specific + # with the `isFinalMemory` thing. + def _legalizeTransfers(self, transfers: List[HyperRectangle], outerShape: Tuple[int, ...], typeWidth: int, + isFinalMemoryLevel: bool) -> Tuple[List[HyperRectangle], Tuple[int, ...]]: + transfersCommonRank = max(len(rect.dims) for rect in transfers) + commonRank = max(transfersCommonRank, len(outerShape)) + outerShape = padShape(outerShape, commonRank) + + minOuterShape = None + + if isFinalMemoryLevel: + minimizedTransfers = [] + for rect in transfers: + paddedRect = HyperRectangle(padOffset(rect.offset, commonRank), padShape(rect.dims, commonRank)) + minRect, newMinOuterShape = minimizeRectangle(paddedRect, outerShape) + if minOuterShape is None: + minOuterShape = newMinOuterShape + else: + if minOuterShape != newMinOuterShape: + rectStr = "\n".join(str(trans) for trans in transfers[:transfers.index(rect)]) + raise RuntimeError(f"""Currently support a single minimal outer shape. +Old minOuterShape: {minOuterShape} vs. new minOuterShape {newMinOuterShape}. +New minOuterShape produced by outerDims: {outerShape} and rect: {rect}. 
+Old minOuterShape produced by outerDims: {outerShape} and rects: +{rectStr}""") + minimizedTransfers.append(minRect) else: - _type = PointerClass(immediateType) + minimizedTransfers = [HyperRectangle((0,), (int(np.prod(rect.dims)),)) for rect in transfers] + minOuterShape = (int(np.prod(outerShape)),) + + if minOuterShape is not None: + outerShape = minOuterShape + transfers = minimizedTransfers - name = constBuf.name + def sizeInBytes(length: int, typeWidth: int) -> int: + return int(np.ceil((length * typeWidth) / 8)) - ctxt.add(constBuf, "global") - constBuf._type = _type - constBuf._instance = constBuf._type(name, ctxt) - constBuf._users = [nodeName] - constBuf._memoryLevel = self.targetMemLevel + outerShape = outerShape[:-1] + (sizeInBytes(outerShape[-1], typeWidth),) - refName = name + "_ref" - reference = ctxt.hoistReference(name, refName) - ctxt.lookup(reference)._memoryLevel = self.targetMemLevel + inBytesTransfers = [] + for rect in transfers: + newOffset = rect.offset[:-1] + (sizeInBytes(rect.offset[-1], typeWidth),) + newDims = rect.dims[:-1] + (sizeInBytes(rect.dims[-1], typeWidth),) + inBytesTransfers.append(HyperRectangle(newOffset, newDims)) + transfers = inBytesTransfers - operatorRepresentation[operatorRepresentationName] = refName + return transfers, outerShape - return ctxt, operatorRepresentation + def _tileTemplate(self, ctxt: NetworkContext, perTileOpReprs: List[OperatorRepresentation], template: NodeTemplate, + tileIdxVar: str, prefix: str) -> Tuple[NodeTemplate, OperatorRepresentation]: + opRepr, hoistedNames = self._hoistOpReprUpdates(ctxt, perTileOpReprs, prefix) + if len(hoistedNames) > 0: + template = copy.deepcopy(template) + self.indexVars(template.template, hoistedNames, "tileIdxVar") + opRepr["tileIdxVar"] = tileIdxVar + return template, opRepr def apply(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, name: str, verbose: CodeGenVerbosity = _NoVerbosity) -> Tuple[NetworkContext, ExecutionBlock]: - - def 
unravelReference(ctxt: NetworkContext, name: str) -> str: - - if name not in ctxt.localObjects.keys() and name not in ctxt.globalObjects.keys(): - return name - - refBuffer = ctxt.lookup(name) - if not hasattr(refBuffer, "_referenceName"): - return name - - return unravelReference(ctxt, refBuffer._referenceName) - if isinstance(executionBlock, ClosureExecutionBlock): baseExecutionBlock = executionBlock.baseBlock else: @@ -190,25 +250,24 @@ def unravelReference(ctxt: NetworkContext, name: str) -> str: templateNode = possibleTemplateNodes[0] - operatorRepresentation = templateNode.operatorRepresentation - unravelRep = operatorRepresentation.copy() - for key in unravelRep.keys(): - - val = unravelRep[key] - if not isinstance(val, str): - continue - - unravelRep[key] = unravelReference(ctxt, val) + self._initPrefix(templateNode.operatorRepresentation['nodeName']) + operatorRepresentation = templateNode.operatorRepresentation template = templateNode.template + unraveledOpRepr = operatorRepresentation.copy() + for key, value in unraveledOpRepr.items(): + if ctxt.is_buffer(value): + buffer = ctxt.lookup(value) + assert isinstance(buffer, VariableBuffer) + unraveledOpRepr[key] = ctxt.unravelReference(buffer).name + variableReplacement, tilingSchedules = template.tileConstraint.wrapTilingSolution( - nodeMemoryConstraint, self.targetMemLevel, ctxt, unravelRep) + nodeMemoryConstraint, self.localMemory, ctxt, unraveledOpRepr) + + minimalVariableReplacement, newOpRepr = minimizeVariableReplacement(variableReplacement, operatorRepresentation) - minimalVariableReplacement, newNodeRep = minimizeVariableReplacement(variableReplacement, - templateNode.operatorRepresentation) - for key, value in newNodeRep.items(): - templateNode.operatorRepresentation[key] = value + operatorRepresentation.update(newOpRepr) ctxt, executionBlock, applicable = self.generateTilingLoop(ctxt, executionBlock, nodeMemoryConstraint, tilingSchedules, minimalVariableReplacement, @@ -216,4 +275,6 @@ def 
unravelReference(ctxt: NetworkContext, name: str) -> str: if applicable: ctxt, executionBlock = self.argStructGeneration.apply(ctxt, executionBlock, name) + self._deinitPrefix() + return ctxt, executionBlock diff --git a/Deeploy/TilingExtension/CodeTransformationPasses/TilingHoistingMixIn.py b/Deeploy/TilingExtension/CodeTransformationPasses/TilingHoistingMixIn.py new file mode 100644 index 0000000000..4aa0f03de9 --- /dev/null +++ b/Deeploy/TilingExtension/CodeTransformationPasses/TilingHoistingMixIn.py @@ -0,0 +1,149 @@ +import math +from typing import List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union + +import Deeploy.CommonExtensions.DataTypes as BasicDataTypes +from Deeploy.AbstractDataTypes import BaseType, PointerClass, VoidType +from Deeploy.DeeployTypes import ConstantBuffer, NetworkContext, NodeTemplate, OperatorRepresentation, VariableBuffer, \ + _ReferenceBuffer +from Deeploy.TilingExtension.MemoryConstraints import TensorMemoryConstraint +from Deeploy.TilingExtension.TilingCodegen import TilingSchedule + +KT = TypeVar('KT') +VT = TypeVar('VT') + + +def dictOfArrays(arrayOfDicts: Sequence[Mapping[KT, VT]]) -> Mapping[KT, List[VT]]: + ret: Mapping[KT, List[VT]] = {} + for i, _dict in enumerate(arrayOfDicts): + if i == 0: + ret.update({key: [value] for key, value in _dict.items()}) + else: + assert set(ret.keys()) == set(_dict.keys()), "Keys should be the same" + for key, value in _dict.items(): + ret[key].append(value) + return ret + + +class TilingHoistingMixIn: + + _DEFAULT_HOIST_PREFIX = "TILING_CODEGEN_" + + def __init__(self, memory: str) -> None: + self.memory = memory + self._prefix = None + + def _initPrefix(self, nodeName: str) -> None: + self._prefix = f"{self._DEFAULT_HOIST_PREFIX}{self.memory}_{nodeName}_" + + def _deinitPrefix(self) -> None: + self._prefix = None + + @property + def prefix(self) -> str: + assert self._prefix is not None, "Prefix is not initialized!" 
+ return self._prefix + + def _hoistValues(self, + ctxt: NetworkContext, + name: str, + values: List[int], + override_type: Optional[Type[BaseType]] = None) -> ConstantBuffer: + assert all(isinstance(value, int) for value in values) + cb = ctxt.ConstantBuffer(self.prefix + name, [len(values)], values) + ctxt.add(cb, 'global') + if override_type is not None: + cb._type = PointerClass(override_type) + else: + cb._type = PointerClass(BasicDataTypes.minimalIntegerType(values)) + cb._instance = cb._type(cb.name, ctxt) + cb._memoryLevel = self.memory + return cb + + def _hoistReference(self, + ctxt: NetworkContext, + name: str, + reference: VariableBuffer, + shape: Tuple[int, ...] = (1,), + offset: Union[int, str, VariableBuffer] = 0, + override_type: Optional[Type[BaseType]] = None) -> _ReferenceBuffer: + ref = ctxt.hoistReference(self.prefix + name, reference, shape, offset, override_type) + ref._memoryLevel = self.memory + return ref + + def _hoistTileNumAndIdxPtr(self, ctxt: NetworkContext, + tilingSchedules: List[TilingSchedule]) -> Tuple[ConstantBuffer, VariableBuffer]: + stepsNumTiles = [len(tilingSchedule.outputLoadSchedule) for tilingSchedule in tilingSchedules] + + cumulativeNumTiles = [0] + for numTiles in stepsNumTiles: + cumulativeNumTiles.append(cumulativeNumTiles[-1] + numTiles) + + tileNum = self._hoistValues(ctxt, "numTiles", cumulativeNumTiles) + + tileIdxPtr = ctxt.VariableBuffer(f"{self.prefix}tileIdxPtr", shape = [1]) + ctxt.add(tileIdxPtr, "local") + + tileIdxPtr._type = tileNum._type + tileIdxPtr._instance = tileIdxPtr._type(tileIdxPtr.name, ctxt) + # LMACAN: Intentionally don't annotate memory level so it gets allocated + # outside of the tiling loops + + tileIdxPtr.allocTemplate = NodeTemplate("") + tileIdxPtr.deallocTemplate = NodeTemplate("") + tileIdxPtr.initTemplate = NodeTemplate(""" + ${type.referencedType.typeName} bu_${name} = 0; + ${type.referencedType.typeName}* ${name} = &bu_${name};""") + + return (tileNum, tileIdxPtr) + + def 
_hoistOpReprUpdates(self, + ctxt: NetworkContext, + opReprs: List[OperatorRepresentation], + prefix: str = "") -> Tuple[OperatorRepresentation, List[str]]: + # Early exit if the opReprs list is empty because the following code assumes at least 1 opRepr is in the list + if len(opReprs) == 0: + return {}, [] + + newOpRepr = {} + hoistedReprNames = [] + for var, updates in dictOfArrays(opReprs).items(): + if all(update == updates[0] for update in updates): + newOpRepr[var] = updates[0] + else: + cb = self._hoistValues(ctxt, f"{prefix}{var}", updates) + newOpRepr[var] = cb.name + hoistedReprNames.append(var) + return newOpRepr, hoistedReprNames + + def _hoistMultibufferReferences(self, ctxt: NetworkContext, buffer: VariableBuffer, + tensorMemoryConstraint: TensorMemoryConstraint) -> List[_ReferenceBuffer]: + tensorName = tensorMemoryConstraint.tensorName + memoryConstraint = tensorMemoryConstraint.memoryConstraints[self.memory] + assert memoryConstraint.addrSpace is not None, "Assuming address space is set" + totalSize = memoryConstraint.addrSpace[1] - memoryConstraint.addrSpace[0] + assert isinstance(memoryConstraint.multiBufferCoefficient, + int), "Assuming multi buffer coefficient has been assigned" + assert totalSize % memoryConstraint.multiBufferCoefficient == 0, "Assuming total size is divisible by the multi buffer coefficient" + bufferSize = totalSize // memoryConstraint.multiBufferCoefficient + + assert memoryConstraint.multiBufferCoefficient == 2, "Multi buffer coefficient has to be equal to 2 since this is for double buffering" + assert memoryConstraint.shape is not None + assert len(memoryConstraint.shape) > 0 + assert isinstance(memoryConstraint.shape[0], int) + tileLength = math.prod(memoryConstraint.shape) + tileSize = int(math.ceil(tileLength * buffer._type.referencedType.typeWidth / 8)) + + assert bufferSize >= tileSize, f"Provided buffer size is not enough to fit the tile. 
Buffer size: {bufferSize}, tile size: {tileSize}" + + refs = [ + self._hoistReference( + ctxt, + f"{tensorName}_buffer_{i}", + buffer, + memoryConstraint.shape, + offset = i * bufferSize, + override_type = VoidType, + ) for i in range(memoryConstraint.multiBufferCoefficient) + ] + + return refs diff --git a/Deeploy/TilingExtension/CodeTransformationPasses/TilingPrototypes.py b/Deeploy/TilingExtension/CodeTransformationPasses/TilingPrototypes.py index 6b4b297da3..b09192c5b7 100644 --- a/Deeploy/TilingExtension/CodeTransformationPasses/TilingPrototypes.py +++ b/Deeploy/TilingExtension/CodeTransformationPasses/TilingPrototypes.py @@ -36,7 +36,9 @@ class TilingMetaInfo: nodeName: str nodeOps: int - numTiles: int + numTiles: str + totalNumTiles: int + tileIdxPtr: str tileIdxVar: str kernelLevelTiling: bool @@ -44,16 +46,15 @@ class TilingMetaInfo: _CodeSegmentType = List[CodeSnippet] _measureCycles = NodeTemplate(""" -${nodeName}_${measurementName}_measurements[${tileIdx}] = getCycles(); +${measurements}[${tileIdxVar}] = getCycles(); """) _measurementArrayDeclaration = NodeTemplate(""" -uint32_t ${nodeName}_${measurementName}_measurements[${numTiles}]; +uint32_t ${measurements}[${totalNumTiles}]; """) -_printPrefixAndSufixDeclaration = NodeTemplate(""" -const static char ${nodeName}_prefix[] = "[${nodeName}][${buffering}][${nodeOps} ops][Tile "; -const static char ${nodeName}_suffix[] = " cycles \\n"; +_stringDeclaration = NodeTemplate(""" +const static char ${name}[] = "${string}"; """) _measureConditionSetup = NodeTemplate(""" @@ -66,17 +67,12 @@ class TilingMetaInfo: _printLoopSetup = NodeTemplate(""" StopTimer(); -<% -current_level_num = nodeName[-1] -lower_level_num = str(int(current_level_num) - 1) -%> -for (int printLoopIdx = DeeployNetwork_TILING_REPLACED_L${lower_level_num}_${nodeName[:-3]}_numTiles[*DeeployNetwork_TILING_REPLACED_L${lower_level_num}_${nodeName[:-3]}_tileIdxPtr -1]; - printLoopIdx < 
DeeployNetwork_TILING_REPLACED_L${lower_level_num}_${nodeName[:-3]}_numTiles[*DeeployNetwork_TILING_REPLACED_L${lower_level_num}_${nodeName[:-3]}_tileIdxPtr]; - printLoopIdx++){ +for (int ${profileIdxVar} = ${numTiles}[*${tileIdxPtr} -1]; ${profileIdxVar} < ${numTiles}[*${tileIdxPtr}]; ${profileIdxVar}++){ """) + _printCycleDifference = NodeTemplate(r""" -printf("%s%u] %s%u%s", ${nodeName}_prefix,${tileIdx},"${flavorStr}", \ -${nodeName}_${endMeasurementName}_measurements[${tileIdx}] - ${nodeName}_${startMeasurementName}_measurements[${tileIdx}],${nodeName}_suffix); +printf("%s%u] %s%u%s", ${prefixStr}, ${profileIdxVar}, "${flavorStr}", \ +${measurementsEnd}[${profileIdxVar}] - ${measurementsStart}[${profileIdxVar}], ${suffixStr}); """) _printLoopTeardown = NodeTemplate(""" @@ -156,26 +152,30 @@ def measurementArrayDeclaration(cls, executionBlock: ExecutionBlock, metaInfo: T nodeName = metaInfo.nodeName numTiles = metaInfo.numTiles + totalNumTiles = metaInfo.totalNumTiles nodeOps = metaInfo.nodeOps - measurementNameList = [ + measurementsList = [ "ingress_dma_wait_start", "ingress_dma_wait_end", "egress_dma_wait_start", "egress_dma_wait_end" ] if metaInfo.kernelLevelTiling: - measurementNameList = ["kernel_start", "kernel_end"] + measurementNameList + measurementsList = ["kernel_start", "kernel_end"] + measurementsList - for measurementName in measurementNameList: + for measurements in measurementsList: executionBlock.addLeft(_measurementArrayDeclaration, { - "nodeName": nodeName, - "measurementName": measurementName, - "numTiles": numTiles + "measurements": f"{nodeName}_{measurements}_measurements", + "totalNumTiles": totalNumTiles }) - executionBlock.addLeft(_printPrefixAndSufixDeclaration, { - "nodeName": nodeName, - "nodeOps": nodeOps, - "buffering": bufferingStr + executionBlock.addLeft(_stringDeclaration, { + "name": f"{nodeName}_prefix", + "string": f"[{nodeName}][{bufferingStr}][{nodeOps} ops][Tile ", + }) + + executionBlock.addLeft(_stringDeclaration, { + 
"name": f"{nodeName}_suffix", + "string": " cycles \\n", }) return executionBlock @@ -185,33 +185,46 @@ def injectPrintCycleDiff(cls, executionBlock: ExecutionBlock, metaInfo: TilingMe numTiles = metaInfo.numTiles nodeName = metaInfo.nodeName + tileIdxPtr = metaInfo.tileIdxPtr + totalNumTiles = metaInfo.totalNumTiles + profileIdxVar = "PROFILING_I" - executionBlock.addRight(_printLoopSetup, {"numTiles": numTiles, "nodeName": nodeName}) + executionBlock.addRight(_printLoopSetup, { + "numTiles": numTiles, + "nodeName": nodeName, + "profileIdxVar": profileIdxVar, + "tileIdxPtr": tileIdxPtr, + }) executionBlock.addRight( _printCycleDifference, { - "nodeName": nodeName, + "prefixStr": f"{nodeName}_prefix", + "suffixStr": f"{nodeName}_suffix", "flavorStr": "Input DMA took ", - "startMeasurementName": "ingress_dma_wait_start", - "endMeasurementName": "ingress_dma_wait_end", - "tileIdx": "printLoopIdx" + "measurementsStart": f"{nodeName}_ingress_dma_wait_start_measurements", + "measurementsEnd": f"{nodeName}_ingress_dma_wait_end_measurements", + "profileIdxVar": profileIdxVar, }) + if metaInfo.kernelLevelTiling: executionBlock.addRight( _printCycleDifference, { - "nodeName": nodeName, + "prefixStr": f"{nodeName}_prefix", + "suffixStr": f"{nodeName}_suffix", "flavorStr": "Kernel took ", - "startMeasurementName": "kernel_start", - "endMeasurementName": "kernel_end", - "tileIdx": "printLoopIdx" + "measurementsStart": f"{nodeName}_kernel_start_measurements", + "measurementsEnd": f"{nodeName}_kernel_end_measurements", + "profileIdxVar": profileIdxVar, }) + executionBlock.addRight( _printCycleDifference, { - "nodeName": nodeName, + "prefixStr": f"{nodeName}_prefix", + "suffixStr": f"{nodeName}_suffix", "flavorStr": "Output DMA took ", - "startMeasurementName": "egress_dma_wait_start", - "endMeasurementName": "egress_dma_wait_end", - "tileIdx": "printLoopIdx" + "measurementsStart": f"{nodeName}_egress_dma_wait_start_measurements", + "measurementsEnd": 
f"{nodeName}_egress_dma_wait_end_measurements", + "profileIdxVar": profileIdxVar, }) executionBlock.addRight(_printLoopTeardown, {}) @@ -225,14 +238,12 @@ def kernelProfilingWrap(cls, executionBlock: ExecutionBlock, metaInfo: TilingMet if metaInfo.kernelLevelTiling: executionBlock.addLeft(_measureCycles, { - "nodeName": nodeName, - "measurementName": "kernel_start", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_kernel_start_measurements", + "tileIdxVar": tileIdxVar }) executionBlock.addRight(_measureCycles, { - "nodeName": nodeName, - "measurementName": "kernel_end", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_kernel_end_measurements", + "tileIdxVar": tileIdxVar }) return executionBlock @@ -300,31 +311,27 @@ def generateInnerCode(cls, executionBlock: ExecutionBlock, metaInfo: TilingMetaI _ingressDMAWaitStatements = [] _ingressDMAWaitStatements.append( CodeSnippet(_measureCycles, { - "nodeName": nodeName, - "measurementName": "ingress_dma_wait_start", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_ingress_dma_wait_start_measurements", + "tileIdxVar": tileIdxVar })) _ingressDMAWaitStatements += ingressDMAWaitStatements _ingressDMAWaitStatements.append( CodeSnippet(_measureCycles, { - "nodeName": nodeName, - "measurementName": "ingress_dma_wait_end", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_ingress_dma_wait_end_measurements", + "tileIdxVar": tileIdxVar })) _egressDMAWaitStatements = [] _egressDMAWaitStatements.append( CodeSnippet(_measureCycles, { - "nodeName": nodeName, - "measurementName": "egress_dma_wait_start", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_egress_dma_wait_start_measurements", + "tileIdxVar": tileIdxVar })) _egressDMAWaitStatements += egressDMAWaitStatements _egressDMAWaitStatements.append( CodeSnippet(_measureCycles, { - "nodeName": nodeName, - "measurementName": "egress_dma_wait_end", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_egress_dma_wait_end_measurements", + "tileIdxVar": 
tileIdxVar })) executionBlock = super().generateInnerCode(executionBlock, metaInfo, ingressDMATransferCalls, @@ -374,27 +381,24 @@ def generateSetupAndTeardownCode(cls, executionBlock: ExecutionBlock, metaInfo: teardownStatements: _CodeSegmentType) -> ExecutionBlock: nodeName = metaInfo.nodeName - numTiles = metaInfo.numTiles + totalNumTiles = metaInfo.totalNumTiles executionBlock.addLeft(_measureCycles, { - "nodeName": nodeName, - "measurementName": "ingress_dma_wait_start", - "tileIdx": 0 + "measurements": f"{nodeName}_ingress_dma_wait_start_measurements", + "tileIdxVar": 0 }) executionBlock = cls.measurementArrayDeclaration(executionBlock, metaInfo, bufferingStr = "DB") executionBlock.addRight(_measureCycles, { - "nodeName": nodeName, - "measurementName": "egress_dma_wait_start", - "tileIdx": numTiles - 1 + "measurements": f"{nodeName}_egress_dma_wait_start_measurements", + "tileIdxVar": totalNumTiles - 1 }) executionBlock = super().generateSetupAndTeardownCode(executionBlock, metaInfo, setupStatements, teardownStatements) executionBlock.addRight(_measureCycles, { - "nodeName": nodeName, - "measurementName": "egress_dma_wait_end", - "tileIdx": numTiles - 1 + "measurements": f"{nodeName}_egress_dma_wait_end_measurements", + "tileIdxVar": totalNumTiles - 1 }) executionBlock = cls.injectPrintCycleDiff(executionBlock, metaInfo) @@ -417,33 +421,29 @@ def generateInnerCode(cls, executionBlock: ExecutionBlock, metaInfo: TilingMetaI _ingressDMAWaitStatements.append(CodeSnippet(_measureConditionSetup, {"cond": f"{tileIdxVar} > 0"})) _ingressDMAWaitStatements.append( CodeSnippet(_measureCycles, { - "nodeName": nodeName, - "measurementName": "ingress_dma_wait_start", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_ingress_dma_wait_start_measurements", + "tileIdxVar": tileIdxVar })) _ingressDMAWaitStatements.append(CodeSnippet(_measureConditionEnd, {})) _ingressDMAWaitStatements += ingressDMAWaitStatements _ingressDMAWaitStatements.append( CodeSnippet(_measureCycles, 
{ - "nodeName": nodeName, - "measurementName": "ingress_dma_wait_end", - "tileIdx": tileIdxVar + "measurements": f"{nodeName}_ingress_dma_wait_end_measurements", + "tileIdxVar": tileIdxVar })) _egressDMAWaitStatements = [] _egressDMAWaitStatements.append(CodeSnippet(_measureConditionSetup, {"cond": f"{tileIdxVar} > 0"})) _egressDMAWaitStatements.append( CodeSnippet(_measureCycles, { - "nodeName": nodeName, - "measurementName": "egress_dma_wait_start", - "tileIdx": f"{tileIdxVar} - 1" + "measurements": f"{nodeName}_egress_dma_wait_start_measurements", + "tileIdxVar": f"{tileIdxVar} - 1" })) _egressDMAWaitStatements += egressDMAWaitStatements _egressDMAWaitStatements.append( CodeSnippet(_measureCycles, { - "nodeName": nodeName, - "measurementName": "egress_dma_wait_end", - "tileIdx": f"{tileIdxVar} - 1" + "measurements": f"{nodeName}_egress_dma_wait_end_measurements", + "tileIdxVar": f"{tileIdxVar} - 1" })) _egressDMAWaitStatements.append(CodeSnippet(_measureConditionEnd, {})) diff --git a/Deeploy/TilingExtension/CodeTransformationPasses/TilingVariableReplacement.py b/Deeploy/TilingExtension/CodeTransformationPasses/TilingVariableReplacement.py index fd910cc16b..3810260b75 100644 --- a/Deeploy/TilingExtension/CodeTransformationPasses/TilingVariableReplacement.py +++ b/Deeploy/TilingExtension/CodeTransformationPasses/TilingVariableReplacement.py @@ -24,192 +24,207 @@ # limitations under the License. 
import copy -from typing import Dict, List, Tuple, Type +import itertools +from typing import List, Tuple -from mako.parsetree import Expression, Node, Text - -from Deeploy.AbstractDataTypes import Pointer +from Deeploy.AbstractDataTypes import Struct from Deeploy.CommonExtensions.CodeTransformationPasses.Closure import ClosureExecutionBlock from Deeploy.CommonExtensions.CodeTransformationPasses.IntrospectiveCodeTransformation import \ IntrospectiveCodeTransformationMixIn from Deeploy.DeeployTypes import CodeGenVerbosity, CodeSnippet, CodeTransformationPass, ExecutionBlock, \ - NetworkContext, NodeTemplate, OperatorRepresentation, TransientBuffer, _NoVerbosity + NetworkContext, NodeTemplate, OperatorRepresentation, TransientBuffer, VariableBuffer, _NoVerbosity, \ + _ReferenceBuffer +from Deeploy.TilingExtension.CodeTransformationPasses.TilingHoistingMixIn import TilingHoistingMixIn from Deeploy.TilingExtension.MemoryConstraints import NodeMemoryConstraint +from Deeploy.TilingExtension.TilerExtension import Tiler from Deeploy.TilingExtension.TilingCodegen import TilingSchedule, VariableReplacementScheme, minimizeVariableReplacement -class TilingVariableReplacement(CodeTransformationPass, IntrospectiveCodeTransformationMixIn): - - _prefix = "TILING_REPLACED_" +class TilingVariableReplacement(CodeTransformationPass, IntrospectiveCodeTransformationMixIn, TilingHoistingMixIn): def __init__(self, targetMemLevel: str): self.targetMemLevel = targetMemLevel - self._name: str + TilingHoistingMixIn.__init__(self, targetMemLevel) @property - def prefix(self): - return self._prefix + f"{self._name}_" + self.targetMemLevel + "_" + def arenaName(self): + return f"{Tiler.arenaName}_{self.targetMemLevel}" - def _dereferencePointer(self, nodes: List[Node], name: str) -> List[Node]: - instanceIdxs = [idx for idx, node in enumerate(nodes) if isinstance(node, Expression) and node.text == name] + def _arenaAllocate(self, ctxt: NetworkContext, buffer: VariableBuffer, offset: int) -> 
VariableBuffer: + arena = ctxt.lookup(self.arenaName) + buffer.allocTemplate = NodeTemplate(" \ + ${type.typeName} ${name} = (${type.typeName}) " + f"((char*){str(arena._instance)} + {offset});") + buffer.deallocTemplate = NodeTemplate("") + return buffer - for offset, idx in enumerate(instanceIdxs): - text = Text("*", source = "*", lineno = 0, pos = 0, filename = None) - nodes.insert(offset + idx, text) - - return nodes + def _replaceTransients(self, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, + nodeMemoryConstraint: NodeMemoryConstraint) -> NetworkContext: + for value in operatorRepresentation.values(): + if not (isinstance(value, str) and ctxt.is_local(value)): + continue - def _replaceImmediate(self, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - variableReplacement: Tuple[str, - List], dataType: Type[Pointer]) -> Tuple[NetworkContext, Dict]: + buffer = ctxt.lookup(value) - varName = variableReplacement[0] - varVal = variableReplacement[1] + if not (isinstance(buffer, TransientBuffer) and buffer._memoryLevel == self.targetMemLevel): + continue - newConstName = self.prefix + varName - newRefName = self.prefix + "ref_" + varName + memoryConstraints = nodeMemoryConstraint.tensorMemoryConstraints[buffer.name].memoryConstraints + assert len(memoryConstraints) == 1, f"Tiled transient buffer {buffer.name} has more than one memory level!" + constraint = next(iter(memoryConstraints.values())) + assert constraint.addrSpace is not None, f"Address space of {constraint} cannot be None!" 
+ offset = constraint.addrSpace[0] + self._arenaAllocate(ctxt, buffer, offset) - cb = ctxt.ConstantBuffer(newConstName, shape = (len(varVal),), values = varVal) - ctxt.add(cb, "global") + return ctxt - cb._type = dataType - cb._instance = dataType(newConstName, ctxt) - cb._memoryLevel = self.targetMemLevel + def _replaceVariableReplacements(self, ctxt: NetworkContext, snippet: CodeSnippet, + variableReplacement: VariableReplacementScheme) -> NetworkContext: + operatorRepresentation = snippet.operatorRepresentation + template = snippet.template - reference = ctxt.hoistReference(newConstName, newRefName) - ctxt.lookup(reference)._memoryLevel = self.targetMemLevel + replacedVars = [] - operatorRepresentation[varName] = reference + for name, values in variableReplacement.perTileReplacements.items(): + # Case where we have already replaced the variable + if isinstance(operatorRepresentation[name], str): + continue + _type = variableReplacement.replacementTypes[name] + # LMACAN: Hoist values expects integers (should be the only thing we deal with for now...) 
+ intValues = [int(v) for v in values] + assert all(intV == v for intV, v in zip(intValues, values)), f"Received non-int values" + buff = self._hoistValues(ctxt, name, intValues, _type.referencedType) + ref = self._hoistReference(ctxt, name + "_ref", buff) + operatorRepresentation[name] = ref.name + replacedVars.append(name) - return ctxt, operatorRepresentation + self.dereferenceVars(template.template, replacedVars) - def _hoistTileReference(self, ctxt: NetworkContext, reference: str, name: str, offset: int) -> NetworkContext: + return ctxt - refName = ctxt.hoistReference(reference, name) - refBuf = ctxt.lookup(refName) + def _replaceTiledTensors(self, ctxt: NetworkContext, snippet: CodeSnippet, + tilingSchedule: TilingSchedule) -> NetworkContext: + operatorRepresentation = snippet.operatorRepresentation - staticBuf = ctxt.lookup(f"MEMORYARENA_{self.targetMemLevel}") + for name, offsets in itertools.chain(tilingSchedule.inputBaseOffsets.items(), + tilingSchedule.outputBaseOffsets.items()): + buffer = ctxt.lookup(operatorRepresentation[name]) + assert isinstance(buffer, VariableBuffer) + unraveledBuffer = ctxt.unravelReference(buffer) - refBuf.allocTemplate = NodeTemplate(" \ - ${type.typeName} ${name} = (${type.typeName}) " + f"((char*){str(staticBuf._instance)} + {offset});") - refBuf._memoryLevel = self.targetMemLevel + ref = self._hoistReference(ctxt, name + "_ref", unraveledBuffer) + ref = self._arenaAllocate(ctxt, ref, offsets[0]) + operatorRepresentation[name] = ref.name return ctxt - def _replaceReferences(self, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - tilingSchedule: TilingSchedule, name: str) -> Tuple[NetworkContext, Dict]: + def apply(self, + ctxt: NetworkContext, + executionBlock: ExecutionBlock, + name: str, + verbose: CodeGenVerbosity = _NoVerbosity) -> Tuple[NetworkContext, ExecutionBlock]: + self._initPrefix(name) - def unravelOldRef(refName): - oldBuf = ctxt.lookup(refName) - if hasattr(oldBuf, "_referenceName"): - 
return unravelOldRef(oldBuf._referenceName) - return oldBuf.name + if isinstance(executionBlock, ClosureExecutionBlock): + baseExecutionBlock = executionBlock.baseBlock + else: + baseExecutionBlock = executionBlock - newRefName = self.prefix + "ref_" + name - oldRefName = operatorRepresentation[name] + patternMemoryConstraint = baseExecutionBlock.patternMemoryConstraint - if name in tilingSchedule.inputBaseOffsets: - offset = tilingSchedule.inputBaseOffsets[name] - elif name in tilingSchedule.outputBaseOffsets: - offset = tilingSchedule.outputBaseOffsets[name] - else: - raise RuntimeError(f"Name {name} not found in TilingSchedule {tilingSchedule}") + if patternMemoryConstraint is None: + return ctxt, executionBlock - unravelRef = unravelOldRef(oldRefName) + assert len(patternMemoryConstraint.nodeConstraints) == 1, "Only layerwise supported for now!" + #assert len(executionBlock.codeSnippets) == 1, "Only layerwise supported for now!" - ctxt = self._hoistTileReference(ctxt, unravelRef, newRefName, offset[0]) - operatorRepresentation[name] = newRefName + nodeMemoryConstraint = patternMemoryConstraint.nodeConstraints[0] - return ctxt, operatorRepresentation + possibleSnippets = [ + node for node in baseExecutionBlock.codeSnippets if hasattr(node.template, 'tileConstraint') + ] - def _replaceTransients(self, ctxt: NetworkContext, operatorRepresentation: OperatorRepresentation, - nodeMemoryConstraint: NodeMemoryConstraint, name: str) -> Tuple[NetworkContext, Dict]: + assert len(possibleSnippets) == 1, "More than one template node with TCF found" - memoryConstraints = nodeMemoryConstraint.tensorMemoryConstraints[operatorRepresentation[name]].memoryConstraints - assert len(memoryConstraints - ) == 1, f"Tiled transient buffer {operatorRepresentation[name]} has more than one memory level!" - key = list(memoryConstraints.keys())[0] - constraint = memoryConstraints[key] - assert constraint.addrSpace is not None, f"Address space of {constraint} cannot be None!" 
- offset = constraint.addrSpace[0] + snippet = possibleSnippets[0] + operatorRepresentation = snippet.operatorRepresentation + template = snippet.template - refBuf = ctxt.lookup(operatorRepresentation[name]) + unraveledOpRepr = { + key: ctxt.unravelReference(ctxt.lookup(value)).name if ctxt.is_buffer(value) else value + for key, value in operatorRepresentation.items() + } - if refBuf._memoryLevel != self.targetMemLevel: - return ctxt, operatorRepresentation + variableReplacement, tilingSchedules = template.tileConstraint.wrapTilingSolution( + nodeMemoryConstraint, self.targetMemLevel, ctxt, unraveledOpRepr) - staticBuf = ctxt.lookup(f"MEMORYARENA_{self.targetMemLevel}") + minimalVariableReplacement, newOpRepr = minimizeVariableReplacement(variableReplacement, operatorRepresentation) + operatorRepresentation.update(newOpRepr) - refBuf.allocTemplate = NodeTemplate(" \ - ${type.typeName} ${name} = (${type.typeName}) " + f"((char*){str(staticBuf._instance)} + {offset});") - refBuf.deallocTemplate = NodeTemplate("") - refBuf._memoryLevel = self.targetMemLevel + flatTilingSchedule = copy.copy(tilingSchedules[0]) + for tilingSchedule in tilingSchedules[1:]: + flatTilingSchedule += tilingSchedule - return ctxt, operatorRepresentation + ctxt = self._replaceVariableReplacements(ctxt, snippet, minimalVariableReplacement) + ctxt = self._replaceTiledTensors(ctxt, snippet, flatTilingSchedule) + ctxt = self._replaceTransients(ctxt, operatorRepresentation, nodeMemoryConstraint) - def _replaceTiledExpressions(self, ctxt: NetworkContext, templateNode: CodeSnippet, - variableReplacement: VariableReplacementScheme, tilingSchedule: TilingSchedule, - nodeMemoryConstraint: NodeMemoryConstraint) -> NetworkContext: + tilingReplacedRefMap = {} + for key in list(flatTilingSchedule.inputBaseOffsets.keys()) + list(flatTilingSchedule.outputBaseOffsets.keys()): + tilingReplacedRefMap[unraveledOpRepr[key]] = operatorRepresentation[key] - operatorRepresentation = 
templateNode.operatorRepresentation - template = templateNode.template + # Swap any original tensor occurances with the tiled targetMemLevel-local tensor + for codeSnippet in executionBlock.codeSnippets: + template, opRepr = codeSnippet.template, codeSnippet.operatorRepresentation - immediateList = [(key, value) - for key, value in variableReplacement.perTileReplacements.items() - if type(operatorRepresentation[key]) != str] + for key, value in opRepr.items(): + if isinstance(value, str) and value in tilingReplacedRefMap: + opRepr[key] = tilingReplacedRefMap[value] - inoutSchedule = {**tilingSchedule.inputBaseOffsets, **tilingSchedule.outputBaseOffsets} - variableList = [key for key, value in inoutSchedule.items() if type(operatorRepresentation[key]) == str] + if "closureStructArgs" in opRepr: + closureArgsStruct: Struct = opRepr['closureStructArgs'] + structDict = closureArgsStruct.value - transientBufferList = [] - for key, value in operatorRepresentation.items(): - if not isinstance(value, str): - continue - if (ctxt.is_local(value) and isinstance(ctxt.lookup(value), TransientBuffer)): - transientBufferList.append(key) + for key, value in structDict.items(): + if value.referenceName in tilingReplacedRefMap: + structDict[key] = type(value)(tilingReplacedRefMap[value.referenceName], ctxt) - parseTree = IntrospectiveCodeTransformationMixIn._generateParseTree(template) - newParseTree = copy.copy(parseTree) - nodes = parseTree.nodes + self._deinitPrefix() - newNodes = copy.copy(nodes) + return ctxt, executionBlock - for rep in immediateList: - ctxt, operatorRepresentation = self._replaceImmediate(ctxt, operatorRepresentation, rep, - variableReplacement.replacementTypes[rep[0]]) - newNodes = self._dereferencePointer(newNodes, rep[0]) - for rep in variableList: - ctxt, operatorRepresentation = self._replaceReferences(ctxt, operatorRepresentation, tilingSchedule, rep) +class TilingVariableReplacementUpdate(CodeTransformationPass, IntrospectiveCodeTransformationMixIn, + 
TilingHoistingMixIn): - for rep in transientBufferList: - ctxt, operatorRepresentation = self._replaceTransients(ctxt, operatorRepresentation, nodeMemoryConstraint, - rep) + _updateReferenceTemplate = NodeTemplate(""" + // UPDATE VARIABLE ${reference} + *${reference} = ${baseReference}[${tileIdxVar}]; + """) - newParseTree.nodes = newNodes - IntrospectiveCodeTransformationMixIn._reconstructCode(template, newParseTree) + def __init__(self, targetMemLevel: str, tileIdxVar: str = "TILING_I"): + super().__init__() + self.tileIdxVar = tileIdxVar + self.targetMemLevel = targetMemLevel - return ctxt + def _generateVariableUpdates(self, variableReplacement: VariableReplacementScheme, ctxt: NetworkContext, + operatorRepresentation: OperatorRepresentation) -> List[CodeSnippet]: + updates = [] + for key in variableReplacement.perTileReplacements.keys(): + ref = ctxt.lookup(operatorRepresentation[key]) + assert isinstance(ref, _ReferenceBuffer) + updates.append( + CodeSnippet(self._updateReferenceTemplate, { + "reference": ref.name, + "tileIdxVar": self.tileIdxVar, + "baseReference": ref._referenceName + })) + return updates def apply(self, ctxt: NetworkContext, executionBlock: ExecutionBlock, name: str, verbose: CodeGenVerbosity = _NoVerbosity) -> Tuple[NetworkContext, ExecutionBlock]: - - def unravelReference(ctxt: NetworkContext, name: str) -> str: - - if name not in ctxt.localObjects.keys() and name not in ctxt.globalObjects.keys(): - return name - - refBuffer = ctxt.lookup(name) - if not hasattr(refBuffer, "_referenceName"): - return name - - return unravelReference(ctxt, refBuffer._referenceName) - - self._name = name - if isinstance(executionBlock, ClosureExecutionBlock): baseExecutionBlock = executionBlock.baseBlock else: @@ -221,61 +236,33 @@ def unravelReference(ctxt: NetworkContext, name: str) -> str: return ctxt, executionBlock assert len(patternMemoryConstraint.nodeConstraints) == 1, "Only layerwise supported for now!" 
- #assert len(executionBlock.codeSnippets) == 1, "Only layerwise supported for now!" nodeMemoryConstraint = patternMemoryConstraint.nodeConstraints[0] - possibleTemplateNodes = [ + possibleSnippets = [ node for node in baseExecutionBlock.codeSnippets if hasattr(node.template, 'tileConstraint') ] - assert len(possibleTemplateNodes) == 1, "More than one template node with TCF found" - - templateNode = possibleTemplateNodes[0] - operatorRepresentation = templateNode.operatorRepresentation - - unravelRep = operatorRepresentation.copy() - for key in unravelRep.keys(): + assert len(possibleSnippets) == 1, "More than one template node with TCF found" - val = unravelRep[key] - if not isinstance(val, str): - continue - - unravelRep[key] = unravelReference(ctxt, val) + snippet = possibleSnippets[0] + operatorRepresentation = snippet.operatorRepresentation + template = snippet.template - template = templateNode.template - - variableReplacement, tilingSchedules = template.tileConstraint.wrapTilingSolution( - nodeMemoryConstraint, self.targetMemLevel, ctxt, unravelRep) + unraveledOpRepr = { + key: ctxt.unravelReference(ctxt.lookup(value)).name if ctxt.is_buffer(value) else value + for key, value in operatorRepresentation.items() + } - minimalVariableReplacement, newNodeRep = minimizeVariableReplacement(variableReplacement, - templateNode.operatorRepresentation) - for key, value in newNodeRep.items(): - templateNode.operatorRepresentation[key] = value + variableReplacement, _ = template.tileConstraint.wrapTilingSolution(nodeMemoryConstraint, self.targetMemLevel, + ctxt, unraveledOpRepr) - flatTilingSchedule = copy.copy(tilingSchedules[0]) - for tilingSchedule in tilingSchedules[1:]: - flatTilingSchedule += tilingSchedule + minimalVariableReplacement, newOpRepr = minimizeVariableReplacement(variableReplacement, operatorRepresentation) + operatorRepresentation.update(newOpRepr) - ctxt = self._replaceTiledExpressions(ctxt, templateNode, minimalVariableReplacement, 
flatTilingSchedule, - nodeMemoryConstraint) + updates = self._generateVariableUpdates(minimalVariableReplacement, ctxt, operatorRepresentation) - for codeSnippet in executionBlock.codeSnippets: + for snippet in updates: + executionBlock.addLeft(snippet.template, snippet.operatorRepresentation) - template, nRep = codeSnippet.template, codeSnippet.operatorRepresentation - - if not "closureStructArgs" in nRep: - continue - - keyList = {} - - for key in list(flatTilingSchedule.inputBaseOffsets.keys()) + list( - flatTilingSchedule.outputBaseOffsets.keys()): - keyList[unravelRep[key]] = operatorRepresentation[key] - - for key in copy.copy(nRep['closureStructArgs'].value).keys(): - if nRep['closureStructArgs'].value[key].referenceName in keyList.keys(): - nRep['closureStructArgs'].value[key] = type(nRep['closureStructArgs'].value[key])( - keyList[nRep['closureStructArgs'].value[key].referenceName], ctxt) - - return ctxt, executionBlock + return super().apply(ctxt, executionBlock, name, verbose) diff --git a/Deeploy/TilingExtension/MemoryConstraints.py b/Deeploy/TilingExtension/MemoryConstraints.py index 0c12368250..60d035ae1a 100644 --- a/Deeploy/TilingExtension/MemoryConstraints.py +++ b/Deeploy/TilingExtension/MemoryConstraints.py @@ -42,7 +42,7 @@ def __init__(self, memoryLevel: str, size: Union[IntVar, int]): self.size: Union[int, IntVar] = size self.multiBufferCoefficient: Union[int, IntVar] = 1 - self.shape: Optional[Tuple[int]] = None + self.shape: Optional[Tuple[int, ...]] = None self.addrSpace: Optional[Tuple[int, int]] = None def __repr__(self) -> str: diff --git a/Deeploy/TilingExtension/MemoryScheduler.py b/Deeploy/TilingExtension/MemoryScheduler.py index a6f6a75bc4..cc0df4846d 100644 --- a/Deeploy/TilingExtension/MemoryScheduler.py +++ b/Deeploy/TilingExtension/MemoryScheduler.py @@ -33,8 +33,7 @@ import numpy as np from ortools.constraint_solver.pywrapcp import IntVar -from 
Deeploy.CommonExtensions.OptimizationPasses.TopologyOptimizationPasses.LoweringOptimizationPasses import \ - _permuteList +from Deeploy.CommonExtensions.OptimizationPasses.TopologyOptimizationPasses.LoweringOptimizationPasses import _permute from Deeploy.DeeployTypes import ConstantBuffer, NetworkContext, TransientBuffer from Deeploy.MemoryLevelExtension.MemoryLevels import MemoryHierarchy from Deeploy.TilingExtension.MemoryConstraints import PatternMemoryConstraints, TensorMemoryConstraint @@ -540,7 +539,7 @@ def scheduleMemoryConstraints(self, def constraintTileBuffersWithOverlappingLifetime(self, tilerModel: TilerModel, ctxt: NetworkContext, patternMemoryConstraint: PatternMemoryConstraints, memoryHierarchy: MemoryHierarchy): - """This method adds the necessay constraints for tiling to be performed before the static memory allocation of the tile buffers. + """This method adds the necessary constraints for tiling to be performed before the static memory allocation of the tile buffers. To perform static memory allocation after tiling (i.e. decouple tiling and memory alloc), we need to do two assumptions 1. All tile buffers for each node have overlapping lifetime, so we can find their memory footprint by just summing their sizes and hence we don't need to know the specific memory allocation. This assumption is true as soon as we don't do tile several nodes together (ask me if you don't know what I mean here). 
@@ -659,7 +658,7 @@ def permMatrix2permList(permMatrix: np.ndarray) -> List[int]: permList = permMatrix2permList(_permutationMatrix) if pattern != [] and len(pattern) > 1: - permPattern = _permuteList(pattern, permList) + permPattern = _permute(pattern, permList) else: permPattern = pattern diff --git a/Deeploy/TilingExtension/TileConstraint.py b/Deeploy/TilingExtension/TileConstraint.py index eed22b0961..e73cd3b615 100644 --- a/Deeploy/TilingExtension/TileConstraint.py +++ b/Deeploy/TilingExtension/TileConstraint.py @@ -31,12 +31,11 @@ import numpy as np from ortools.constraint_solver.pywrapcp import IntVar -#from Deeploy import TilerModel from Deeploy.DeeployTypes import NetworkContext, OperatorRepresentation from Deeploy.TilingExtension.MemoryConstraints import MemoryConstraint, NodeMemoryConstraint, TensorMemoryConstraint from Deeploy.TilingExtension.TilerModel import TilerModel from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, HyperRectangle, MemoryTransfer, \ - TilingSchedule, VariableReplacementScheme, computeHyperRectangleList + TilingSchedule, VariableReplacementScheme, computeTileHyperRectangles class TileConstraint(): @@ -65,19 +64,15 @@ def constructSymbolicNodeRep(tilerModel: TilerModel, parseDict: Dict, @staticmethod def getBaseAddr(tilingSolution, targetMemLevel, name) -> List[Optional[int]]: + mc = tilingSolution.tensorMemoryConstraints[name].memoryConstraints[targetMemLevel] - block = tilingSolution.tensorMemoryConstraints[name].memoryConstraints[targetMemLevel] - - if block.addrSpace is None: + if mc.addrSpace is None: return [None] - baseAddr = block.addrSpace[0] - endAddr = block.addrSpace[1] - sol = [] - for it in range(block.multiBufferCoefficient): - addr = ((endAddr - baseAddr) // block.multiBufferCoefficient) * it + baseAddr - sol.append(addr) - return sol + start, end = mc.addrSpace + bufferSize = (end - start) // mc.multiBufferCoefficient + + return [start + bufferSize * i for i in range(mc.multiBufferCoefficient)] 
@staticmethod def extractBaseAddr(tilingSolution: NodeMemoryConstraint, targetMemLevel: str, @@ -102,9 +97,6 @@ def extractBaseAddr(tilingSolution: NodeMemoryConstraint, targetMemLevel: str, @staticmethod def sanitizeTilingSchedule(tilingSchedule: TilingSchedule) -> TilingSchedule: - - _tilingSchedule = tilingSchedule - for baseOffsetName, baseOffsetValue in tilingSchedule.inputBaseOffsets.copy().items(): if baseOffsetValue == [None]: for step in tilingSchedule.inputLoadSchedule: @@ -117,7 +109,7 @@ def sanitizeTilingSchedule(tilingSchedule: TilingSchedule) -> TilingSchedule: del step[baseOffsetName] del tilingSchedule.outputBaseOffsets[baseOffsetName] - return _tilingSchedule + return tilingSchedule @classmethod def wrapTilingSolution( @@ -144,14 +136,13 @@ def _offsetAdd(offsetA: Tuple[int, ...], offsetB: Tuple[int, ...]) -> Tuple[int, def getCubeTransfers(tensorConstraint: TensorMemoryConstraint, sourceCubes: List[AbsoluteHyperRectangle], sourceMemoryLevel: str, targetMemoryLevel: str) -> Tuple[List[AbsoluteHyperRectangle], List[int]]: - solution = [] solutionLengths = [] for sourceCube in sourceCubes: memTransfer = getMemoryTransfer(tensorConstraint, sourceCube.rectangle, sourceMemoryLevel, targetMemoryLevel) - solutionCubes = computeHyperRectangleList(memTransfer) + solutionCubes = computeTileHyperRectangles(memTransfer) solutionAbsoluteCubes = [ AbsoluteHyperRectangle(rectangle = cube, absoluteOffset = _offsetAdd(sourceCube.absoluteOffset, cube.offset)) @@ -162,32 +153,29 @@ def getCubeTransfers(tensorConstraint: TensorMemoryConstraint, sourceCubes: List return solution, solutionLengths - assert len(tilingSolution.outputTensorMemoryConstraints.keys()) == 1, "Expected node to have only one output!" - varOut = list(tilingSolution.outputTensorMemoryConstraints.keys())[0] + assert len(tilingSolution.outputTensorMemoryConstraints) == 1, "Expected node to have only one output!" 
+ + outVar, outTensorConstraint = next(iter(tilingSolution.outputTensorMemoryConstraints.items())) + memoryPath = list(outTensorConstraint.memoryConstraints.keys()) - outTensorConstraint = tilingSolution.tensorMemoryConstraints[varOut] - outTensorMemoryLevelPath = list(outTensorConstraint.memoryConstraints.keys()) - targetIdxs = [idx for idx, key in enumerate(outTensorMemoryLevelPath) if key == targetMemLevel] + assert targetMemLevel in memoryPath, \ + f"Target memory level {targetMemLevel} does not exist in the memory path {memoryPath}" - assert len(targetIdxs) == 1, f"Received more than one spec for memoryLevel {targetMemLevel}" - targetIdx = targetIdxs[0] + targetIdx = memoryPath.index(targetMemLevel) if targetIdx == 0: # SCHEREMO: Watch out - this happens if inputs are in L(N+1) but outputs only in L(N) targetIdx = 1 - fullShape = ctxt.lookup(varOut).shape - initialOffset = tuple([0] * len(fullShape)) + fullShape = ctxt.lookup(outVar).shape + initialOffset = (0,) * len(fullShape) outputCubes = [ AbsoluteHyperRectangle(rectangle = HyperRectangle(offset = initialOffset, dims = tuple(fullShape)), absoluteOffset = initialOffset) ] - for targetIdx in list(range(targetIdx + 1))[1:]: - sourceMemoryLevel = outTensorMemoryLevelPath[targetIdx - 1] - targetMemoryLevel = outTensorMemoryLevelPath[targetIdx] - outputCubes, solutionLengths = getCubeTransfers(outTensorConstraint, outputCubes, sourceMemoryLevel, - targetMemoryLevel) + for source, target in zip(memoryPath[:targetIdx], memoryPath[1:targetIdx + 1]): + outputCubes, solutionLengths = getCubeTransfers(outTensorConstraint, outputCubes, source, target) arrayOfCubes = [] _idx = 0 diff --git a/Deeploy/TilingExtension/TilerExtension.py b/Deeploy/TilingExtension/TilerExtension.py index abd3e38329..0e79178c28 100644 --- a/Deeploy/TilingExtension/TilerExtension.py +++ b/Deeploy/TilingExtension/TilerExtension.py @@ -58,6 +58,7 @@ from Deeploy.TilingExtension.TilerModel import TilerModel TilingSolution = 
List[PatternMemoryConstraints] +MemoryMap = Dict[str, List[List[MemoryBlock]]] _deallocTemplate = NodeTemplate("") @@ -307,17 +308,17 @@ def minimalloc(self, memoryMap, ctxt, nodeMemoryConstraint, capacity: int, memor return memoryMap - def computeTilingSchedule(self, ctxt: NetworkContext) -> Tuple[TilingSolution, Dict[str, List[List[MemoryBlock]]]]: - + def computeTilingSchedule(self, ctxt: NetworkContext) -> TilingSolution: assert self.tilerModel is not None and self.symbolicMemoryConstraints is not None, "Set up the model before trying to compute a schedule!" - collector = self.tilerModel.trySolveModel() - tilingSchedule = self._getTilingSolution(self.tilerModel, ctxt, collector, self.symbolicMemoryConstraints) - + tilingSolution = self._getTilingSolution(self.tilerModel, ctxt, collector, self.symbolicMemoryConstraints) if not self.memoryAllocStrategy == "MiniMalloc": + assert self.tilerModel is not None self.innerMemoryScheduler.annotateSolution(ctxt, self.tilerModel) self.outerMemoryScheduler.annotateSolution(ctxt, self.tilerModel) + return tilingSolution + def computeMemoryMap(self, ctxt: NetworkContext, tilingSolution: TilingSolution) -> MemoryMap: memoryMap = {} for key in self.innerMemoryScheduler.memoryMap.keys(): @@ -334,11 +335,16 @@ def computeTilingSchedule(self, ctxt: NetworkContext) -> Tuple[TilingSolution, D for idx, memMap in enumerate(memoryMap[memoryLevel]): if len(memoryMap[memoryLevel][idx]) != 0: memoryMap[memoryLevel][idx] = self.minimalloc( - memMap, ctxt, tilingSchedule[idx].nodeConstraints[0], + memMap, ctxt, tilingSolution[idx].nodeConstraints[0], self.memoryHierarchy.memoryLevels[memoryLevel].size - constantTensorOffset, memoryLevel) + print(f"\033[92mMemory allocation sucessful!\033[0m") - for idx, pattern in enumerate(tilingSchedule): + return memoryMap + + def annotateMemoryLevel(self, ctxt: NetworkContext, tilingSolution: TilingSolution, + memoryMap: Dict) -> NetworkContext: + for idx, pattern in enumerate(tilingSolution): for 
nodeIdx, nodeConstraint in enumerate(pattern.nodeConstraints): for tensorConstraint in nodeConstraint.tensorMemoryConstraints.values(): for memoryConstraint in tensorConstraint.memoryConstraints.values(): @@ -359,10 +365,7 @@ def computeTilingSchedule(self, ctxt: NetworkContext) -> Tuple[TilingSolution, D block = _block[0] memoryConstraint.addrSpace = block.addrSpace - - self._convertCtxtToStaticSchedule(ctxt, memoryMap) - - return tilingSchedule, memoryMap + return ctxt def setupModel(self, ctxt: NetworkContext, schedule: Schedule, layerBinding: OrderedDict[str, ONNXLayer], targetMemoryLevelMapping: TargetMemoryLevelMapping) -> NetworkContext: @@ -911,6 +914,19 @@ def assertUniformMemoryLevelAllocation(self, ctxt: NetworkContext, defaultMemory return False return True + def testTilingSolutionCorrectness(self, tilingSolution: TilingSolution) -> None: + # LMACAN: Assert buffer sizes are word aligned as per comment in MemoryScheduler.py:MemoryScheduler._buildCostVector() + byteAlignment = MemoryScheduler.byteAlignment + for patternMemoryConstraint in tilingSolution: + for nodeMemoryConstraint in patternMemoryConstraint.nodeConstraints: + for tensorMemoryConstraint in nodeMemoryConstraint.tensorMemoryConstraints.values(): + for memoryConstraint in tensorMemoryConstraint.memoryConstraints.values(): + if memoryConstraint.addrSpace is not None: + assert isinstance(memoryConstraint.multiBufferCoefficient, int) + bufferSize = (memoryConstraint.addrSpace[1] - + memoryConstraint.addrSpace[0]) // memoryConstraint.multiBufferCoefficient + assert bufferSize % byteAlignment == 0, f"Buffer in {memoryConstraint} is not {byteAlignment} byte aligned" + def testMemoryMapCorrectness(self, memoryMap: Dict[str, List[List[MemoryBlock]]], graph: gs.Graph, schedule: Schedule) -> None: @@ -919,8 +935,8 @@ def testMemoryMapCorrectness(self, memoryMap: Dict[str, List[List[MemoryBlock]]] } # JUNGVI: Assert output buffers are alive until the end - for outputBuffer in graph.outputs: - assert 
memoryBlockMap[outputBuffer.name]._lifetime[-1] == len( + for tensor in graph.outputs: + assert memoryBlockMap[tensor.name]._lifetime[-1] == len( schedule), "Invalid memory map! Output buffer is not alive at the last step!" # JUNGVI: Assert input buffers are alive at the beginning @@ -956,10 +972,13 @@ def worstCaseBufferSize(self): return maxAddr - def tile(self, tilingSolution: Optional[TilingSolution] = None): - if tilingSolution is None: - schedule = self.scheduler(self.graph) + def tile(self, tilingSolution: Optional[TilingSolution] = None, memoryMap: Optional[MemoryMap] = None): + assert (tilingSolution is None and memoryMap is None) or (tilingSolution is not None and memoryMap is not None), \ + "You need to provide both the manual tilingSolution and the memoryMap to override tiling." + + schedule = self.scheduler(self.graph) + if tilingSolution is None and memoryMap is None: # JUNGVI: Currently using MiniMalloc is only supported for layer-wise execution and all tensors in the default memory level. if self.tiler.memoryAllocStrategy == "MiniMalloc": assert self.tiler.assertLayerWiseTiling(schedule), "Using MiniMalloc and DFT is not supported!" 
@@ -971,11 +990,22 @@ def tile(self, tilingSolution: Optional[TilingSolution] = None): schedule = schedule, layerBinding = self.layerBinding, targetMemoryLevelMapping = self.getTargetMemoryLevelMapping()) - tilingSolution, memoryMap = self.tiler.computeTilingSchedule(self.ctxt) - if self.tiler.visualizeMemoryAlloc: - self.tiler.plotMemoryAlloc(memoryMap, self.ctxt, self.deeployStateDir, self.Platform.memoryHierarchy) + tilingSolution = self.tiler.computeTilingSchedule(self.ctxt) + + memoryMap = self.tiler.computeMemoryMap(self.ctxt, tilingSolution) + + assert tilingSolution is not None and memoryMap is not None + + self.tiler.testTilingSolutionCorrectness(tilingSolution) + + self.tiler.annotateMemoryLevel(self.ctxt, tilingSolution, memoryMap) + + self.ctxt = self.tiler._convertCtxtToStaticSchedule(self.ctxt, memoryMap) + + if self.tiler.visualizeMemoryAlloc: + self.tiler.plotMemoryAlloc(memoryMap, self.ctxt, self.deeployStateDir, self.Platform.memoryHierarchy) - self.tiler.testMemoryMapCorrectness(memoryMap, self.graph, schedule) + self.tiler.testMemoryMapCorrectness(memoryMap, self.graph, schedule) # SCHEREMO: Annotate execution block with solution for layer, pattern in zip(self.layerBinding.values(), tilingSolution): diff --git a/Deeploy/TilingExtension/TilingCodegen.py b/Deeploy/TilingExtension/TilingCodegen.py index 6a2ff26674..37e032064a 100644 --- a/Deeploy/TilingExtension/TilingCodegen.py +++ b/Deeploy/TilingExtension/TilingCodegen.py @@ -26,12 +26,13 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Dict, Iterable, List, Optional, Tuple, Type +from typing import Dict, Generator, List, Sequence, Tuple, Type import numpy as np from Deeploy.AbstractDataTypes import Pointer -from Deeploy.TilingExtension.MemoryConstraints import MemoryConstraint, NodeMemoryConstraint +from Deeploy.DeeployTypes import OperatorRepresentation, VariableBuffer +from Deeploy.TilingExtension.MemoryConstraints import MemoryConstraint @dataclass 
@@ -194,177 +195,117 @@ def minimizeVariableReplacement( return VariableReplacementScheme(newPerTileRep, newRepTypes), operatorRepresentation -def minimizeRectangleDims(hyperRectangle: HyperRectangle, - referenceBuffer: VariableBuffer) -> Tuple[HyperRectangle, HyperRectangle]: - - rectDims = hyperRectangle.dims - rectOffset = hyperRectangle.offset - shape = referenceBuffer.shape - newDims: List[int] = [] - newOffset: List[int] = [] - - newBaseline = [] - - reversedRectOffset = list(reversed(rectOffset)) +def minimizeRectangle(rect: HyperRectangle, referenceShape: Sequence[int]) -> Tuple[HyperRectangle, Tuple[int, ...]]: + minRectShape: List[int] = [] + minRectOffset: List[int] = [] + minReferenceShape: List[int] = [] # SCHEREMO: Collapse dimensions right to left - acc = 0 - for idx, (tileDim, bufDim) in enumerate(zip(reversed(rectDims), reversed(shape))): - - if tileDim == bufDim: - assert reversedRectOffset[idx] == 0, "Can't not tile a dimension and have an offset, tf" - - # SCHEREMO: Collapse if equal - if tileDim == bufDim and acc != 0: - acc *= tileDim - elif tileDim == bufDim and acc == 0: - acc = tileDim - elif tileDim != bufDim and acc != 0: - newDims.insert(0, acc * tileDim) - newBaseline.insert(0, acc * bufDim) - newOffset.insert(0, acc * reversedRectOffset[idx]) - acc = 0 + currentCollapsedDim = 1 + for rectDim, rectOffset, referenceDim in zip(reversed(rect.dims), reversed(rect.offset), reversed(referenceShape)): + if rectDim == referenceDim: + assert rectOffset == 0, f"Rectangle offset should be zero when the dimensions are the same. 
Received rectangle {rect} and reference shape {referenceShape}" + currentCollapsedDim *= rectDim else: - newDims.insert(0, tileDim) - newBaseline.insert(0, bufDim) - newOffset.insert(0, reversedRectOffset[idx]) - - if acc > 1: - newDims.insert(0, acc) - newBaseline.insert(0, acc) - newOffset.insert(0, acc * reversedRectOffset[idx]) - - # JUNGVI: If the function collapsed all dimensions of the tensor, set it to dim 1 and offset 0 - if len(newDims) == 0: - newDims = [1] - newBaseline = [1] - newOffset = [0] + minRectShape.insert(0, currentCollapsedDim * rectDim) + minReferenceShape.insert(0, currentCollapsedDim * referenceDim) + minRectOffset.insert(0, currentCollapsedDim * rectOffset) + currentCollapsedDim = 1 - newRect = HyperRectangle(tuple(newOffset), tuple(newDims)) - newBaseline = HyperRectangle(tuple([0] * len(newOffset)), tuple(newBaseline)) + if currentCollapsedDim > 1 or len(minRectShape) == 0: + minRectShape.insert(0, currentCollapsedDim) + minReferenceShape.insert(0, currentCollapsedDim) + minRectOffset.insert(0, currentCollapsedDim * rect.offset[0]) - return newRect, newBaseline + return HyperRectangle(tuple(minRectOffset), tuple(minRectShape)), tuple(minReferenceShape) -def calculateRectangleOffset(hyperRectangle: HyperRectangle, referenceBuffer: VariableBuffer) -> int: +def padShape(shape: Tuple[int, ...], rank: int) -> Tuple[int, ...]: + assert rank >= len( + shape), f"Cannot pad to rank smaller then shape's. Received rank: {rank}, shape rank: {len(shape)}" + ret = tuple([1] * (rank - len(shape))) + shape + assert len(ret) == rank + return ret - minimalRect, baselineRect = minimizeRectangleDims(hyperRectangle, referenceBuffer) - offsetMult = [1] - for dim in reversed(baselineRect.dims[1:]): - offsetMult.insert(0, dim * np.prod(offsetMult)) +def padOffset(offset: Tuple[int, ...], rank: int) -> Tuple[int, ...]: + assert rank >= len( + offset), f"Cannot pad to rank smaller then offset's. 
Received rank: {rank}, offset rank: {len(offset)}" + ret = tuple([0] * (rank - len(offset))) + offset + assert len(ret) == rank + return ret - accOffset = 0 - for offsetIdx, mult in zip(minimalRect.offset, offsetMult): - accOffset += offsetIdx * mult - return int(accOffset * (referenceBuffer._type.referencedType.typeWidth // 8)) +def padStride(stride: Tuple[int, ...], rank: int, paddingStride: int) -> Tuple[int, ...]: + assert rank >= len( + stride), f"Cannot pad to rank smaller then stride's. Received rank: {rank}, stride rank: {len(stride)}" + ret = tuple([paddingStride] * (rank - len(stride))) + stride + assert len(ret) == rank + return ret -def extractTilingTransfer(tilingSolution: NodeMemoryConstraint, targetMemLevel: str, - tensorName: str) -> Optional[MemoryTransfer]: +def stridesFromShape(shape: Sequence[int]) -> Tuple[int, ...]: + strides = [1] * len(shape) + for idx, dim in enumerate(reversed(shape[1:])): + strides[idx + 1] = strides[idx] * dim + return tuple(reversed(strides)) - for name, constraint in tilingSolution.tensorMemoryConstraints.items(): - if not name == tensorName: - continue - sourceIdx = 0 +def calculateFlatOffset(offsets: Sequence[int], strides: Sequence[int]) -> int: + assert len(offsets) == len(strides), \ + f"Offsets and strides have to have the same number of dimensions. 
Length offsets: {len(offsets)}, strides: {len(strides)}" + return sum(offset * stride for offset, stride in zip(offsets, strides)) - for idx, memConstraint in enumerate(constraint.memoryConstraints.values()): - if memConstraint.memoryLevel != targetMemLevel: - continue - sourceIdx = idx - targetIdx = idx - 1 +def calculateFlatOffsetInBytes(tile: HyperRectangle, referenceBuffer: VariableBuffer) -> int: + return int( + calculateFlatOffset(tile.offset, stridesFromShape(referenceBuffer.shape)) * + (referenceBuffer._type.referencedType.typeWidth // 8)) - if sourceIdx == 0: - return None - return MemoryTransfer( - list(constraint.memoryConstraints.values())[targetIdx], - list(constraint.memoryConstraints.values())[sourceIdx]) +def computeTileHyperRectangles(memoryTransfer: MemoryTransfer) -> List[HyperRectangle]: + assert memoryTransfer.source.shape is not None, "Source transfer shape cannot be undefined!" + assert memoryTransfer.destination.shape is not None, "Destination transfer shape cannot be undefined!" - raise RuntimeError(f"{tensorName} not found in tilingSolution!") + assert len(memoryTransfer.source.shape) == len(memoryTransfer.destination.shape), \ + f"Source and target of memory transfer {memoryTransfer} don't have the same number of dimensions!" + largeShape = memoryTransfer.source.shape + smallShape = memoryTransfer.destination.shape -def computeHyperRectangleList(memTrans: MemoryTransfer) -> List[HyperRectangle]: + for dimIdx, (dimSizeSmall, dimSizeLarge) in enumerate(zip(smallShape, largeShape)): + assert dimSizeSmall <= dimSizeLarge, f"smallShape[{dimIdx}] should not be bigger then largeShape[{dimIdx}]. 
({dimSizeSmall} > {dimSizeLarge})" - def nextElement(idxVec: List[int], targetVector: List[int]) -> Optional[List[int]]: - nextIdx = [] - - countUp = True - for vecIdx, maxIdx in zip(reversed(idxVec), reversed(targetVector)): - if countUp: - if vecIdx == maxIdx: - nextIdx.append(1) + def nextTileIndex(tileIndexEnd: List[int]) -> Generator[List[int]]: + tileCount = np.prod(tileIndexEnd) + tileIndex = [0] * len(tileIndexEnd) + for _ in range(tileCount): + yield tileIndex + for dimIdx, (idx, end) in enumerate(zip(tileIndex, tileIndexEnd)): + if idx + 1 < end: + tileIndex[dimIdx] = idx + 1 + break else: - nextIdx.append(vecIdx + 1) - countUp = False - else: - nextIdx.append(vecIdx) - - nextIdx.reverse() - - if countUp: - return None - - return nextIdx - - def calculateCost(idxVec: Iterable[int], smallShape: Tuple[int]) -> List[int]: - outVec = [] - for idx, step in zip(idxVec, smallShape): - outVec.append((idx - 1) * step) - - return outVec - - def calculateDim(idxVec: List[int], numTiles: List[int], smallShape: Tuple[int], - largeShape: Tuple[int]) -> List[int]: - - dimVec = [] - - for idx, (vecIdx, maxIdx) in enumerate(zip(idxVec, numTiles)): - if vecIdx != maxIdx: - dimVec.append(smallShape[idx]) - continue - if largeShape[idx] % smallShape[idx] == 0: - dimVec.append(smallShape[idx]) - continue - dimVec.append(largeShape[idx] % smallShape[idx]) - - return dimVec - - src = memTrans.source - dst = memTrans.destination - - largeShape = src.shape - smallShape = dst.shape - - assert largeShape is not None, "Transfer shapes cannot be undefined!" - assert smallShape is not None, "Transfer shapes cannot be undefined!" - - assert len(smallShape) == len( - largeShape), f"Source and target of memory transfer {memTrans} don't have the same number of dimensions!" 
- for idx, (dim1, dim2) in enumerate(zip(smallShape, largeShape)): - assert dim1 <= dim2, f"Large shape is smaller in dimension {idx}" - - totNumTiles = 1 - numTiles: List[int] = [] + tileIndex[dimIdx] = 0 - for (dim1, dim2) in zip(smallShape, largeShape): - totNumTiles *= np.ceil(dim2 / dim1) - numTiles.append(int(np.ceil(dim2 / dim1))) + tileHyperRectangles = [] - cubeList: List[HyperRectangle] = [] - idxVec = [1] * len(smallShape) + tileIndexEnd = [ + int(np.ceil(dimSizeLarge / dimSizeSmall)) for dimSizeLarge, dimSizeSmall in zip(largeShape, smallShape) + ] + for tileIndex in nextTileIndex(tileIndexEnd): + tileOffset = tuple(dimIdx * dimSizeSmall for dimIdx, dimSizeSmall in zip(tileIndex, smallShape)) + for dimIdx, (dimOffset, dimSizeLarge) in enumerate(zip(tileOffset, largeShape)): + assert dimOffset >= 0, f"tileOffset[{dimIdx}] shoud not be smaller then zero ({dimOffset} < 0)" + assert dimOffset < dimSizeLarge, f"tileOffset[{dimIdx}] should not be bigger or equal then largeShape[{dimIdx}] ({dimOffset} >= {dimSizeLarge})" - for i in range(int(totNumTiles)): - offsetVec = calculateCost(idxVec, smallShape) - dimVec = calculateDim(idxVec, numTiles, smallShape, largeShape) - cubeList.append(HyperRectangle(tuple(offsetVec), tuple(dimVec))) + tileSize = tuple( + min(dimSizeSmall, dimSizeLarge - dimOffset) + for dimSizeSmall, dimSizeLarge, dimOffset in zip(smallShape, largeShape, tileOffset)) + for dimIdx, (dimSize, dimSizeSmall) in enumerate(zip(tileSize, smallShape)): + assert dimSize > 0, f"tileOffset[{dimIdx}] shoud not be smaller or equal then zero ({dimSize} <= 0)" + assert dimSize <= dimSizeSmall, f"tileSize[{dimIdx}] should not be bigger then smallShape[{dimIdx}] ({dimSize} > {dimSizeSmall})" - nextVec = nextElement(idxVec, numTiles) - if nextVec is None: - break - idxVec = nextVec + tileHyperRectangles.append(HyperRectangle(tileOffset, tileSize)) - return cubeList + return tileHyperRectangles diff --git a/DeeployTest/Platforms/Siracusa/src/deeploytest.c 
b/DeeployTest/Platforms/Siracusa/src/deeploytest.c index 9a0d8f39db..8ed6952c72 100644 --- a/DeeployTest/Platforms/Siracusa/src/deeploytest.c +++ b/DeeployTest/Platforms/Siracusa/src/deeploytest.c @@ -169,7 +169,8 @@ void main(void) { i < DeeployNetwork_outputs_bytes[buf] / sizeof(OUTPUTTYPE); i++) { OUTPUTTYPE expected = ((OUTPUTTYPE *)testOutputVector[buf])[i]; OUTPUTTYPE actual = ((OUTPUTTYPE *)compbuf)[i]; - OUTPUTTYPE diff = expected - actual; + int error = expected - actual; + OUTPUTTYPE diff = (OUTPUTTYPE)(error < 0 ? -error : error); if (diff) { tot_err += 1; diff --git a/DeeployTest/deeployStateEqualityTest.py b/DeeployTest/deeployStateEqualityTest.py index 297e52e65c..58dbc7dad4 100644 --- a/DeeployTest/deeployStateEqualityTest.py +++ b/DeeployTest/deeployStateEqualityTest.py @@ -32,7 +32,7 @@ import onnx import onnx_graphsurgeon as gs from testUtils.platformMapping import mapDeployer, mapPlatform, setupMemoryPlatform -from testUtils.typeMapping import inferInputType +from testUtils.typeMapping import inferTypeAndOffset from Deeploy.DeeployTypes import NetworkContext, StructBuffer, VariableBuffer, _backendPostBindingFilename, \ _middlewarePreLoweringFilename @@ -79,7 +79,7 @@ platform, signProp = mapPlatform(args.platform) for index, num in enumerate(test_inputs): - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset diff --git a/DeeployTest/generateNetwork.py b/DeeployTest/generateNetwork.py index bf590f06b1..7e05260c35 100644 --- a/DeeployTest/generateNetwork.py +++ b/DeeployTest/generateNetwork.py @@ -31,12 +31,11 @@ import numpy as np import onnx import onnx_graphsurgeon as gs -from testUtils.codeGenerate import generateTestInputsHeader, generateTestNetworkHeader, \ - generateTestNetworkImplementation, generateTestOutputsHeader +from testUtils.codeGenerate import generateTestNetwork from testUtils.graphDebug import 
generateDebugConfig from testUtils.platformMapping import mapDeployer, mapPlatform from testUtils.testRunner import TestGeneratorArgumentParser -from testUtils.typeMapping import inferInputType, parseDataType +from testUtils.typeMapping import inferTypeAndOffset, parseDataType from Deeploy.AbstractDataTypes import PointerClass from Deeploy.CommonExtensions.DataTypes import IntegerDataTypes @@ -52,9 +51,6 @@ def generateNetwork(args): onnx_graph = onnx.load_model(f'{args.dir}/network.onnx') graph = gs.import_onnx(onnx_graph) - inputTypes = {} - inputOffsets = {} - inputs = np.load(f'{args.dir}/inputs.npz') outputs = np.load(f'{args.dir}/outputs.npz') if os.path.isfile(f'{args.dir}/activations.npz'): @@ -110,8 +106,11 @@ def generateNetwork(args): platform, signProp = mapPlatform(args.platform) - for index, (name, num) in enumerate(zip(inputs.files, test_inputs)): - if np.prod(num.shape) == 0: + inputTypes = {} + inputOffsets = {} + + for index, (name, values) in enumerate(zip(inputs.files, test_inputs)): + if np.prod(values.shape) == 0: continue if name in manual_keys: @@ -119,7 +118,7 @@ def generateNetwork(args): offset = manual_offsets[name] # Check if the provided values fit into the dereferenced type - vals = num.astype(np.int64) - offset + vals = values.astype(np.int64) - offset if not _type.checkPromotion(vals): lo, hi = _type.typeMin, _type.typeMax raise RuntimeError(f"Provided type '{_type.typeName}' with offset {offset} " @@ -135,7 +134,7 @@ def generateNetwork(args): _type = PointerClass(_type) else: - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(values, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset @@ -156,39 +155,36 @@ def generateNetwork(args): # Parse graph and infer output levels and signedness _ = deployer.generateFunction(verbose = verbosityCfg) - # Create input and output vectors - os.makedirs(f'{args.dumpdir}', exist_ok = True) - print("=" * 80) - testInputStr = 
generateTestInputsHeader(deployer, test_inputs, inputTypes, inputOffsets, verbose = args.verbose) - f = open(f'{args.dumpdir}/testinputs.h', "w") - f.write(testInputStr) - f.close() - - testOutputStr = generateTestOutputsHeader(deployer, test_outputs, signProp, verbose = args.verbose) - f = open(f'{args.dumpdir}/testoutputs.h', "w") - f.write(testOutputStr) - f.close() - - # Generate code for Network - testNetworkHeaderStr = generateTestNetworkHeader(deployer, platform) - f = open(f'{args.dumpdir}/Network.h', "w") - f.write(testNetworkHeaderStr) - f.close() - - testNetworkImplementationStr = generateTestNetworkImplementation(deployer, platform, verbose = args.verbose) - f = open(f'{args.dumpdir}/Network.c', "w") - f.write(testNetworkImplementationStr) - f.close() - - clang_format = "{BasedOnStyle: llvm, IndentWidth: 2, ColumnLimit: 160}" - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/Network.c') - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/Network.h') - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/testoutputs.h') - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/testinputs.h') + # Offset the input and output values if signprop + if signProp: + test_inputs = [value - inputOffsets[f"input_{i}"] for i, value in enumerate(test_inputs)] + + for i, values in enumerate(test_outputs): + buffer = deployer.ctxt.lookup(f"output_{i}") + if buffer._type.referencedType.typeName == "float32_t": + continue + if not buffer._signed: + values -= buffer.nLevels // 2 + + generateTestNetwork(deployer, test_inputs, test_outputs, args.dumpdir, verbosityCfg) if args.verbose: print() print("=" * 80) + print("Output:") + for i in range(len(test_outputs)): + buffer = deployer.ctxt.lookup(f"output_{i}") + logLine = f" - '{buffer.name}': Type: {buffer._type.referencedType.typeName}" + if signProp: + logLine += f", nLevels: {buffer.nLevels}, Signed: {buffer._signed}" + print(logLine) + print('Input:') + 
for i in range(len(test_inputs)): + buffer = deployer.ctxt.lookup(f"input_{i}") + print( + f" - '{buffer.name}': Type: {buffer._type.referencedType.typeName}, Offset: {inputOffsets[buffer.name]}" + ) + print("=" * 80) num_ops = deployer.numberOfOps(args.verbose) print("=" * 80) print() diff --git a/DeeployTest/testDebugPrintPass.py b/DeeployTest/testDebugPrintPass.py index bbd7373f10..a3e05e39e0 100644 --- a/DeeployTest/testDebugPrintPass.py +++ b/DeeployTest/testDebugPrintPass.py @@ -31,7 +31,7 @@ import onnx_graphsurgeon as gs from testUtils.platformMapping import mapDeployer, mapPlatform from testUtils.testRunner import TestGeneratorArgumentParser, getPaths -from testUtils.typeMapping import inferInputType +from testUtils.typeMapping import inferTypeAndOffset from Deeploy.CommonExtensions.OptimizationPasses.TopologyOptimizationPasses.DebugPasses import DebugPrintPass from Deeploy.MemoryLevelExtension.MemoryLevels import MemoryHierarchy, MemoryLevel @@ -71,10 +71,7 @@ test_inputs = [inputs[x].reshape(-1).astype(np.float64) for x in inputs.files] test_outputs = [outputs[x].reshape(-1).astype(np.float64) for x in outputs.files] for index, num in enumerate(test_inputs): - # WIESP: Do not infer types and offset of empty arrays - if np.prod(num.shape) == 0: - continue - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset diff --git a/DeeployTest/testDmas.py b/DeeployTest/testDmas.py new file mode 100644 index 0000000000..6cca30e15d --- /dev/null +++ b/DeeployTest/testDmas.py @@ -0,0 +1,68 @@ +import itertools +import subprocess +from typing import Tuple + + +def test(dma: str, inputShape: Tuple[int, ...], tileShape: Tuple[int, ...], nodeCount: int, dataType: str, + doublebuffer: bool): + cfg_str = f""" + - input shape: {inputShape} + - tile shape: {tileShape} + - node count: {nodeCount} + - data type: {dataType} + - doublebuffering: 
{doublebuffer} + - dma: {dma} + """ + + print(f"test{dma}: Testing {dma} with following configuration:" + cfg_str) + + testRunnerMap = { + "MchanDma": "testRunner_siracusa_mchandma.py", + "L3Dma": "testRunner_siracusa_l3dma.py", + "SnitchDma": "testRunner_snitch_dma.py", + } + + assert dma in testRunnerMap, f"{dma} missing its own testRunner mapping" + + testRunner = testRunnerMap[dma] + + cmd = [f"python {testRunner}", f"-t test{dma}", "-DNUM_CORES=8"] + cmd.append(f"--input-shape {' '.join(str(x) for x in inputShape)}") + cmd.append(f"--tile-shape {' '.join(str(x) for x in tileShape)}") + cmd.append(f"--node-count {nodeCount}") + cmd.append(f"--type {dataType}") + if doublebuffer: + cmd.append("--doublebuffer") + + full_cmd = " ".join(cmd) + + print(f"Running command:\n{full_cmd}\n") + + try: + subprocess.run(full_cmd, shell = True, check = True) + except subprocess.CalledProcessError: + print(f"test{dma}: Failed test:" + cfg_str) + print(f"Rerun with command:\n{full_cmd}") + exit(-1) + + +# input shape, tile shape, node count, data type +test_shapes_and_more = [ + ((10, 10), (10, 10), 1, "uint8_t"), + ((10, 10), (10, 4), 1, "uint8_t"), + ((10, 10), (10, 4), 1, "uint16_t"), + ((10, 10), (10, 4), 1, "uint32_t"), + ((10, 10), (3, 4), 1, "uint32_t"), + ((10, 10), (3, 4), 2, "uint32_t"), + ((10, 10, 10), (2, 3, 4), 1, "uint8_t"), + ((10, 10, 10, 10), (2, 3, 5, 4), 1, "uint8_t"), + ((10, 10, 10, 10), (2, 3, 5, 4), 1, "uint32_t"), + ((10, 10, 10, 10, 10), (2, 3, 5, 7, 4), 1, "uint8_t"), +] + +is_doublebuffers = [True, False] +dmas = ["MchanDma", "L3Dma", "SnitchDma"] + +for testShape, doublebuffer, dma in itertools.product(test_shapes_and_more, is_doublebuffers, dmas): + inputShape, tileShape, nodeCount, dataType = testShape + test(dma, inputShape, tileShape, nodeCount, dataType, doublebuffer) diff --git a/DeeployTest/testMVP.py b/DeeployTest/testMVP.py index 6f0342e7b4..a50ff739ea 100644 --- a/DeeployTest/testMVP.py +++ b/DeeployTest/testMVP.py @@ -26,94 +26,30 @@
import os import sys from collections import OrderedDict -from typing import List, Union +from typing import List import numpy as np import onnx import onnx_graphsurgeon as gs import pytest -from ortools.constraint_solver.pywrapcp import IntVar -from testUtils.codeGenerate import generateL3HexDump, generateTestInputsHeader, generateTestNetworkHeader, \ - generateTestNetworkImplementation, generateTestOutputsHeader +from testUtils.codeGenerate import generateTestNetwork from testUtils.graphDebug import generateDebugConfig from testUtils.platformMapping import mapDeployer, mapPlatform, setupMemoryPlatform from testUtils.testRunner import TestGeneratorArgumentParser -from testUtils.typeMapping import inferInputType +from testUtils.tilingUtils import DBOnlyL3Tiler, DBTiler, SBTiler +from testUtils.typeMapping import inferTypeAndOffset -from Deeploy.DeeployTypes import CodeGenVerbosity, ConstantBuffer, NetworkContext, NetworkDeployer, ONNXLayer, \ - SubGraph, TransientBuffer +from Deeploy.DeeployTypes import CodeGenVerbosity, NetworkDeployer, ONNXLayer from Deeploy.EngineExtension.NetworkDeployers.EngineColoringDeployer import EngineColoringDeployerWrapper from Deeploy.MemoryLevelExtension.MemoryLevels import MemoryHierarchy, MemoryLevel from Deeploy.MemoryLevelExtension.NetworkDeployers.MemoryLevelDeployer import MemoryDeployerWrapper from Deeploy.MemoryLevelExtension.OptimizationPasses.MemoryLevelAnnotationPasses import AnnotateDefaultMemoryLevel, \ AnnotateIOMemoryLevel, AnnotateNeurekaWeightMemoryLevel -from Deeploy.TilingExtension.TilerExtension import Tiler, TilerDeployerWrapper -from Deeploy.TilingExtension.TilerModel import TilerModel +from Deeploy.TilingExtension.TilerExtension import TilerDeployerWrapper _TEXT_ALIGN = 30 -class DBOnlyL3Tiler(Tiler): - - def multiBufferStrategy(self, tilerModel: TilerModel, ctxt: NetworkContext, pattern: SubGraph, path: List[str], - hop: str, tensorName: str) -> Union[int, IntVar]: - - varBuffer = ctxt.lookup(tensorName) - - 
generalCoeff = 2 - - if isinstance(varBuffer, TransientBuffer): - coefficient = 1 - elif isinstance(varBuffer, ConstantBuffer): - coefficient = generalCoeff - else: - coefficient = generalCoeff - - if args.defaultMemLevel == "L2": - return coefficient - - if hop == 'L1': - return 1 - - return coefficient - - -class DBTiler(Tiler): - - def multiBufferStrategy(self, tilerModel: TilerModel, ctxt: NetworkContext, pattern: SubGraph, path: List[str], - hop: str, tensorName: str) -> Union[int, IntVar]: - varBuffer = ctxt.lookup(tensorName) - - generalCoeff = 2 - - if isinstance(varBuffer, TransientBuffer): - coefficient = 1 - elif isinstance(varBuffer, ConstantBuffer): - coefficient = generalCoeff - else: - coefficient = generalCoeff - - return coefficient - - -class SBTiler(Tiler): - - def multiBufferStrategy(self, tilerModel: TilerModel, ctxt: NetworkContext, pattern: SubGraph, path: List[str], - hop: str, tensorName: str) -> Union[int, IntVar]: - varBuffer = ctxt.lookup(tensorName) - - generalCoeff = 1 - - if isinstance(varBuffer, TransientBuffer): - coefficient = 1 - elif isinstance(varBuffer, ConstantBuffer): - coefficient = generalCoeff - else: - coefficient = generalCoeff - - return coefficient - - # Mock of the Global Scheduler's inteface # Returns a list of list of nodes instead of simply a list # Inner list represent the patter over which we tile @@ -161,10 +97,7 @@ def setupDeployer(graph: gs.Graph, memoryHierarchy: MemoryHierarchy, defaultTarg platform.engines[0].enableStrides = True for index, num in enumerate(test_inputs): - # WIESP: Do not infer types and offset of empty arrays - if np.prod(num.shape) == 0: - continue - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset @@ -198,7 +131,11 @@ def setupDeployer(graph: gs.Graph, memoryHierarchy: MemoryHierarchy, defaultTarg # Make the deployer tiler aware if args.doublebuffer: - 
deployer = TilerDeployerWrapper(deployer, DBOnlyL3Tiler) + assert args.defaultMemLevel in ["L3", "L2"] + if args.defaultMemLevel == "L3": + deployer = TilerDeployerWrapper(deployer, DBOnlyL3Tiler) + else: + deployer = TilerDeployerWrapper(deployer, DBTiler) else: deployer = TilerDeployerWrapper(deployer, SBTiler) @@ -336,10 +273,7 @@ def setupDeployer(graph: gs.Graph, memoryHierarchy: MemoryHierarchy, defaultTarg signProp = False for index, num in enumerate(test_inputs): - # WIESP: Do not infer types and offset of empty arrays - if np.prod(num.shape) == 0: - continue - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset @@ -355,42 +289,36 @@ def setupDeployer(graph: gs.Graph, memoryHierarchy: MemoryHierarchy, defaultTarg _ = deployer.generateFunction(verbosityCfg) - # Create input and output vectors - os.makedirs(f'{args.dumpdir}', exist_ok = True) - - print("=" * 80) - testInputStr = generateTestInputsHeader(deployer, test_inputs, inputTypes, inputOffsets, args.verbose) - f = open(f'{args.dumpdir}/testinputs.h', "w") - f.write(testInputStr) - f.close() - - testOutputStr = generateTestOutputsHeader(deployer, test_outputs, signProp, args.verbose) - f = open(f'{args.dumpdir}/testoutputs.h', "w") - f.write(testOutputStr) - f.close() + # Offset the input and output values if signprop + if signProp: + test_inputs = [value - inputOffsets[f"input_{i}"] for i, value in enumerate(test_inputs)] - # Generate code for Network - testNetworkHeaderStr = generateTestNetworkHeader(deployer, platform) - f = open(f'{args.dumpdir}/Network.h', "w") - f.write(testNetworkHeaderStr) - f.close() + for i, values in enumerate(test_outputs): + buffer = deployer.ctxt.lookup(f"output_{i}") + if buffer._type.referencedType.typeName == "float32_t": + continue + if not buffer._signed: + values -= buffer.nLevels // 2 - testNetworkImplementationStr = 
generateTestNetworkImplementation(deployer, platform) - f = open(f'{args.dumpdir}/Network.c', "w") - f.write(testNetworkImplementationStr) - f.close() - - generateL3HexDump(deployer, os.path.join(f'{args.dumpdir}', 'hex'), test_inputs, test_outputs) - - clang_format = "{BasedOnStyle: llvm, IndentWidth: 2, ColumnLimit: 160}" - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/Network.c') - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/Network.h') - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/testoutputs.h') - os.system(f'clang-format -i --style="{clang_format}" {args.dumpdir}/testinputs.h') + generateTestNetwork(deployer, test_inputs, test_outputs, args.dumpdir, verbosityCfg) if args.verbose: print() print("=" * 80) + print("Output:") + for i in range(len(test_outputs)): + buffer = deployer.ctxt.lookup(f"output_{i}") + logLine = f" - '{buffer.name}': Type: {buffer._type.referencedType.typeName}" + if signProp: + logLine += f", nLevels: {buffer.nLevels}, Signed: {buffer._signed}" + print(logLine) + print('Input:') + for i in range(len(test_inputs)): + buffer = deployer.ctxt.lookup(f"input_{i}") + print( + f" - '{buffer.name}': Type: {buffer._type.referencedType.typeName}, Offset: {inputOffsets[buffer.name]}" + ) + print("=" * 80) num_ops = deployer.numberOfOps(args.verbose) print("=" * 80) print() diff --git a/DeeployTest/testMemoryLevelExtension.py b/DeeployTest/testMemoryLevelExtension.py index 5532f5c010..22a5405a5c 100644 --- a/DeeployTest/testMemoryLevelExtension.py +++ b/DeeployTest/testMemoryLevelExtension.py @@ -32,7 +32,7 @@ import onnx_graphsurgeon as gs from testUtils.platformMapping import defaultScheduler, mapDeployer, mapPlatform, setupMemoryPlatform from testUtils.testRunner import TestGeneratorArgumentParser, getPaths -from testUtils.typeMapping import inferInputType +from testUtils.typeMapping import inferTypeAndOffset from 
Deeploy.CommonExtensions.OptimizationPasses.TopologyOptimizationPasses.LoweringOptimizationPasses import \ NCHWtoNHWCPass, TransposeMatmulInputsPass @@ -87,7 +87,7 @@ platform, signProp = mapPlatform(args.platform) for index, num in enumerate(test_inputs): - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset if "simpleRegression" in args.dir: diff --git a/DeeployTest/testPrintInputOutputTransformation.py b/DeeployTest/testPrintInputOutputTransformation.py index 3b2d6d144d..1bf4f90c54 100644 --- a/DeeployTest/testPrintInputOutputTransformation.py +++ b/DeeployTest/testPrintInputOutputTransformation.py @@ -31,7 +31,7 @@ import onnx_graphsurgeon as gs from testUtils.platformMapping import mapDeployer, mapPlatform from testUtils.testRunner import TestGeneratorArgumentParser, getPaths -from testUtils.typeMapping import inferInputType +from testUtils.typeMapping import inferTypeAndOffset from Deeploy.CommonExtensions.CodeTransformationPasses.PrintInputs import MemoryAwarePrintInputGeneration, \ MemoryAwarePrintOutputGeneration, PrintInputGeneration, PrintOutputGeneration @@ -89,10 +89,7 @@ test_inputs = [inputs[x].reshape(-1).astype(np.float64) for x in inputs.files] test_outputs = [outputs[x].reshape(-1).astype(np.float64) for x in outputs.files] for index, num in enumerate(test_inputs): - # WIESP: Do not infer types and offset of empty arrays - if np.prod(num.shape) == 0: - continue - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset diff --git a/DeeployTest/testRunner_siracusa_l3dma.py b/DeeployTest/testRunner_siracusa_l3dma.py new file mode 100644 index 0000000000..7507bf8ec9 --- /dev/null +++ b/DeeployTest/testRunner_siracusa_l3dma.py @@ -0,0 +1,94 @@ +import os + +import numpy as np +from testUtils.codeGenerate 
import generateTestNetwork +from testUtils.dmaUtils import MemcpyLayer, MemcpyParser, MemcpyTileConstraint, MemcpyTypeChecker, generate_graph, \ + memcpyTemplate, prepare_deployer_with_custom_tiling, setup_pulp_deployer +from testUtils.testRunner import TestRunner, TestRunnerArgumentParser +from testUtils.typeMapping import baseTypeFromName, dtypeFromDeeployType + +from Deeploy.AbstractDataTypes import PointerClass +from Deeploy.CommonExtensions.CodeTransformationPasses.MemoryAllocation import ArgumentStructGeneration, \ + MemoryManagementGeneration +from Deeploy.DeeployTypes import CodeTransformation, NodeBinding, NodeMapper, _NoVerbosity +from Deeploy.Targets.PULPOpen.Bindings import L3MemoryAwareFunctionCallClosure, TilingCallClosure +from Deeploy.Targets.PULPOpen.CodeTransformationPasses.PULPL3Tiling import PULPL3Tiling +from Deeploy.Targets.PULPOpen.DMA.L3Dma import l3DmaHack +from Deeploy.TilingExtension.CodeTransformationPasses.TilingVariableReplacement import TilingVariableReplacement, \ + TilingVariableReplacementUpdate +from Deeploy.TilingExtension.TilerExtension import TilingReadyNodeBindings + +testRunnerArgumentParser = TestRunnerArgumentParser(tiling_arguments = True) +testRunnerArgumentParser.add_argument('--input-shape', + nargs = '+', + required = True, + dest = 'input_shape', + type = int, + help = "Shape of the copied tensor") +testRunnerArgumentParser.add_argument('--tile-shape', + nargs = '+', + required = True, + dest = 'tile_shape', + type = int, + help = "Shape of the tiles produced in the manual tiling solution") +testRunnerArgumentParser.add_argument('--node-count', + dest = 'node_count', + type = int, + default = 1, + help = "Number of generated memcpy nodes") +testRunnerArgumentParser.add_argument('--type', type = str, default = "uint8_t", help = "Tensor elements datatype") +testRunner = TestRunner('Siracusa', 'gvsoc', True, testRunnerArgumentParser) + +inputShape = testRunner._args.input_shape +tileShape = testRunner._args.tile_shape 
+node_count = testRunner._args.node_count +_type = baseTypeFromName(testRunner._args.type) +dtype = dtypeFromDeeployType(_type) +defaultMemory = "L3" +targetMemory = "L2" + +assert len(inputShape) == len(tileShape), \ + f'Input and tile shape should be of the same dimensionality. Received {len(inputShape)}D input shape vs. {len(tileShape)}D tile shape.' +assert all(tileDim <= inDim for inDim, tileDim in zip(inputShape, tileShape)), \ + f'Each tile shape dimension should be smaller than the corresponding input one. Received {tileShape} > {inputShape}' + +graph = generate_graph(node_count, inputShape, dtype) +inputTypes = {"input_0": PointerClass(_type)} +_DEEPLOYSTATEDIR = os.path.join(testRunner._dir_gen, "deeployStates") +deployer = setup_pulp_deployer(defaultMemory, targetMemory, graph, inputTypes, testRunner._args.doublebuffer, + _DEEPLOYSTATEDIR) + +transformer = CodeTransformation([ + TilingVariableReplacement(targetMemory), + TilingCallClosure(writeback = False, generateStruct = True), + TilingVariableReplacementUpdate(targetMemory), + PULPL3Tiling("L3", "L2", l3DmaHack), + ArgumentStructGeneration(), + L3MemoryAwareFunctionCallClosure(writeback = False), + MemoryManagementGeneration("L2"), + MemoryManagementGeneration("L3.*"), + MemoryManagementGeneration(), +]) + +binding = NodeBinding(MemcpyTypeChecker(), memcpyTemplate, transformer) +tilingReadyBindings = TilingReadyNodeBindings([binding], MemcpyTileConstraint()) +memcpyMapper = NodeMapper(MemcpyParser(), tilingReadyBindings) +memcpyMapping = {"Memcpy": MemcpyLayer([memcpyMapper])} +deployer.Platform.engines[0].Mapping.update(memcpyMapping) + +prepare_deployer_with_custom_tiling(deployer, defaultMemory, targetMemory, tileShape, testRunner._args.doublebuffer) + +if not testRunner._args.skipgen: + if dtype == np.float32: + test_inputs = np.random.rand(*inputShape) + else: + info = np.iinfo(dtype) + test_inputs = np.arange(stop = np.prod(inputShape), dtype = dtype).reshape(inputShape) + test_outputs =
test_inputs + generateTestNetwork(deployer, [test_inputs], [test_outputs], testRunner._dir_gen, _NoVerbosity) + +# Deconstructed testRunner.run() with skipped generation because we did the generation already +testRunner.configure_cmake_project() +testRunner.build_binary() +if not testRunner._args.skipsim: + testRunner.run_simulation() diff --git a/DeeployTest/testRunner_siracusa_mchandma.py b/DeeployTest/testRunner_siracusa_mchandma.py new file mode 100644 index 0000000000..c16f584b21 --- /dev/null +++ b/DeeployTest/testRunner_siracusa_mchandma.py @@ -0,0 +1,95 @@ +import os + +import numpy as np +from testUtils.codeGenerate import generateTestNetwork +from testUtils.dmaUtils import MemcpyLayer, MemcpyParser, MemcpyTileConstraint, MemcpyTypeChecker, generate_graph, \ + memcpyTemplate, prepare_deployer_with_custom_tiling, setup_pulp_deployer +from testUtils.testRunner import TestRunner, TestRunnerArgumentParser +from testUtils.typeMapping import baseTypeFromName, dtypeFromDeeployType + +from Deeploy.AbstractDataTypes import PointerClass +from Deeploy.CommonExtensions.CodeTransformationPasses.MemoryAllocation import ArgumentStructGeneration, \ + MemoryManagementGeneration +from Deeploy.DeeployTypes import CodeTransformation, NodeBinding, NodeMapper, _NoVerbosity +from Deeploy.Targets.PULPOpen.Bindings import MemoryAwareFunctionCallClosure, TilingCallClosure +from Deeploy.Targets.PULPOpen.CodeTransformationPasses.PULPClusterTiling import PULPClusterTiling +from Deeploy.Targets.PULPOpen.DMA.MchanDma import MchanDma +from Deeploy.TilingExtension.CodeTransformationPasses.TilingVariableReplacement import TilingVariableReplacement, \ + TilingVariableReplacementUpdate +from Deeploy.TilingExtension.TilerExtension import TilingReadyNodeBindings + +testRunnerArgumentParser = TestRunnerArgumentParser(tiling_arguments = True) +testRunnerArgumentParser.add_argument('--input-shape', + nargs = '+', + required = True, + dest = 'input_shape', + type = int, + help = "Shape of the 
copied tensor") +testRunnerArgumentParser.add_argument('--tile-shape', + nargs = '+', + required = True, + dest = 'tile_shape', + type = int, + help = "Shape of the tiles produced in the manual tiling solution") +testRunnerArgumentParser.add_argument('--node-count', + dest = 'node_count', + type = int, + default = 1, + help = "Number of generated memcpy nodes") +testRunnerArgumentParser.add_argument('--type', type = str, default = "uint8_t", help = "Tensor elements datatype") +testRunner = TestRunner('Siracusa', 'gvsoc', True, testRunnerArgumentParser) + +inputShape = testRunner._args.input_shape +tileShape = testRunner._args.tile_shape +node_count = testRunner._args.node_count +_type = baseTypeFromName(testRunner._args.type) +dtype = dtypeFromDeeployType(_type) +defaultMemory = "L2" +targetMemory = "L1" + +assert len(inputShape) == len(tileShape), \ + f'Input and tile shape should be of the same dimensionality. Received {len(inputShape)}D input shape vs. {len(tileShape)}D tile shape.' +assert all(tileDim <= inDim for inDim, tileDim in zip(inputShape, tileShape)), \ + f'Each tile shape dimension should be smaller than the corresponding input one. Received {tileShape} > {inputShape}' + +graph = generate_graph(node_count, inputShape, dtype) +inputTypes = {"input_0": PointerClass(_type)} +_DEEPLOYSTATEDIR = os.path.join(testRunner._dir_gen, "deeployStates") +deployer = setup_pulp_deployer(defaultMemory, targetMemory, graph, inputTypes, testRunner._args.doublebuffer, + _DEEPLOYSTATEDIR) + +transformer = CodeTransformation([ + TilingVariableReplacement(targetMemory), + TilingCallClosure(writeback = False, generateStruct = True), + TilingVariableReplacementUpdate(targetMemory), + PULPClusterTiling(defaultMemory, targetMemory, MchanDma()), + ArgumentStructGeneration(), + MemoryManagementGeneration(targetMemory), + TilingVariableReplacement(defaultMemory), + MemoryAwareFunctionCallClosure(writeback = False, generateStruct = True), + MemoryManagementGeneration(defaultMemory), + MemoryManagementGeneration(), +]) + +binding = NodeBinding(MemcpyTypeChecker(), memcpyTemplate, transformer) +tilingReadyBindings = TilingReadyNodeBindings([binding], MemcpyTileConstraint()) +memcpyMapper = NodeMapper(MemcpyParser(), tilingReadyBindings) +memcpyMapping = {"Memcpy": MemcpyLayer([memcpyMapper])} +deployer.Platform.engines[0].Mapping.update(memcpyMapping) + +prepare_deployer_with_custom_tiling(deployer, defaultMemory, targetMemory, tileShape, testRunner._args.doublebuffer) + +if not testRunner._args.skipgen: + if dtype == np.float32: + test_inputs = np.random.rand(*inputShape) + else: + info = np.iinfo(dtype) + test_inputs = np.arange(stop = np.prod(inputShape), dtype = dtype).reshape(inputShape) + test_outputs = test_inputs + generateTestNetwork(deployer, [test_inputs], [test_outputs], testRunner._dir_gen, _NoVerbosity) + +# Deconstructed testRunner.run() with skipped generation because we did the generation already +testRunner.configure_cmake_project() +testRunner.build_binary() +if not testRunner._args.skipsim: + testRunner.run_simulation() diff --git a/DeeployTest/testRunner_snitch_dma.py
b/DeeployTest/testRunner_snitch_dma.py new file mode 100644 index 0000000000..96e6542bb9 --- /dev/null +++ b/DeeployTest/testRunner_snitch_dma.py @@ -0,0 +1,100 @@ +import os + +import numpy as np +from testUtils.codeGenerate import generateTestNetwork +from testUtils.dmaUtils import MemcpyLayer, MemcpyParser, MemcpyTileConstraint, MemcpyTypeChecker, generate_graph, \ + memcpyTemplate, prepare_deployer_with_custom_tiling, setup_snitch_deployer +from testUtils.testRunner import TestRunner, TestRunnerArgumentParser +from testUtils.typeMapping import baseTypeFromName, dtypeFromDeeployType + +from Deeploy.AbstractDataTypes import PointerClass +from Deeploy.CommonExtensions.CodeTransformationPasses.MemoryAllocation import ArgumentStructGeneration, \ + MemoryManagementGeneration +from Deeploy.DeeployTypes import CodeTransformation, NodeBinding, NodeMapper, _NoVerbosity +from Deeploy.Targets.Snitch.Bindings import MemoryAwareFunctionCallClosure, TilingCallClosure +from Deeploy.Targets.Snitch.CodeTransformationPasses import SnitchClusterTiling +from Deeploy.Targets.Snitch.CodeTransformationPasses.SnitchClusterSynch import SnitchSynchCoresPass +from Deeploy.Targets.Snitch.CodeTransformationPasses.SnitchCoreFilter import SnitchCoreFilterPass +from Deeploy.Targets.Snitch.CodeTransformationPasses.SnitchProfileExecutionBlock import SnitchProfileExecutionBlockPass +from Deeploy.Targets.Snitch.DMA.SnitchDma import SnitchDma +from Deeploy.TilingExtension.CodeTransformationPasses.TilingVariableReplacement import TilingVariableReplacement, \ + TilingVariableReplacementUpdate +from Deeploy.TilingExtension.TilerExtension import TilingReadyNodeBindings + +testRunnerArgumentParser = TestRunnerArgumentParser(tiling_arguments = True) +testRunnerArgumentParser.add_argument('--input-shape', + nargs = '+', + required = True, + dest = 'input_shape', + type = int, + help = "Shape of the copied tensor") +testRunnerArgumentParser.add_argument('--tile-shape', + nargs = '+', + required = True, + 
dest = 'tile_shape', + type = int, + help = "Shape of the tiles produced in the manual tiling solution") +testRunnerArgumentParser.add_argument('--node-count', + dest = 'node_count', + type = int, + default = 1, + help = "Number of generated memcpy nodes") +testRunnerArgumentParser.add_argument('--type', type = str, default = "uint8_t", help = "Tensor elements datatype") +testRunner = TestRunner('Snitch', 'gvsoc', tiling = True, argument_parser = testRunnerArgumentParser) + +inputShape = testRunner._args.input_shape +tileShape = testRunner._args.tile_shape +node_count = testRunner._args.node_count +_type = baseTypeFromName(testRunner._args.type) +dtype = dtypeFromDeeployType(_type) +defaultMemory = "L2" +targetMemory = "L1" + +assert len(inputShape) == len(tileShape), \ + f'Input and tile shape should be of the same dimensionality. Received {len(inputShape)}D input shape vs. {len(tileShape)}D tile shape.' +assert all(tileDim <= inDim for inDim, tileDim in zip(inputShape, tileShape)), \ + f'Each tile shape dimension should be smaller than the corresponding input one. Received {tileShape} > {inputShape}' + +graph = generate_graph(node_count, inputShape, dtype) +inputTypes = {"input_0": PointerClass(_type)} +_DEEPLOYSTATEDIR = os.path.join(testRunner._dir_gen, "deeployStates") +deployer = setup_snitch_deployer(defaultMemory, targetMemory, graph, inputTypes, testRunner._args.doublebuffer, + _DEEPLOYSTATEDIR) + +transformer = CodeTransformation([ + SnitchCoreFilterPass("compute"), + SnitchProfileExecutionBlockPass(), + TilingVariableReplacement(targetMemory), + TilingCallClosure(writeback = False), + SnitchSynchCoresPass(), + TilingVariableReplacementUpdate(targetMemory), + SnitchClusterTiling(defaultMemory, targetMemory, SnitchDma()), + ArgumentStructGeneration(), + MemoryManagementGeneration(targetMemory), + MemoryAwareFunctionCallClosure(writeback = False, generateStruct = True), + MemoryManagementGeneration(defaultMemory), + MemoryManagementGeneration(), +]) + +binding = NodeBinding(MemcpyTypeChecker(), memcpyTemplate, transformer) +tilingReadyBindings = TilingReadyNodeBindings([binding], MemcpyTileConstraint()) +memcpyMapper = NodeMapper(MemcpyParser(), tilingReadyBindings) +memcpyMapping = {"Memcpy": MemcpyLayer([memcpyMapper])} +deployer.Platform.engines[0].Mapping.update(memcpyMapping) + +prepare_deployer_with_custom_tiling(deployer, defaultMemory, targetMemory, tileShape, testRunner._args.doublebuffer) + +if not testRunner._args.skipgen: + if dtype == np.float32: + test_inputs = np.random.rand(*inputShape) + else: + info = np.iinfo(dtype) + test_inputs = np.arange(stop = np.prod(inputShape), dtype = dtype).reshape(inputShape) + test_outputs = test_inputs + generateTestNetwork(deployer, [test_inputs], [test_outputs], testRunner._dir_gen, _NoVerbosity) + +# Deconstructed testRunner.run() with skipped generation because we did the generation already +testRunner.configure_cmake_project() +testRunner.build_binary() +if not testRunner._args.skipsim: + testRunner.run_simulation() diff --git
a/DeeployTest/testSchedulingExtension.py b/DeeployTest/testSchedulingExtension.py index d6372def22..bd339b93dd 100644 --- a/DeeployTest/testSchedulingExtension.py +++ b/DeeployTest/testSchedulingExtension.py @@ -33,7 +33,7 @@ import pytest from testUtils.platformMapping import mapDeployer, mapPlatform, setupMemoryPlatform from testUtils.testRunner import TestGeneratorArgumentParser -from testUtils.typeMapping import inferInputType +from testUtils.typeMapping import inferTypeAndOffset from Deeploy.DeeployTypes import NetworkContext, NetworkDeployer, ONNXLayer, Schedule, StructBuffer, TransientBuffer, \ VariableBuffer @@ -249,7 +249,7 @@ def setupDeployer(memoryHierarchy: MemoryHierarchy, graph: gs.Graph) -> NetworkD platform, signProp = mapPlatform(args.platform) for index, num in enumerate(test_inputs): - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset diff --git a/DeeployTest/testSlice_PULP.py b/DeeployTest/testSlice_PULP.py index dda9d13a58..35052381ee 100644 --- a/DeeployTest/testSlice_PULP.py +++ b/DeeployTest/testSlice_PULP.py @@ -30,12 +30,12 @@ import numpy as np import onnx import onnx_graphsurgeon as gs -from testUtils.codeGenerate import generateTestInputsHeader, generateTestNetworkHeader, \ - generateTestNetworkImplementation, generateTestOutputsHeader +from testUtils.codeGenerate import generateTestNetwork from testUtils.platformMapping import mapDeployer, setupMemoryPlatform from testUtils.testRunner import escapeAnsi -from testUtils.typeMapping import inferInputType +from testUtils.typeMapping import inferTypeAndOffset +from Deeploy.DeeployTypes import _NoVerbosity from Deeploy.MemoryLevelExtension.MemoryLevels import MemoryHierarchy, MemoryLevel from Deeploy.MemoryLevelExtension.NetworkDeployers.MemoryLevelDeployer import MemoryDeployerWrapper from Deeploy.Targets.PULPOpen.Platform import PULPPlatform @@ -86,7 +86,7 @@ 
platform = PULPPlatform() for index, num in enumerate(test_inputs): - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset @@ -100,9 +100,9 @@ deployer.frontEnd() deployer.parse(deployer.default_channels_first) - deployer.ctxt.lookup('onnx::Slice_5')._memoryLevel = "L1" - deployer.ctxt.lookup('onnx::Slice_5').allocTemplate = pulpL1AllocateTemplate - deployer.ctxt.lookup('onnx::Slice_5').deallocTemplate = pulpL1FreeTemplate + deployer.ctxt.lookup('onnxSlice_5_tensor')._memoryLevel = "L1" + deployer.ctxt.lookup('onnxSlice_5_tensor').allocTemplate = pulpL1AllocateTemplate + deployer.ctxt.lookup('onnxSlice_5_tensor').deallocTemplate = pulpL1FreeTemplate deployer.midEnd() @@ -110,35 +110,17 @@ deployer.prepared = True deployer.generateInferenceCode() - # Create input and output vectors - os.makedirs('TEST_SIRACUSA/Tests/testSlice', exist_ok = True) - - testInputStr = generateTestInputsHeader(deployer, test_inputs, inputTypes, inputOffsets) - f = open('TEST_SIRACUSA/Tests/testSlice/testinputs.h', "w") - f.write(testInputStr) - f.close() - - testOutputStr = generateTestOutputsHeader(deployer, test_outputs, signProp, False) - f = open('TEST_SIRACUSA/Tests/testSlice/testoutputs.h', "w") - f.write(testOutputStr) - f.close() - - # Generate code for Network - testNetworkHeaderStr = generateTestNetworkHeader(deployer, platform) - f = open('TEST_SIRACUSA/Tests/testSlice/Network.h', "w") - f.write(testNetworkHeaderStr) - f.close() - - testNetworkImplementationStr = generateTestNetworkImplementation(deployer, platform) - f = open('TEST_SIRACUSA/Tests/testSlice/Network.c', "w") - f.write(testNetworkImplementationStr) - f.close() - - clang_format = "{BasedOnStyle: llvm, IndentWidth: 2, ColumnLimit: 160}" - os.system(f'clang-format -i --style="{clang_format}" TEST_SIRACUSA/Tests/testSlice/Network.c') - os.system(f'clang-format -i --style="{clang_format}" 
TEST_SIRACUSA/Tests/testSlice/Network.h') - os.system(f'clang-format -i --style="{clang_format}" TEST_SIRACUSA/Tests/testSlice/testoutputs.h') - os.system(f'clang-format -i --style="{clang_format}" TEST_SIRACUSA/Tests/testSlice/testinputs.h') + # Offset the values if signprop + if signProp: + test_inputs = [value - inputOffsets[f"input_{i}"] for i, value in enumerate(test_inputs)] + + for i, values in enumerate(test_outputs): + buffer = deployer.ctxt.lookup(f"output_{i}") + isFloat = buffer._type.referencedType.typeName == "float32_t" + if not isFloat and not buffer._signed: + values -= buffer.nLevels // 2 + + generateTestNetwork(deployer, test_inputs, test_outputs, 'TEST_SIRACUSA/Tests/testSlice', _NoVerbosity) os.system( f"$CMAKE -DTOOLCHAIN={args.toolchain} -DTOOLCHAIN_INSTALL_DIR={_TOOLCHAIN_DIR} -DTESTNAME=testSlice -DGENERATED_SOURCE=TEST_SIRACUSA/Tests/testSlice -Dplatform=Siracusa -B TEST_SIRACUSA/build -DNUM_CORES=1 .." diff --git a/DeeployTest/testTilerExtension.py b/DeeployTest/testTilerExtension.py index edf1e6d1cc..6ee23b40fc 100644 --- a/DeeployTest/testTilerExtension.py +++ b/DeeployTest/testTilerExtension.py @@ -33,7 +33,7 @@ import pytest from testUtils.platformMapping import mapDeployer, mapPlatform, setupMemoryPlatform from testUtils.testRunner import TestGeneratorArgumentParser -from testUtils.typeMapping import inferInputType +from testUtils.typeMapping import inferTypeAndOffset from Deeploy.DeeployTypes import GlobalDefinition, NetworkDeployer, ONNXLayer, Schedule, TransientBuffer from Deeploy.MemoryLevelExtension.MemoryLevels import MemoryHierarchy, MemoryLevel @@ -165,7 +165,7 @@ def setupDeployer(memoryHierarchy: MemoryHierarchy, graph: gs.Graph) -> NetworkD platform, signProp = mapPlatform(args.platform) for index, num in enumerate(test_inputs): - _type, offset = inferInputType(num, signProp)[0] + _type, offset = inferTypeAndOffset(num, signProp) inputTypes[f"input_{index}"] = _type inputOffsets[f"input_{index}"] = offset if 
"simpleRegression" in args.dir: diff --git a/DeeployTest/testUtils/codeGenerate.py b/DeeployTest/testUtils/codeGenerate.py index 5e572643a4..eb148ad2be 100644 --- a/DeeployTest/testUtils/codeGenerate.py +++ b/DeeployTest/testUtils/codeGenerate.py @@ -24,11 +24,11 @@ # limitations under the License. import os -from typing import Dict, List, Optional, Tuple +from typing import List, Tuple import numpy as np -from Deeploy.DeeployTypes import ConstantBuffer, DeploymentPlatform, NetworkDeployer, VariableBuffer +from Deeploy.DeeployTypes import CodeGenVerbosity, ConstantBuffer, NetworkDeployer, VariableBuffer from Deeploy.Targets.MemPool.Platform import MemPoolPlatform _TEXT_ALIGN = 30 @@ -50,115 +50,78 @@ def _shapeBroadcast(ctxt, value, name): return broadcastNum -def generateTestInputsHeader(deployer: NetworkDeployer, - test_inputs: List, - inputTypes: Dict, - inputOffsets: Dict, - verbose: Optional[bool] = None) -> str: +def generateTestInputsHeader(deployer: NetworkDeployer, test_inputs: List) -> str: + vectors = [] retStr = "" - inputNames = [deployer.ctxt.lookup(buf.name) for buf in deployer.graph.inputs] - inputTypes = {buf.name: buf._type for buf in inputNames} - - for index, num in enumerate(test_inputs): - - if f"input_{index}" not in inputTypes.keys(): + for index, values in enumerate(test_inputs): + # WIESEP: Correctly handle empty arrays + if np.prod(values.shape) == 0: continue - # WIESEP: Correctly handle empty arrays - if np.prod(num.shape) == 0: + bufferName = f"input_{index}" + + #LMACAN: We have some tests which have extra inputs and this is a hack to circumvent that + if not deployer.ctxt.is_buffer(bufferName): continue - test_inputs[index] -= inputOffsets[f"input_{index}"] + values = _shapeBroadcast(deployer.ctxt, values, bufferName) - broadcastNum = _shapeBroadcast(deployer.ctxt, num, f"input_{index}") + buffer = deployer.ctxt.lookup(bufferName) + typeName = buffer._type.referencedType.typeName + typeWidth = buffer._type.referencedType.typeWidth - 
data_type = inputTypes[f"input_{index}"] - data_width = inputTypes[f"input_{index}"].referencedType.typeWidth + vectorName = f"testInputVector{index}" + vectors.append(vectorName) - retStr += f"{data_type.referencedType.typeName} testInputVector{index}[] =" + retStr += f"{typeName} {vectorName}[] =" retStr += "{" - if data_type.referencedType.typeName == 'float32_t': - list_str = (", ").join([f'{x}f' if not (np.isinf(x) or np.isnan(x)) else str(x) for x in broadcastNum]) + if typeName == 'float32_t': + list_str = (", ").join([f'{x}f' if not (np.isinf(x) or np.isnan(x)) else str(x) for x in values]) else: - list_str = (", ").join([str(x) for x in broadcastNum]) + list_str = (", ").join([str(x) for x in values]) - # WIESEP: Arrays have to be 4 byte alinged (at lest in banshee) - bytes = len(broadcastNum) * (data_width // 8) + # WIESEP: Arrays have to be 4 byte aligned (at least in banshee) + bytes = (len(values) * typeWidth) // 8 if bytes % 4 != 0: - bytes = 4 * int((bytes / 4 + 1)) - padding = (bytes * 8) // data_width - len(broadcastNum) + paddingBytes = 4 - bytes % 4 + paddingElements = paddingBytes * 8 // typeWidth list_str += ", " - list_str += (", ").join([str(0) for x in range(padding)]) + list_str += (", ").join([str(0) for _ in range(paddingElements)]) retStr += list_str retStr += "};\n" - retStr += f"void* testInputVector[{len(inputTypes)}] = " + "{" - retStr += ", ".join([ - f"testInputVector{idx}" for idx, _ in enumerate(test_inputs) - if np.prod(test_inputs[idx].shape) != 0 and f"input_{idx}" in inputTypes.keys() - ]) + retStr += f"void* testInputVector[{len(vectors)}] = {{" + retStr += ", ".join(vectors) retStr += "};\n" - - if verbose: - print('Input:') - for name in inputTypes.keys(): - buf = deployer.ctxt.lookup(name) - print(f" - '{name}': Type: {buf._type.referencedType.typeName}, Offset: {inputOffsets[name]}") - return retStr -def generateTestOutputsHeader(deployer: NetworkDeployer, - test_outputs: List, - signProp: Optional[bool] = None, - verbose: 
Optional[bool] = None) -> str: - - output_signed = {} - output_n_levels = {} - output_data_type = {} - - if signProp is None: - signProp = False - - if verbose is None: - verbose = False - +def generateTestOutputsHeader(deployer: NetworkDeployer, test_outputs: List[np.ndarray]) -> str: retStr = "" + for index, values in enumerate(test_outputs): + typeName = deployer.ctxt.lookup(f'output_{index}')._type.referencedType.typeName + typeWidth = deployer.ctxt.lookup(f'output_{index}')._type.referencedType.typeWidth - for index, num in enumerate(test_outputs): - output_data_type[f"output_{index}"] = deployer.ctxt.lookup(f'output_{index}')._type - - data_type = output_data_type[f"output_{index}"] - isdatafloat = (data_type.referencedType.typeName == "float32_t") - - output_n_levels[f"output_{index}"] = deployer.ctxt.lookup(f'output_{index}').nLevels - output_signed[f"output_{index}"] = deployer.ctxt.lookup(f'output_{index}')._signed - if signProp and not isdatafloat: - test_outputs[index] -= int( - ((1 - output_signed[f"output_{index}"]) * (output_n_levels[f"output_{index}"] / 2))) - - data_width = data_type.referencedType.typeWidth - retStr += f"#define OUTPUTTYPE {data_type.referencedType.typeName}\n" - if isdatafloat: - retStr += f"#define ISOUTPUTFLOAT 1\n" - else: - retStr += f"#define ISOUTPUTFLOAT 0\n" - retStr += f"{data_type.referencedType.typeName} testOutputVector{index}[] =" + retStr += f"#define OUTPUTTYPE {typeName}\n" + retStr += f"#define ISOUTPUTFLOAT {int(typeName == 'float32_t')}\n" + retStr += f"{typeName} testOutputVector{index}[] =" retStr += "{" - # WIESEP: Arrays have to be 4 byte alinged (at lest in banshee) - if data_type.referencedType.typeName == 'float32_t': - list_str = (", ").join([f'{x}f' if not (np.isinf(x) or np.isnan(x)) else str(x) for x in num]) + values = values.flatten() + + if typeName == "float32_t": + list_str = (", ").join([f'{x}f' if not (np.isinf(x) or np.isnan(x)) else str(x) for x in values]) else: - list_str = (", 
").join([str(x) for x in num]) + list_str = (", ").join([str(x) for x in values]) - bytes = len(num) * (data_width // 8) + # WIESEP: Arrays have to be 4 byte aligned (at least in banshee) + bytes = (len(values) * typeWidth) // 8 if bytes % 4 != 0: - bytes = 4 * int((bytes / 4 + 1)) - padding = (bytes * 8) // data_width - len(num) + paddingBytes = bytes % 4 + paddingElements = paddingBytes * 8 // typeWidth list_str += ", " - list_str += (", ").join([str(0) for x in range(padding)]) + list_str += (", ").join([str(0) for _ in range(paddingElements)]) retStr += list_str retStr += "};\n" @@ -166,27 +129,16 @@ def generateTestOutputsHeader(deployer: NetworkDeployer, retStr += f"void* testOutputVector[{len(test_outputs)}] = " + "{" retStr += ", ".join([f"testOutputVector{idx}" for idx, _ in enumerate(test_outputs)]) retStr += "};\n" - - if verbose: - print('Output:') - if signProp: - for (name, buf), (_, n_level), (_, signed) in zip(output_data_type.items(), output_n_levels.items(), - output_signed.items()): - print(f" - '{name}': Type: {buf.referencedType.typeName}, nLevels: {n_level}, Signed: {signed}") - else: - for (name, buf) in output_data_type.items(): - print(f" - '{name}': Type: {buf.referencedType.typeName}") - return retStr -def generateTestNetworkHeader(deployer: NetworkDeployer, platform: DeploymentPlatform) -> str: +def generateTestNetworkHeader(deployer: NetworkDeployer) -> str: retStr = "" retStr += """ - #ifndef __DEEPLOY_HEADER_ - #define __DEEPLOY_HEADER_ + #ifndef __DEEPLOY_HEADER__ + #define __DEEPLOY_HEADER__ #include #include #include @@ -206,13 +158,7 @@ def generateTestNetworkHeader(deployer: NetworkDeployer, platform: DeploymentPla return retStr -def generateTestNetworkImplementation(deployer: NetworkDeployer, - platform: DeploymentPlatform, - verbose: Optional[bool] = None) -> str: - - if verbose is None: - verbose = False - +def generateTestNetworkImplementation(deployer: NetworkDeployer, verbosityCfg: CodeGenVerbosity) -> str: retStr = "" 
retStr += """#include @@ -230,7 +176,7 @@ def generateTestNetworkImplementation(deployer: NetworkDeployer, retStr += deployer.generateGlobalDefinitionCode() # WIESEP: Mempool assigns section attributes to intermediate buffers to allow . - if isinstance(platform, MemPoolPlatform): + if isinstance(deployer.Platform, MemPoolPlatform): retStr += deployer.generateInferenceInitializationCode() retStr += """ void RunNetwork(__attribute__((unused)) uint32_t core_id, __attribute__((unused)) uint32_t numThreads){ @@ -241,7 +187,7 @@ def generateTestNetworkImplementation(deployer: NetworkDeployer, """ retStr += deployer.generateInferenceInitializationCode() - retStr += deployer.generateFunction(verbose) + retStr += deployer.generateFunction(verbosityCfg) retStr += """ } @@ -308,3 +254,36 @@ def dumpBuffer(buf: VariableBuffer, path: str): if hasattr(buf, "extName"): pathName = os.path.join(path, f"{buf.extName}.hex") dumpBuffer(buf, pathName) + + +def generateTestNetwork(deployer: NetworkDeployer, test_inputs: List[np.ndarray], test_outputs: List[np.ndarray], + dumpdir: str, verbosityCfg: CodeGenVerbosity) -> None: + assert deployer.prepared, "An unprepared deployer was given" + + # Create input and output vectors + os.makedirs(dumpdir, exist_ok = True) + + testInputStr = generateTestInputsHeader(deployer, test_inputs) + with open(f'{dumpdir}/testinputs.h', "w") as f: + f.write(testInputStr) + + testOutputStr = generateTestOutputsHeader(deployer, test_outputs) + with open(f'{dumpdir}/testoutputs.h', "w") as f: + f.write(testOutputStr) + + # Generate code for Network + testNetworkHeaderStr = generateTestNetworkHeader(deployer) + with open(f'{dumpdir}/Network.h', "w") as f: + f.write(testNetworkHeaderStr) + + testNetworkImplementationStr = generateTestNetworkImplementation(deployer, verbosityCfg) + with open(f'{dumpdir}/Network.c', "w") as f: + f.write(testNetworkImplementationStr) + + generateL3HexDump(deployer, os.path.join(f'{dumpdir}', 'hex'), test_inputs, test_outputs) + + 
clang_format = "{BasedOnStyle: llvm, IndentWidth: 2, ColumnLimit: 160}" + os.system(f'clang-format -i --style="{clang_format}" {dumpdir}/Network.c') + os.system(f'clang-format -i --style="{clang_format}" {dumpdir}/Network.h') + os.system(f'clang-format -i --style="{clang_format}" {dumpdir}/testoutputs.h') + os.system(f'clang-format -i --style="{clang_format}" {dumpdir}/testinputs.h') diff --git a/DeeployTest/testUtils/dmaUtils.py b/DeeployTest/testUtils/dmaUtils.py new file mode 100644 index 0000000000..f9722168d6 --- /dev/null +++ b/DeeployTest/testUtils/dmaUtils.py @@ -0,0 +1,373 @@ +import math +from typing import Dict, List, Optional, Tuple, Type + +import numpy.typing as npt +import onnx_graphsurgeon as gs + +from Deeploy.AbstractDataTypes import BaseType, Pointer, PointerClass +from Deeploy.CommonExtensions.DataTypes import minimalIntegerType +from Deeploy.DeeployTypes import NetworkContext, NetworkDeployer, NodeParser, NodeTemplate, NodeTypeChecker, \ + ONNXLayer, OperatorRepresentation, VariableBuffer +from Deeploy.MemoryLevelExtension.MemoryLevels import MemoryHierarchy, MemoryLevel +from Deeploy.MemoryLevelExtension.NetworkDeployers.MemoryLevelDeployer import MemoryDeployerWrapper, \ + MemoryPlatformWrapper +from Deeploy.MemoryLevelExtension.OptimizationPasses.MemoryLevelAnnotationPasses import AnnotateDefaultMemoryLevel, \ + AnnotateIOMemoryLevel +from Deeploy.Targets.PULPOpen.Deployer import PULPDeployer +from Deeploy.Targets.PULPOpen.Platform import MemoryPULPPlatform, PULPOptimizer +from Deeploy.Targets.Snitch.Deployer import SnitchDeployer +from Deeploy.Targets.Snitch.Platform import SnitchOptimizer, SnitchPlatform +from Deeploy.TilingExtension.MemoryConstraints import MemoryConstraint, NodeMemoryConstraint, \ + PatternMemoryConstraints, TensorMemoryConstraint +from Deeploy.TilingExtension.MemoryScheduler import MemoryBlock +from Deeploy.TilingExtension.TileConstraint import TileConstraint +from Deeploy.TilingExtension.TilerExtension import 
MemoryMap, TilerDeployerWrapper, TilingSolution +from Deeploy.TilingExtension.TilingCodegen import AbsoluteHyperRectangle, TilingSchedule, VariableReplacementScheme + +from .tilingUtils import DBOnlyL3Tiler, DBTiler, SBTiler + +memcpyTemplate = NodeTemplate(""" +memcpy((void *)${dest}, (void *)${src}, ${size}); +""") + + +# Same interface as NodeTypeChecker but allow any input type and the +# output type matches the input type. +class MemcpyTypeChecker(NodeTypeChecker): + + def __init__(self): + super().__init__([], []) + + def typeInferOutput(self, ctxt: NetworkContext, node: gs.Node, + operatorRepresentation: OperatorRepresentation) -> NetworkContext: + assert len(node.inputs) == 1 and len(node.outputs) == 1 + buffer_in = ctxt.lookup(node.inputs[0].name) + ctxt.annotateType(node.outputs[0].name, buffer_in._type) + return ctxt + + def typeCheckNodeInputs(self, ctxt: NetworkContext, node: gs.Node) -> bool: + return True + + def typeInferGlobalCtxt(self, ctxt: NetworkContext, node: gs.Node) -> NetworkContext: + # Whatever it has already annotated, it's good + return ctxt + + +class MemcpyTileConstraint(TileConstraint): + + @classmethod + def serializeTilingSolution( + cls, tilingSolution: NodeMemoryConstraint, absoluteOutputCubes: List[AbsoluteHyperRectangle], + targetMemLevel: str, ctxt: NetworkContext, + operatorRepresentation: OperatorRepresentation) -> Tuple[VariableReplacementScheme, TilingSchedule]: + inputLoadSchedule = [{"src": absCube.rectangle} for absCube in absoluteOutputCubes] + outputLoadSchedule = [{"dest": absCube.rectangle} for absCube in absoluteOutputCubes] + inputOffsets, outputOffsets = cls.extractBaseAddr(tilingSolution, targetMemLevel, operatorRepresentation, + ["src", "dest"]) + + def size(abs: AbsoluteHyperRectangle, buffer: VariableBuffer) -> int: + return math.prod(abs.rectangle.dims) * (buffer._type.referencedType.typeWidth // 8) + + buffer_src = ctxt.lookup(operatorRepresentation['src']) + assert isinstance(buffer_src, VariableBuffer) + 
+ replacements: Dict[str, List[int]] = {"size": [size(abs, buffer_src) for abs in absoluteOutputCubes]} + replacement_types = {key: PointerClass(minimalIntegerType(values)) for key, values in replacements.items()} + + return VariableReplacementScheme(replacements, + replacement_types), TilingSchedule(inputOffsets, outputOffsets, + inputLoadSchedule, outputLoadSchedule) + + +class MemcpyParser(NodeParser): + + def parseNode(self, node: gs.Node) -> bool: + return len(node.inputs) == 1 and len(node.outputs) == 1 + + def parseNodeCtxt(self, + ctxt: NetworkContext, + node: gs.Node, + channels_first: bool = True) -> Tuple[NetworkContext, bool]: + assert len(node.inputs) == 1 and len(node.outputs) == 1 + src = ctxt.lookup(node.inputs[0].name) + self.operatorRepresentation['src'] = src.name + self.operatorRepresentation['dest'] = ctxt.lookup(node.outputs[0].name).name + self.operatorRepresentation['size'] = math.prod(src.shape) * (src._type.referencedType.typeWidth // 8) + return ctxt, True + + +class MemcpyLayer(ONNXLayer): + pass + + +def generate_graph(nodeCount: int, shape: Tuple[int, ...], dtype: npt.DTypeLike) -> gs.Graph: + assert nodeCount > 0 + + tensor_in = gs.Variable(name = "input_0", dtype = dtype, shape = shape) + + nodes = [] + for i in range(nodeCount): + tensor_out = gs.Variable(name = f"out_{i}", dtype = dtype, shape = shape) + nodes.append(gs.Node("Memcpy", f"memcpy_{i}", {}, [tensor_in], [tensor_out])) + tensor_in = tensor_out + + return gs.Graph(nodes, [nodes[0].inputs[0]], [nodes[-1].outputs[0]], "dma_test_graph") + + +def generate_tiling(ctxt: NetworkContext, memoryStart: str, memoryOrder: List[str], memoryHierarchy: MemoryHierarchy, + inputShape: Tuple[int, ...], tileShape: Tuple[int, ...], graph: gs.Graph, _type: BaseType, + doublebuffer: bool) -> Tuple[TilingSolution, MemoryMap]: + assert memoryStart in memoryOrder + memoryStartIndex = memoryOrder.index(memoryStart) + + if memoryStartIndex + 1 < len(memoryOrder): + memoryMultibuffer = 
memoryOrder[memoryOrder.index(memoryStart) + 1] + else: + memoryMultibuffer = None + + if memoryStartIndex + 2 < len(memoryOrder): + singleTileMemories = memoryOrder[memoryStartIndex + 2:] + else: + singleTileMemories = [] + + inputSize = math.prod(inputShape) + tileSize = math.prod(tileShape) + + def assertFitsInMemory(size: int, memory: str) -> None: + memorySize = memoryHierarchy.memoryLevels[memory].size + assert size <= memorySize, f"The required tensor space is too big for the {memory} memory. Required space: {size}, memory space: {memorySize}" + + inputSizeInBytes = inputSize * (_type.typeWidth // 8) + assertFitsInMemory(2 * inputSizeInBytes, memoryStart) + + tileSizeInBytes = tileSize * (_type.typeWidth // 8) + for memory in singleTileMemories: + assertFitsInMemory(2 * tileSizeInBytes, memory) + + if doublebuffer: + multiBufferCoefficient = 2 + else: + multiBufferCoefficient = 1 + + multibufferSizeInBytes = tileSizeInBytes * multiBufferCoefficient + if memoryMultibuffer is not None: + assertFitsInMemory(multibufferSizeInBytes + tileSizeInBytes, memoryMultibuffer) + + inputMultibufferAddrSpace = (0, multibufferSizeInBytes) + outputMultibufferAddrSpace = (multibufferSizeInBytes, 2 * multibufferSizeInBytes) + + inputTileAddrSpace = (0, tileSizeInBytes) + outputTileAddrSpace = (tileSizeInBytes, 2 * tileSizeInBytes) + + # Tiling Solution + + tilingSolution = [] + + def generateMemoryConstraint(memory: str, shape: Tuple[int, ...], multiBufferCoefficient: int, + addrSpace: Optional[Tuple[int, int]]) -> MemoryConstraint: + size = math.prod(shape) + mc = MemoryConstraint(memory, size) + mc.shape = shape + mc.multiBufferCoefficient = multiBufferCoefficient + if addrSpace is not None: + mc.addrSpace = addrSpace + return mc + + for node in graph.nodes: + inputMemoryConstraints = {} + outputMemoryConstraints = {} + for i, memory in enumerate(memoryOrder[memoryOrder.index(memoryStart):]): + if i == 0: + inputMc = generateMemoryConstraint(memory = memory, + shape = 
inputShape, + multiBufferCoefficient = 1, + addrSpace = None) + outputMc = generateMemoryConstraint(memory = memory, + shape = inputShape, + multiBufferCoefficient = 1, + addrSpace = None) + elif i == 1: + inputMc = generateMemoryConstraint(memory = memory, + shape = tileShape, + multiBufferCoefficient = multiBufferCoefficient, + addrSpace = inputMultibufferAddrSpace) + outputMc = generateMemoryConstraint(memory = memory, + shape = tileShape, + multiBufferCoefficient = multiBufferCoefficient, + addrSpace = outputMultibufferAddrSpace) + else: + inputMc = generateMemoryConstraint(memory = memory, + shape = tileShape, + multiBufferCoefficient = 1, + addrSpace = inputTileAddrSpace) + outputMc = generateMemoryConstraint(memory = memory, + shape = tileShape, + multiBufferCoefficient = 1, + addrSpace = outputTileAddrSpace) + inputMemoryConstraints[memory] = inputMc + outputMemoryConstraints[memory] = outputMc + + inputTensorMemoryConstraint = TensorMemoryConstraint(tensorName = node.inputs[0].name, + constraints = inputMemoryConstraints, + ctxt = ctxt) + + outputTensorMemoryConstraint = TensorMemoryConstraint(tensorName = node.outputs[0].name, + constraints = outputMemoryConstraints, + ctxt = ctxt) + + nodeMemoryConstraint = NodeMemoryConstraint() + nodeMemoryConstraint.addTensorConstraint(inputTensorMemoryConstraint, 'input') + nodeMemoryConstraint.addTensorConstraint(outputTensorMemoryConstraint, 'output') + + patternMemoryConstraints = PatternMemoryConstraints() + patternMemoryConstraints.addConstraint(nodeMemoryConstraint) + + tilingSolution.append(patternMemoryConstraints) + + # Memory Map + + # Initialize an empty memory map + memoryMap = {memory: [[] for _ in range(len(graph.nodes) + 1)] for memory in memoryOrder} + + # Set memoryStart memory + + def appendMemoryMapStart(tensorName: str, lifetime: Tuple[int, int], addrSpace: Tuple[int, int]) -> None: + memoryMap[memoryStart][-1].append(MemoryBlock(tensorName, memoryStart, lifetime, addrSpace)) + + addrSpacePing = 
(0, inputSizeInBytes) + addrSpacePong = (inputSizeInBytes, 2 * inputSizeInBytes) + + ## First input tensor has a special lifetime (0, 0) + appendMemoryMapStart(graph.nodes[0].inputs[0].name, (0, 0), addrSpacePing) + + for i, node in enumerate(graph.nodes): + # Start with addrSpacePong because we used "Ping" for the first input tensor + appendMemoryMapStart(node.outputs[0].name, (i, i + 1), addrSpacePong if i % 2 == 0 else addrSpacePing) + + ## Set the rest + + def setMemoryMapRest(memory: str, inputAddrSpace: Tuple[int, int], outputAddrSpace: Tuple[int, int]) -> None: + for i, node in enumerate(graph.nodes): + # Empirically concluded from looking at produced memory maps + if i + 1 == len(graph.nodes): + endLifetime = i + 1 + else: + endLifetime = i + + memoryMap[memory][i].extend([ + MemoryBlock(name = node.inputs[0].name, level = memory, lifetime = (i, i), addrSpace = inputAddrSpace), + MemoryBlock(name = node.outputs[0].name, + level = memory, + lifetime = (i, endLifetime), + addrSpace = outputAddrSpace), + ]) + + if memoryMultibuffer is not None: + setMemoryMapRest(memoryMultibuffer, inputMultibufferAddrSpace, outputMultibufferAddrSpace) + + for memory in singleTileMemories: + setMemoryMapRest(memory, inputTileAddrSpace, outputTileAddrSpace) + + return tilingSolution, memoryMap + + +def defaultScheduler(graph: gs.Graph) -> List[List[gs.Node]]: + return [[node] for node in graph.nodes] + + +def setup_pulp_deployer(defaultMemory: str, targetMemory: str, graph: gs.Graph, inputTypes: Dict[str, Type[Pointer]], + doublebuffer: bool, deeployStateDir: str) -> NetworkDeployer: + L3 = MemoryLevel(name = "L3", neighbourNames = ["L2"], size = 64000000) + L2 = MemoryLevel(name = "L2", neighbourNames = ["L3", "L1"], size = 1024000) + L1 = MemoryLevel(name = "L1", neighbourNames = ["L2"], size = 64000) + memoryLevels = [L3, L2, L1] + memoryLevelMap = {mem.name: mem for mem in memoryLevels} + + assert defaultMemory in memoryLevelMap, f"defaultMemory {defaultMemory} is not part 
of PULP's memory hierarchy {list(memoryLevelMap.keys())}" + assert targetMemory in memoryLevelMap, f"targetMemory {targetMemory} is not part of PULP's memory hierarchy {list(memoryLevelMap.keys())}" + + memoryHierarchy = MemoryHierarchy(memoryLevels) + memoryHierarchy.setDefaultMemoryLevel(defaultMemory) + + platform = MemoryPULPPlatform(memoryHierarchy, memoryLevelMap[targetMemory]) + + deployer = PULPDeployer(graph, + platform, + inputTypes, + PULPOptimizer, + defaultScheduler, + default_channels_first = True, + deeployStateDir = deeployStateDir) + + memoryLevelAnnotationPasses = [AnnotateIOMemoryLevel(defaultMemory), AnnotateDefaultMemoryLevel(memoryHierarchy)] + # Make the deployer memory-level aware + deployer = MemoryDeployerWrapper(deployer, memoryLevelAnnotationPasses) + + if doublebuffer: + assert defaultMemory in ["L3", "L2"] + if defaultMemory == "L3": + deployer = TilerDeployerWrapper(deployer, DBOnlyL3Tiler) + else: + deployer = TilerDeployerWrapper(deployer, DBTiler) + else: + deployer = TilerDeployerWrapper(deployer, SBTiler) + + return deployer + + +def setup_snitch_deployer(defaultMemory: str, targetMemory: str, graph: gs.Graph, inputTypes: Dict[str, Type[Pointer]], + doublebuffer: bool, deeployStateDir: str) -> NetworkDeployer: + L3 = MemoryLevel(name = "L3", neighbourNames = ["L2"], size = 64000000) + L2 = MemoryLevel(name = "L2", neighbourNames = ["L3", "L1"], size = 1024000) + L1 = MemoryLevel(name = "L1", neighbourNames = ["L2"], size = 64000) + memoryLevels = [L3, L2, L1] + memoryLevelMap = {mem.name: mem for mem in memoryLevels} + + assert defaultMemory in memoryLevelMap, f"defaultMemory {defaultMemory} is not part of Snitch's memory hierarchy {list(memoryLevelMap.keys())}" + assert targetMemory in memoryLevelMap, f"targetMemory {targetMemory} is not part of Snitch's memory hierarchy {list(memoryLevelMap.keys())}" + + memoryHierarchy = MemoryHierarchy(memoryLevels) + memoryHierarchy.setDefaultMemoryLevel(defaultMemory) + + platform = 
SnitchPlatform() + platform = MemoryPlatformWrapper(platform, memoryHierarchy, memoryLevelMap[targetMemory]) + + deployer = SnitchDeployer(graph, + platform, + inputTypes, + SnitchOptimizer, + defaultScheduler, + deeployStateDir = deeployStateDir) + memoryLevelAnnotationPasses = [AnnotateIOMemoryLevel(defaultMemory), AnnotateDefaultMemoryLevel(memoryHierarchy)] + # Make the deployer memory-level aware + deployer = MemoryDeployerWrapper(deployer, memoryLevelAnnotationPasses) + + assert defaultMemory == "L2" + if doublebuffer: + deployer = TilerDeployerWrapper(deployer, DBTiler) + else: + deployer = TilerDeployerWrapper(deployer, SBTiler) + + return deployer + + +def prepare_deployer_with_custom_tiling(deployer: NetworkDeployer, defaultMemory: str, targetMemory: str, + tileShape: Tuple[int, ...], doublebuffer: bool) -> None: + # Decomposed deployer.prepare() to enter a custom tiling solution + deployer.frontEnd() + super(TilerDeployerWrapper, deployer).bind() + + tilingSolution, memoryMap = generate_tiling( + ctxt = deployer.ctxt, + memoryStart = defaultMemory, + memoryOrder = [defaultMemory, targetMemory], + memoryHierarchy = deployer.Platform.memoryHierarchy, + inputShape = deployer.graph.inputs[0].shape, + tileShape = tileShape, + graph = deployer.graph, + _type = deployer.inputTypes['input_0'].referencedType, + doublebuffer = doublebuffer, + ) + deployer.tile(tilingSolution, memoryMap) + deployer.backEnd() + deployer.prepared = True diff --git a/DeeployTest/testUtils/platformMapping.py b/DeeployTest/testUtils/platformMapping.py index 3e9639a688..56cd3d7d28 100644 --- a/DeeployTest/testUtils/platformMapping.py +++ b/DeeployTest/testUtils/platformMapping.py @@ -23,10 +23,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Callable, Dict, Optional, Tuple, Union +from typing import Callable, Dict, Optional, Tuple, Type, Union import onnx_graphsurgeon as gs +from Deeploy.AbstractDataTypes import Pointer from Deeploy.DeeployTypes import DeploymentPlatform, NetworkDeployer, TopologyOptimizer from Deeploy.MemoryLevelExtension.MemoryLevels import MemoryHierarchy, MemoryLevel from Deeploy.MemoryLevelExtension.NetworkDeployers.MemoryLevelDeployer import MemoryPlatform, MemoryPlatformWrapper @@ -111,7 +112,7 @@ def setupMemoryPlatform(platform: DeploymentPlatform, memoryHierarchy: MemoryHie def mapDeployer(platform: DeploymentPlatform, graph: gs.Graph, - inputTypes: Dict[str, type], + inputTypes: Dict[str, Type[Pointer]], loweringOptimizer: Optional[TopologyOptimizer] = None, scheduler: Optional[Callable] = None, name: Optional[str] = None, diff --git a/DeeployTest/testUtils/testRunner.py b/DeeployTest/testUtils/testRunner.py index a3dc6a7189..353856c2ca 100644 --- a/DeeployTest/testUtils/testRunner.py +++ b/DeeployTest/testUtils/testRunner.py @@ -322,6 +322,7 @@ def __init__(self, self.gen_args = gen_args self._dir_gen_root = f'TEST_{platform.upper()}' + assert self._args.toolchain_install_dir is not None, f"Environment variable LLVM_INSTALL_DIR is not set" self._dir_toolchain = os.path.normpath(self._args.toolchain_install_dir) self._dir_build = f"{self._dir_gen_root}/build" self._dir_gen, self._dir_test, self._name_test = getPaths(self._args.dir, self._dir_gen_root) diff --git a/DeeployTest/testUtils/tilingUtils.py b/DeeployTest/testUtils/tilingUtils.py new file mode 100644 index 0000000000..78a9bbcdd8 --- /dev/null +++ b/DeeployTest/testUtils/tilingUtils.py @@ -0,0 +1,41 @@ +from typing import List, Union + +from ortools.constraint_solver.pywrapcp import IntVar + +from Deeploy.DeeployTypes import NetworkContext, SubGraph, TransientBuffer +from Deeploy.TilingExtension.TilerExtension import Tiler +from Deeploy.TilingExtension.TilerModel import TilerModel + + +class 
DBOnlyL3Tiler(Tiler): + + def multiBufferStrategy(self, tilerModel: TilerModel, ctxt: NetworkContext, pattern: SubGraph, path: List[str], + hop: str, tensorName: str) -> Union[int, IntVar]: + buffer = ctxt.lookup(tensorName) + + if isinstance(buffer, TransientBuffer): + return 1 + + if hop == 'L1': + return 1 + + return 2 + + +class DBTiler(Tiler): + + def multiBufferStrategy(self, tilerModel: TilerModel, ctxt: NetworkContext, pattern: SubGraph, path: List[str], + hop: str, tensorName: str) -> Union[int, IntVar]: + buffer = ctxt.lookup(tensorName) + + if isinstance(buffer, TransientBuffer): + return 1 + + return 2 + + +class SBTiler(Tiler): + + def multiBufferStrategy(self, tilerModel: TilerModel, ctxt: NetworkContext, pattern: SubGraph, path: List[str], + hop: str, tensorName: str) -> Union[int, IntVar]: + return 1 diff --git a/DeeployTest/testUtils/typeMapping.py b/DeeployTest/testUtils/typeMapping.py index a551b25150..15242c93ea 100644 --- a/DeeployTest/testUtils/typeMapping.py +++ b/DeeployTest/testUtils/typeMapping.py @@ -23,15 +23,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from collections import namedtuple -from typing import List +from typing import Tuple, Type import numpy as np +import numpy.typing as npt -from Deeploy.AbstractDataTypes import PointerClass -from Deeploy.CommonExtensions.DataTypes import FloatDataTypes, IntegerDataTypes, int8_t - -offsetType = namedtuple("offsetType", ("type", "offset")) +from Deeploy.AbstractDataTypes import BaseType, IntegerImmediate, Pointer, PointerClass +from Deeploy.CommonExtensions.DataTypes import FloatDataTypes, IntegerDataTypes, float32_t, int8_t, int16_t, int32_t, \ + minimalFloatType, minimalIntegerType, uint8_t, uint16_t, uint32_t _ALL_DTYPES: dict[str, type] = {t.typeName: t for t in (*IntegerDataTypes, *FloatDataTypes)} @@ -60,34 +59,36 @@ def parseDataType(name: str) -> type: return _ALL_DTYPES[name] -def isInteger(_input: np.array) -> bool: - if np.abs((_input.astype(int) - _input)).max() > 0.001: - return False - return True +def isInteger(x: npt.NDArray) -> bool: + return np.abs((x.astype(int) - x)).max() <= 0.001 + + +def inferMinimalType(values: np.ndarray, default: Type[BaseType] = int8_t) -> Type[BaseType]: + # WIESEP: We cannot do type inference for empty arrays. 
+ if np.prod(values.shape) == 0: + print(f"Warning: Empty input array for type inference for {values}!") + return default + + if isInteger(values): + return minimalIntegerType(values) + else: + return minimalFloatType(values) -def isUnsigned(_input: np.array) -> bool: - if (_input).min() < 0: - return False - return True +def signPropTypeAndOffset(_type: Type[IntegerImmediate]) -> Tuple[Type[IntegerImmediate], int]: + if _type.signed: + return _type, 0 + unsigned2signed = { + unsigned.typeName: signed for unsigned, signed in zip([t for t in IntegerDataTypes if t.typeMin == 0 + ], [t for t in IntegerDataTypes if t.typeMin < 0]) + } -def dataWidth(n): - count = 0 - n = np.abs(int(n - 1)) - while (n > 0): - count += 1 - n = n >> 8 - ret = 2**(count + 2) - if ret < 8: - ret = 8 - return ret + signedType = unsigned2signed[_type.typeName] + return signedType, 2**(signedType.typeWidth - 1) -def inferInputType(values: np.ndarray, - signProp: bool = False, - defaultType = int8_t, - defaultOffset = 0) -> List[offsetType]: +def inferTypeAndOffset(values: np.ndarray, signProp: bool = False) -> Tuple[Type[Pointer], int]: """Infers the data type of the provided input array. Parameters @@ -97,50 +98,55 @@ def inferInputType(values: np.ndarray, signProp : bool Whether to consider signedness when inferring the data type. - - defaultType : type - The default data type to use if inference fails. - - defaultOffset : int - The default offset to use if inference fails. - Returns ------- - List[offsetType] - A list of inferred data types and their corresponding offsets. + Tuple[Type[BaseType], int] + The inferred type and offset """ - # WIESEP: We cannot do type inference for empty arrays. 
- if np.prod(values.shape) == 0: - print(f"Warning: Empty input array for type inference for {values}!") - return [(defaultType, defaultOffset)] - - signedPlatformTypes = [_type for _type in IntegerDataTypes if _type.typeMin < 0] - - matchingTypes = [] - - # There is implicit knowledge encoded in the order of the checks (i.e. first unsigned, signed - # and then float). - if signProp and isUnsigned(values) and isInteger(values): - for _type in sorted(signedPlatformTypes, key = lambda x: x.typeWidth): - signPropOffset = (2**(_type.typeWidth - 1)) - if _type.checkPromotion(values - signPropOffset): - matchingTypes.append(offsetType(PointerClass(_type), signPropOffset)) - elif isInteger(values): - sorted_types = sorted( - IntegerDataTypes, - key = lambda t: (t.typeWidth, t.typeMin < 0), - ) - - for _type in sorted_types: - if _type.checkPromotion(values): - matchingTypes.append(offsetType(PointerClass(_type), 0)) - else: - for _type in sorted(FloatDataTypes, key = lambda x: x.typeWidth): - if _type.checkPromotion(values): - matchingTypes.append(offsetType(PointerClass(_type), 0)) + _type = inferMinimalType(values) - if not matchingTypes: - raise RuntimeError("Could not find a matching type!") - - return matchingTypes + if signProp and issubclass(_type, IntegerImmediate): + _type, offset = signPropTypeAndOffset(_type) + else: + offset = 0 + + return PointerClass(_type), offset + + +def baseTypeFromName(name: str) -> Type[BaseType]: + if name == "int8_t": + return int8_t + elif name == "uint8_t": + return uint8_t + elif name == "int16_t": + return int16_t + elif name == "uint16_t": + return uint16_t + elif name == "int32_t": + return int32_t + elif name == "uint32_t": + return uint32_t + elif name == "float32_t": + return float32_t + else: + raise RuntimeError(f"Unrecognized name {name}") + + +def dtypeFromDeeployType(_ty: Type[BaseType]) -> npt.DTypeLike: + if _ty == int8_t: + return np.int8 + elif _ty == uint8_t: + return np.uint8 + elif _ty == int16_t: + return 
np.int16 + elif _ty == uint16_t: + return np.uint16 + elif _ty == int32_t: + return np.int32 + elif _ty == uint32_t: + return np.uint32 + elif _ty == float32_t: + return np.float32 + else: + raise RuntimeError(f"Unimplemented conversion for type {_ty.typeName}") diff --git a/TargetLibraries/PULPOpen/CMakeLists.txt b/TargetLibraries/PULPOpen/CMakeLists.txt index 30bc9aa094..bf67dfca01 100644 --- a/TargetLibraries/PULPOpen/CMakeLists.txt +++ b/TargetLibraries/PULPOpen/CMakeLists.txt @@ -3,7 +3,7 @@ file(GLOB_RECURSE SOURCES ) if(NOT DEFINED ENV{PULP_SDK_HOME}) - message(FATAL_ERROR "Environemnt variable PULP_SDK_HOME not set.") + message(FATAL_ERROR "Environment variable PULP_SDK_HOME not set.") endif() if(platform STREQUAL "Siracusa" OR platform STREQUAL "Siracusa_w_neureka") diff --git a/TargetLibraries/PULPOpen/inc/dory_dma.h b/TargetLibraries/PULPOpen/inc/dory_dma.h deleted file mode 100644 index 9b2c4259ab..0000000000 --- a/TargetLibraries/PULPOpen/inc/dory_dma.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * dory.h - * Alessio Burrello - * - * Copyright (C) 2019-2020 University of Bologna - * - * SPDX-License-Identifier: Apache-2.0 - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef _DORY_DMA_H -#define _DORY_DMA_H - -typedef struct { - void *ext; - void *loc; - unsigned short hwc_to_chw; - unsigned short stride_2d; - unsigned short number_of_2d_copies; - unsigned short stride_1d; - unsigned short number_of_1d_copies; - unsigned int length_1d_copy; - unsigned int mchan_cmd; - int dir; // 0 l1->l2, 1 l2->l1 - int tid; -} DMA_copy; - -void dory_dma_memcpy_hwc_to_chw(DMA_copy *copy); - -void dory_dma_memcpy_1d_async(DMA_copy *copy); - -void dory_dma_memcpy_2d_async(DMA_copy *copy); - -void dory_dma_memcpy_3d_async(DMA_copy *copy); - -void dory_dma_memcpy_async(DMA_copy *copy); - -void dory_dma_free(DMA_copy *copy); - -void dory_dma_barrier(DMA_copy *copy); - -int dory_dma_allocate(); -#endif diff --git a/TargetLibraries/PULPOpen/inc/mchan.h b/TargetLibraries/PULPOpen/inc/mchan.h deleted file mode 100644 index cd7c2ee799..0000000000 --- a/TargetLibraries/PULPOpen/inc/mchan.h +++ /dev/null @@ -1,161 +0,0 @@ -/* ===================================================================== - * Title: mchan.h - * Description: - * - * $Date: 26.07.2024 - * - * ===================================================================== */ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- - * Adopted from PULP-SDK (https://github.com/pulp-platform/pulp-sdk), released - under Apache 2.0 - - */ - -#ifndef _MCHAN_H -#define _MCHAN_H - -// Requires to have MCHAN_BASE_ADDR, MCHAN_EVENT defined outside of header -#ifndef MCHAN_BASE_ADDR -#error "[mchan.h] MCHAN_BASE_ADDR not defined!" -#endif - -#if !defined(MCHAN_EVENT) && !defined(MCHAN_POLLED) -#error "[mchan.h] Nor MCHAN_EVENT nor MCHAN_POLLED defined!" -#endif - -#if defined(MCHAN_EVENT) && !defined(MCHAN_EVENT_BIT) -#error \ - "[mchan.h] MCHAN_EVENT_BIT should be defined when using events as signalization!" -#endif - -#include "pmsis.h" - -#define MCHAN_CMD_OFFSET 0 -#define MCHAN_STATUS_OFFSET 4 - -#define MCHAN_CMD_ADDR (MCHAN_BASE_ADDR + MCHAN_CMD_OFFSET) -#define MCHAN_STATUS_ADDR (MCHAN_BASE_ADDR + MCHAN_STATUS_OFFSET) - -#define READ_REG(addr) (*(volatile int *)(addr)) -#define WRITE_REG(addr, value) \ - do { \ - *(volatile int *)(addr) = (int)value; \ - } while (0) - -#define MCHAN_READ_CMD() READ_REG(MCHAN_CMD_ADDR) -#define MCHAN_WRITE_CMD(value) WRITE_REG(MCHAN_CMD_ADDR, value) - -#define MCHAN_READ_STATUS() READ_REG(MCHAN_STATUS_ADDR) -#define MCHAN_WRITE_STATUS(value) WRITE_REG(MCHAN_STATUS_ADDR, value) - -// MCHAN version 7 has 1 more bit for the transfer length, so all the flag -// offsets are shifted by 1. Also, LOC (TCDM) striding is not supported in v6. 
-#if MCHAN_VERSION == 7 -#define MCHAN_TRANSFER_LEN_SIZE (17) -#else -#define MCHAN_TRANSFER_LEN_SIZE (16) -#endif - -#define MCHAN_CMD_FLAG_DIRECTION_LOC2EXT (0 << (MCHAN_TRANSFER_LEN_SIZE + 0)) -#define MCHAN_CMD_FLAG_DIRECTION_EXT2LOC (1 << (MCHAN_TRANSFER_LEN_SIZE + 0)) -#define MCHAN_CMD_FLAG_INCREMENTAL (1 << (MCHAN_TRANSFER_LEN_SIZE + 1)) -#define MCHAN_CMD_FLAG_2D_TRANSFER_EXTERNAL (1 << (MCHAN_TRANSFER_LEN_SIZE + 2)) -#define MCHAN_CMD_FLAG_EVENT_ENABLE (1 << (MCHAN_TRANSFER_LEN_SIZE + 3)) -#define MCHAN_CMD_FLAG_INTERRUPT_ENABLE (1 << (MCHAN_TRANSFER_LEN_SIZE + 4)) -#define MCHAN_CMD_FLAG_BROADCAST_FINISH (1 << (MCHAN_TRANSFER_LEN_SIZE + 5)) -#if MCHAN_VERSION == 7 -#define MCHAN_CMD_FLAG_2D_TRANSFER_LOCAL \ - (1 << (MCHAN_TRANSFER_LEN_SIZE + 6)) // can only be used with MCHAN v7 -#endif -#define MCHAN_CMD_SHIFT_DIRECTION MCHAN_TRANSFER_LEN_SIZE - -#define MCHAN_CMD(len, dir, inc, loc_2d, ext_2d, int_en, event_en, broadcast) \ - (len | dir | inc | loc_2d | ext_2d | broadcast | int_en | event_en) - -typedef enum { - MCHAN_DMA_TRANSFER_DIRECTION_EXT2LOC = MCHAN_CMD_FLAG_DIRECTION_EXT2LOC, - MCHAN_DMA_TRANSFER_DIRECTION_LOC2EXT = MCHAN_CMD_FLAG_DIRECTION_LOC2EXT -} mchan_dma_transfer_direction_e; - -typedef struct { - int cmd; - int size; - - void *loc; - int loc_size_1d; - int loc_stride_1d; - - void *ext; - int ext_size_1d; - int ext_stride_1d; -} mchan_transfer_t; - -static int mchan_transfer_get_id() { return MCHAN_READ_CMD(); } - -static void mchan_transfer_push_1d(mchan_transfer_t trans) { - MCHAN_WRITE_CMD(trans.cmd); - MCHAN_WRITE_CMD(trans.loc); - MCHAN_WRITE_CMD(trans.ext); -} - -static void mchan_transfer_push_2d(mchan_transfer_t trans) { - MCHAN_WRITE_CMD(trans.cmd); - MCHAN_WRITE_CMD(trans.loc); - MCHAN_WRITE_CMD(trans.ext); -// MCHAN version 7 takes 2D "count" (length of 1D transfers) and stride in 2 -// steps, v7 takes it in 1 step with the stride shifted to the upper 16 bits. 
-#if MCHAN_VERSION == 7 - MCHAN_WRITE_CMD(trans.ext_size_1d); - MCHAN_WRITE_CMD(trans.ext_stride_1d); -#else - MCHAN_WRITE_CMD(trans.ext_size_1d | (trans.ext_stride_1d << 16)); -#endif -} - -static void mchan_transfer_push(mchan_transfer_t trans) { - MCHAN_WRITE_CMD(trans.cmd); - MCHAN_WRITE_CMD(trans.loc); - MCHAN_WRITE_CMD(trans.ext); - - if (trans.ext_size_1d < trans.size) { - MCHAN_WRITE_CMD(trans.ext_size_1d); - MCHAN_WRITE_CMD(trans.ext_stride_1d); - } - - if (trans.loc_size_1d < trans.size) { - MCHAN_WRITE_CMD(trans.loc_size_1d); - MCHAN_WRITE_CMD(trans.loc_stride_1d); - } -} - -static void mchan_transfer_free(int tid) { MCHAN_WRITE_STATUS(1 << tid); } - -static int mchan_transfer_busy(int tid) { - return MCHAN_READ_STATUS() & (1 << tid); -} - -static void mchan_transfer_wait(int tid) { -#if defined(MCHAN_EVENT) - while (mchan_transfer_busy(tid)) - eu_evt_maskWaitAndClr(1 << MCHAN_EVENT_BIT); -#elif defined(MCHAN_POLLED) - while (mchan_transfer_busy(tid)) - ; -#endif -} - -#endif diff --git a/TargetLibraries/PULPOpen/inc/mchan_siracusa.h b/TargetLibraries/PULPOpen/inc/mchan_siracusa.h new file mode 100644 index 0000000000..2d44d7b29e --- /dev/null +++ b/TargetLibraries/PULPOpen/inc/mchan_siracusa.h @@ -0,0 +1,15 @@ +// Default mchan base address +#ifndef MCHAN_BASE_ADDR +#define MCHAN_BASE_ADDR (ARCHI_MCHAN_DEMUX_ADDR) // CLUSTER_MCHAN_ADDR +#endif + +// Default mchan await mode +#if !defined(MCHAN_EVENT) && !defined(MCHAN_POLLED) +#define MCHAN_EVENT +#endif + +#ifdef MCHAN_EVENT +#define MCHAN_EVENT_BIT (ARCHI_CL_EVT_DMA0) // 8 +#endif + +#include "mchan_v7.h" diff --git a/TargetLibraries/PULPOpen/inc/mchan_v6.h b/TargetLibraries/PULPOpen/inc/mchan_v6.h new file mode 100644 index 0000000000..ffd4045519 --- /dev/null +++ b/TargetLibraries/PULPOpen/inc/mchan_v6.h @@ -0,0 +1,112 @@ +/* ===================================================================== + * Title: mchan_v6.h + * Description: + * + * $Date: 26.07.2024 + * + * 
===================================================================== */ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + + * Adopted from PULP-SDK (https://github.com/pulp-platform/pulp-sdk), released + under Apache 2.0 + + */ + +#ifndef __MCHAN_V6_H__ +#define __MCHAN_V6_H__ + +// Requires to have MCHAN_BASE_ADDR, MCHAN_EVENT defined outside of header +#ifndef MCHAN_BASE_ADDR +#error "[mchan_v6.h] MCHAN_BASE_ADDR not defined!" +#endif + +#if !defined(MCHAN_EVENT) && !defined(MCHAN_POLLED) +#error "[mchan_v6.h] Nor MCHAN_EVENT nor MCHAN_POLLED defined!" +#endif + +#if defined(MCHAN_EVENT) && defined(MCHAN_POLLED) +#error "[mchan_v6.h] Define either MCHAN_EVENT or MCHAN_POLLED, not both!" +#endif + +#if defined(MCHAN_EVENT) && !defined(MCHAN_EVENT_BIT) +#error \ + "[mchan_v6.h] MCHAN_EVENT_BIT should be defined when using events as signalization!" +#endif + +#if !defined(MCHAN_VERSION) +#define MCHAN_VERSION 6 +#elif MCHAN_VERSION != 6 +#error "[mchan_v6.h] Illegal MCHAN_VERSION. 
Supported only 6" +#endif + +#include "pmsis.h" + +#define MCHAN_TRANSFER_LEN_SIZE (16) + +#define MCHAN_CMD_FLAG_DIRECTION_LOC2EXT (0 << (MCHAN_TRANSFER_LEN_SIZE + 0)) +#define MCHAN_CMD_FLAG_DIRECTION_EXT2LOC (1 << (MCHAN_TRANSFER_LEN_SIZE + 0)) +#define MCHAN_CMD_FLAG_INCREMENTAL (1 << (MCHAN_TRANSFER_LEN_SIZE + 1)) +#define MCHAN_CMD_FLAG_2D_TRANSFER_EXTERNAL (1 << (MCHAN_TRANSFER_LEN_SIZE + 2)) +#define MCHAN_CMD_FLAG_EVENT_ENABLE (1 << (MCHAN_TRANSFER_LEN_SIZE + 3)) +#define MCHAN_CMD_FLAG_INTERRUPT_ENABLE (1 << (MCHAN_TRANSFER_LEN_SIZE + 4)) +#define MCHAN_CMD_FLAG_BROADCAST_FINISH (1 << (MCHAN_TRANSFER_LEN_SIZE + 5)) + +static volatile uint32_t *const cmd_ptr = + (volatile uint32_t *const)(MCHAN_BASE_ADDR + 0x0); +static volatile uint32_t *const status_ptr = + (volatile uint32_t *const)(MCHAN_BASE_ADDR + 0x4); + +static void mchan_transfer_1d(uint32_t cmd, void *loc, void *ext) { + // TODO: assert flags are set correctly + *cmd_ptr = (uint32_t)cmd; + *cmd_ptr = (uint32_t)loc; + *cmd_ptr = (uint32_t)ext; +} + +static void mchan_transfer_2d_ext_strided(uint32_t cmd, void *loc, void *ext, + uint16_t ext_size_1d, + uint16_t ext_stride_2d) { + // TODO: assert flags are set correctly + *cmd_ptr = (uint32_t)cmd; + *cmd_ptr = (uint32_t)loc; + *cmd_ptr = (uint32_t)ext; + *cmd_ptr = (uint32_t)ext_size_1d | ((uint32_t)ext_stride_2d << 16); +} + +static uint32_t mchan_channel_alloc() { return *cmd_ptr; } + +static void mchan_channel_free(uint32_t channel_id) { + // TODO: assert channel_id is smaller then 32 + *status_ptr = 1 << channel_id; +} + +static uint32_t mchan_channel_is_busy(uint32_t channel_id) { + // TODO: assert channel_id is smaller then 32 + return *status_ptr & (1 << channel_id); +} + +static void mchan_channel_wait(uint32_t channel_id) { + // TODO: assert channel_id is smaller then 32 +#if defined(MCHAN_EVENT) + while (mchan_channel_is_busy(channel_id)) + eu_evt_maskWaitAndClr(1 << MCHAN_EVENT_BIT); +#elif defined(MCHAN_POLLED) + while 
(mchan_channel_is_busy(channel_id)) + ; +#endif +} + +#endif // __MCHAN_V6_H__ diff --git a/TargetLibraries/PULPOpen/inc/mchan_v7.h b/TargetLibraries/PULPOpen/inc/mchan_v7.h new file mode 100644 index 0000000000..8f14a0efdc --- /dev/null +++ b/TargetLibraries/PULPOpen/inc/mchan_v7.h @@ -0,0 +1,138 @@ +/* ===================================================================== + * Title: mchan_v7.h + * Description: + * + * $Date: 26.07.2024 + * + * ===================================================================== */ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + + * Adopted from PULP-SDK (https://github.com/pulp-platform/pulp-sdk), released + under Apache 2.0 + + */ + +#ifndef __MCHAN_V7_H__ +#define __MCHAN_V7_H__ + +// Requires to have MCHAN_BASE_ADDR, MCHAN_EVENT defined outside of header +#ifndef MCHAN_BASE_ADDR +#error "[mchan_v7.h] MCHAN_BASE_ADDR not defined!" +#endif + +#if !defined(MCHAN_EVENT) && !defined(MCHAN_POLLED) +#error "[mchan_v7.h] Nor MCHAN_EVENT nor MCHAN_POLLED defined!" +#endif + +#if defined(MCHAN_EVENT) && defined(MCHAN_POLLED) +#error "[mchan_v7.h] Define either MCHAN_EVENT or MCHAN_POLLED, not both!" +#endif + +#if defined(MCHAN_EVENT) && !defined(MCHAN_EVENT_BIT) +#error \ + "[mchan_v7.h] MCHAN_EVENT_BIT should be defined when using events as signalization!" +#endif + +#if !defined(MCHAN_VERSION) +#define MCHAN_VERSION 7 +#elif MCHAN_VERSION != 7 +#error "[mchan_v7.h] Illegal MCHAN_VERSION. 
Supported only 7" +#endif + +#include "pmsis.h" + +#define MCHAN_TRANSFER_LEN_SIZE (17) + +#define MCHAN_CMD_FLAG_DIRECTION_LOC2EXT (0 << (MCHAN_TRANSFER_LEN_SIZE + 0)) +#define MCHAN_CMD_FLAG_DIRECTION_EXT2LOC (1 << (MCHAN_TRANSFER_LEN_SIZE + 0)) +#define MCHAN_CMD_FLAG_INCREMENTAL (1 << (MCHAN_TRANSFER_LEN_SIZE + 1)) +#define MCHAN_CMD_FLAG_2D_TRANSFER_EXTERNAL (1 << (MCHAN_TRANSFER_LEN_SIZE + 2)) +#define MCHAN_CMD_FLAG_EVENT_ENABLE (1 << (MCHAN_TRANSFER_LEN_SIZE + 3)) +#define MCHAN_CMD_FLAG_INTERRUPT_ENABLE (1 << (MCHAN_TRANSFER_LEN_SIZE + 4)) +#define MCHAN_CMD_FLAG_BROADCAST_FINISH (1 << (MCHAN_TRANSFER_LEN_SIZE + 5)) +#define MCHAN_CMD_FLAG_2D_TRANSFER_LOCAL (1 << (MCHAN_TRANSFER_LEN_SIZE + 6)) + +static volatile uint32_t *const cmd_ptr = + (volatile uint32_t *const)(MCHAN_BASE_ADDR + 0x0); +static volatile uint32_t *const status_ptr = + (volatile uint32_t *const)(MCHAN_BASE_ADDR + 0x4); + +static void mchan_transfer_1d(uint32_t cmd, void *loc, void *ext) { + // TODO: assert flags are set correctly + *cmd_ptr = (uint32_t)cmd; + *cmd_ptr = (uint32_t)loc; + *cmd_ptr = (uint32_t)ext; +} + +static void mchan_transfer_2d_loc_strided(uint32_t cmd, void *loc, void *ext, + uint32_t loc_size_1d, + uint32_t loc_stride_2d) { + // TODO: assert flags are set correctly + *cmd_ptr = (uint32_t)cmd; + *cmd_ptr = (uint32_t)loc; + *cmd_ptr = (uint32_t)ext; + *cmd_ptr = (uint32_t)loc_size_1d; + *cmd_ptr = (uint32_t)loc_stride_2d; +} + +static void mchan_transfer_2d_ext_strided(uint32_t cmd, void *loc, void *ext, + uint32_t ext_size_1d, + uint32_t ext_stride_2d) { + // TODO: assert flags are set correctly + *cmd_ptr = (uint32_t)cmd; + *cmd_ptr = (uint32_t)loc; + *cmd_ptr = (uint32_t)ext; + *cmd_ptr = (uint32_t)ext_size_1d; + *cmd_ptr = (uint32_t)ext_stride_2d; +} + +static void mchan_transfer_2d_loc_strided_ext_strided( + uint32_t cmd, void *loc, void *ext, uint32_t loc_size_1d, + uint32_t loc_stride_2d, uint32_t ext_size_1d, uint32_t ext_stride_2d) { + // TODO: assert flags 
are set correctly + *cmd_ptr = (uint32_t)cmd; + *cmd_ptr = (uint32_t)loc; + *cmd_ptr = (uint32_t)ext; + *cmd_ptr = (uint32_t)ext_size_1d; + *cmd_ptr = (uint32_t)ext_stride_2d; + *cmd_ptr = (uint32_t)loc_size_1d; + *cmd_ptr = (uint32_t)loc_stride_2d; +} + +static uint32_t mchan_channel_alloc() { return *cmd_ptr; } + +static void mchan_channel_free(uint32_t channel_id) { + // TODO: assert tid is smaller then 32 + *status_ptr = 1 << channel_id; +} + +static uint32_t mchan_channel_is_busy(uint32_t channel_id) { + // TODO: assert tid is smaller then 32 + return *status_ptr & (1 << channel_id); +} + +static void mchan_channel_wait(uint32_t channel_id) { + // TODO: assert tid is smaller then 32 +#if defined(MCHAN_EVENT) + while (mchan_channel_is_busy(channel_id)) + eu_evt_maskWaitAndClr(1 << MCHAN_EVENT_BIT); +#elif defined(MCHAN_POLLED) + while (mchan_channel_is_busy(channel_id)) + ; +#endif +} + +#endif // __MCHAN_V7_H__ diff --git a/TargetLibraries/PULPOpen/src/dory_dma.c b/TargetLibraries/PULPOpen/src/dory_dma.c deleted file mode 100644 index 0aa31dcd17..0000000000 --- a/TargetLibraries/PULPOpen/src/dory_dma.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * dory_dma.c - * Alessio Burrello - * - * Copyright (C) 2019-2020 University of Bologna - * - * SPDX-License-Identifier: Apache-2.0 - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "dory_dma.h" - -#include "pmsis.h" - -#ifndef MCHAN_BASE_ADDR -// FIXME: For GAP9, this must point to ARCHI_MCHAN_EXT_ADDR!!! 
-// In PULP-SDK for Kraken, this is fixed. -// GAP8 hardware to be tested... -#define MCHAN_BASE_ADDR (ARCHI_MCHAN_DEMUX_ADDR) // CLUSTER_MCHAN_ADDR -#endif -#define MCHAN_EVENT -// #define MCHAN_POLLED -#ifdef MCHAN_EVENT -#define MCHAN_EVENT_BIT (ARCHI_CL_EVT_DMA0) // 8 -#endif -#include "mchan.h" - -#if defined(MCHAN_POLLED) -#define MCHAN_FLAGS (MCHAN_CMD_FLAG_INCREMENTAL) -#elif defined(MCHAN_EVENT) -#define MCHAN_FLAGS (MCHAN_CMD_FLAG_EVENT_ENABLE | MCHAN_CMD_FLAG_INCREMENTAL) -#elif defined(MCHAN_INTERRUPT) -#define MCHAN_FLAGS \ - (MCHAN_CMD_FLAG_INTERRUPT_ENABLE | MCHAN_CMD_FLAG_INCREMENTAL) -#endif - -#define MCHAN_FLAGS_1D (MCHAN_FLAGS) -#define MCHAN_FLAGS_2D (MCHAN_FLAGS | MCHAN_CMD_FLAG_2D_TRANSFER_EXTERNAL) - -#define MIN(a, b) ((a) < (b) ? (a) : (b)) - -void dory_dma_memcpy_hwc_to_chw(DMA_copy *copy) { - int core_id = pi_core_id(); - int Log2Core = log2(NUM_CORES); - int number_of_copies_per_core = - (copy->length_1d_copy >> Log2Core) + - ((copy->length_1d_copy & (NUM_CORES - 1)) != 0); - int start_pixel, stop_pixel; // "pixel" is a misnomer; the CHANNELS are - // divided between the cores - // this function assumes that a DW tile is always as wide as the complete - // feature map (this is enforced by DORY's tiler) - start_pixel = MIN(number_of_copies_per_core * core_id, copy->length_1d_copy); - stop_pixel = - MIN(start_pixel + number_of_copies_per_core, copy->length_1d_copy); - void *ext = copy->ext + start_pixel; - void *loc = copy->loc + copy->number_of_1d_copies * - copy->number_of_2d_copies * start_pixel; - const int size_2d = copy->number_of_1d_copies * copy->number_of_2d_copies; - - for (int i = start_pixel; i < stop_pixel; i++) { - mchan_transfer_t trans = {.cmd = size_2d | - copy->dir << MCHAN_CMD_SHIFT_DIRECTION | - MCHAN_FLAGS_2D, - .size = size_2d, - .ext = ext, - .loc = loc, - .ext_size_1d = 1, // one byte at a time... 
- .ext_stride_1d = copy->stride_1d}; - mchan_transfer_push_2d(trans); -#ifdef ALWAYS_BLOCK_DMA_TRANSFERS // needed on GAP8 board - dory_dma_barrier(copy); -#endif - ext += 1; // next channel - loc += copy->number_of_1d_copies * copy->number_of_2d_copies; - } -} - -void dory_dma_memcpy_1d_async(DMA_copy *copy) { - if (pi_core_id() == 0) { - mchan_transfer_t trans = { - .cmd = copy->length_1d_copy * copy->number_of_1d_copies * - copy->number_of_2d_copies | - (copy->dir << MCHAN_CMD_SHIFT_DIRECTION) | MCHAN_FLAGS_1D, - .size = copy->length_1d_copy * copy->number_of_1d_copies * - copy->number_of_2d_copies, - .ext = copy->ext, - .loc = copy->loc}; - mchan_transfer_push_1d(trans); - } -} - -void dory_dma_memcpy_2d_async(DMA_copy *copy) { - if (pi_core_id() == 0) { - const int size_2d = copy->number_of_1d_copies * copy->length_1d_copy * - copy->number_of_2d_copies; - const int stride = - (copy->number_of_2d_copies == 1) ? copy->stride_1d : copy->stride_2d; - const int size_1d = (copy->number_of_2d_copies == 1) - ? 
copy->length_1d_copy - : copy->length_1d_copy * copy->number_of_1d_copies; - - mchan_transfer_t trans = {.cmd = size_2d | - copy->dir << MCHAN_CMD_SHIFT_DIRECTION | - MCHAN_FLAGS_2D, - .size = size_2d, - .ext = copy->ext, - .loc = copy->loc, - .ext_size_1d = size_1d, - .ext_stride_1d = stride}; - mchan_transfer_push_2d(trans); - } -} - -void dory_dma_memcpy_3d_async(DMA_copy *copy) { - int core_id = pi_core_id(); - if (core_id == 0) { - int Log2Core = log2(1); - int number_of_2d_copies_per_core = (copy->number_of_2d_copies >> Log2Core) + - ((copy->number_of_2d_copies & (0)) != 0); - int start_pixel, stop_pixel; - start_pixel = - MIN(number_of_2d_copies_per_core * core_id, copy->number_of_2d_copies); - stop_pixel = MIN(start_pixel + number_of_2d_copies_per_core, - copy->number_of_2d_copies); - void *ext = copy->ext + copy->stride_2d * start_pixel; - void *loc = copy->loc + - copy->length_1d_copy * copy->number_of_1d_copies * start_pixel; - const int size_2d = copy->number_of_1d_copies * copy->length_1d_copy; - - for (int i = start_pixel; i < stop_pixel; i++) { - mchan_transfer_t trans = {.cmd = size_2d | - copy->dir << MCHAN_CMD_SHIFT_DIRECTION | - MCHAN_FLAGS_2D, - .size = size_2d, - .ext = ext, - .loc = loc, - .ext_size_1d = copy->length_1d_copy, - .ext_stride_1d = copy->stride_1d}; - mchan_transfer_push_2d(trans); -#ifdef ALWAYS_BLOCK_DMA_TRANSFERS // needed on GAP8 board - // dory_dma_barrier(copy); -#endif - loc += size_2d; - ext += copy->stride_2d; - } - } -} - -void dory_dma_memcpy_async(DMA_copy *copy) { - if (copy->hwc_to_chw == 1) { - dory_dma_memcpy_hwc_to_chw(copy); - } else if ((copy->number_of_2d_copies == 1 && - copy->number_of_1d_copies == 1) || - (copy->stride_1d == copy->length_1d_copy && - copy->number_of_1d_copies * copy->length_1d_copy == - copy->stride_2d) || - (copy->number_of_2d_copies == 1 && - copy->length_1d_copy == copy->stride_1d)) { - dory_dma_memcpy_1d_async(copy); - } else if ((copy->number_of_2d_copies == 1) || - 
(copy->length_1d_copy == copy->stride_1d)) { // wrong! - dory_dma_memcpy_2d_async(copy); - } else { - dory_dma_memcpy_3d_async(copy); - } -} - -void dory_dma_memcpy_1d_mindims_async(DMA_copy *copy) { - mchan_transfer_t trans = { - .cmd = copy->mchan_cmd, .ext = copy->ext, .loc = copy->loc}; - mchan_transfer_push_1d(trans); -} - -void dory_dma_memcpy_2d_mindims_async(DMA_copy *copy) { - mchan_transfer_t trans = {.cmd = copy->mchan_cmd, - .ext = copy->ext, - .loc = copy->loc, - .ext_size_1d = copy->length_1d_copy, - .ext_stride_1d = copy->stride_1d}; - mchan_transfer_push_2d(trans); -} - -void dory_dma_memcpy_3d_mindims_async(DMA_copy *copy) { - void *ext = copy->ext; - void *loc = copy->loc; - const int length_2d_copy = - copy->mchan_cmd & ((1 << MCHAN_TRANSFER_LEN_SIZE) - 1); - - for (int i = 0; i < copy->number_of_2d_copies; i++) { - mchan_transfer_t trans = {.cmd = copy->mchan_cmd, - .ext = ext, - .loc = loc, - .ext_size_1d = copy->length_1d_copy, - .ext_stride_1d = copy->stride_1d}; - mchan_transfer_push_2d(trans); - loc += length_2d_copy; - ext += copy->stride_2d; -#ifdef ALWAYS_BLOCK_DMA_TRANSFERS // needed on GAP8 board - // dory_dma_barrier(copy); -#endif - } -} - -void dory_dma_memcpy_mindims_async(DMA_copy *copy) { - if (copy->number_of_2d_copies == 1 && copy->number_of_1d_copies == 1) { - dory_dma_memcpy_1d_mindims_async(copy); - } else if (copy->number_of_2d_copies == 1) { - dory_dma_memcpy_2d_mindims_async(copy); - } else { - dory_dma_memcpy_3d_mindims_async(copy); - } -} - -void dory_dma_free(DMA_copy *copy) { mchan_transfer_free(copy->tid); } - -void dory_dma_barrier(DMA_copy *copy) { mchan_transfer_wait(copy->tid); } - -int dory_dma_allocate() { return mchan_transfer_get_id(); } diff --git a/cmake/snitch/snitch.cmake b/cmake/snitch/snitch.cmake index 4170a99aab..9a12366fbd 100644 --- a/cmake/snitch/snitch.cmake +++ b/cmake/snitch/snitch.cmake @@ -1,3 +1,7 @@ +if(NOT DEFINED ENV{SNITCH_HOME}) + message(FATAL_ERROR "Environment variable SNITCH_HOME not 
set.") +endif() + set(SNITCH_HOME $ENV{SNITCH_HOME}) set(SNITCH_RUNTIME_HOME ${SNITCH_HOME}/sw/snRuntime)