
Commit fbd6d85

training in oss
2 parents dbd3d54 + aebc2e3


51 files changed (+799, -500 lines)

.github/workflows/trunk.yml

Lines changed: 2 additions & 16 deletions
@@ -223,7 +223,6 @@ jobs:
     strategy:
       matrix:
         dtype: [fp32]
-        build-tool: [buck2, cmake]
         mode: [portable, xnnpack+kv+custom, mps, coreml]
       fail-fast: false
     with:
@@ -235,25 +234,12 @@ jobs:
       script: |
 
         DTYPE=${{ matrix.dtype }}
-        BUILD_TOOL=${{ matrix.build-tool }}
        MODE=${{ matrix.mode }}
 
-        if [[ "${BUILD_TOOL}" == "buck2" ]]; then
-          # TODO: Will add more modes that don't support buck2
-          if [[ "${MODE}" == "mps" ]]; then
-            echo "mps doesn't support buck2."
-            exit 0
-          fi
-          if [[ "${MODE}" == "coreml" ]]; then
-            echo "coreml doesn't support buck2."
-            exit 0
-          fi
-        fi
-
        bash .ci/scripts/setup-conda.sh
 
        # Setup executorch
-        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
+        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh cmake
 
        if [[ "${MODE}" == "mps" ]]; then
          # Install mps delegate
@@ -268,7 +254,7 @@ jobs:
        # Install requirements for export_llama
        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/models/llama2/install_requirements.sh
        # Test llama2
-        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh stories110M "${BUILD_TOOL}" "${DTYPE}" "${MODE}"
+        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh stories110M cmake "${DTYPE}" "${MODE}"
 
   # # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner.
   # test-llava-runner-macos:

backends/apple/coreml/scripts/install_requirements.sh

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ rm -rf "$COREML_DIR_PATH/third-party"
 mkdir "$COREML_DIR_PATH/third-party"
 
 echo "${green}ExecuTorch: Cloning coremltools."
-git clone --depth 1 --branch 8.0b2 "https://github.com/apple/coremltools.git" $COREMLTOOLS_DIR_PATH
+git clone --depth 1 --branch 8.0 "https://github.com/apple/coremltools.git" $COREMLTOOLS_DIR_PATH
 cd $COREMLTOOLS_DIR_PATH
 
 STATUS=$?

backends/cadence/aot/utils.py

Lines changed: 3 additions & 3 deletions
@@ -104,11 +104,11 @@ def get_ops_count(graph_module: torch.fx.GraphModule) -> Dict[str, int]:
         ):
             continue
         # If the op is already present, increment the count
-        if get_edge_overload_packet(node.target).__name__ in freq:
-            freq[get_edge_overload_packet(node.target).__name__] += 1
+        if node.target._name in freq:
+            freq[node.target._name] += 1
         # else, add a new entry
         else:
-            freq[get_edge_overload_packet(node.target).__name__] = 1
+            freq[node.target._name] = 1
     return freq
 
 
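The change swaps the get_edge_overload_packet(...).__name__ lookup for the target's _name attribute when tallying ops. A minimal sketch of that counting pattern, assuming an Edge-dialect torch.fx.GraphModule whose call_function targets expose a qualified _name string; the surrounding filtering here is illustrative rather than the file's exact code:

from collections import defaultdict
from typing import Dict

import torch


def count_ops_by_name(graph_module: torch.fx.GraphModule) -> Dict[str, int]:
    # Tally how often each operator appears in the graph, keyed by the
    # qualified name stored on the op overload's _name attribute.
    freq: Dict[str, int] = defaultdict(int)
    for node in graph_module.graph.nodes:
        if node.op != "call_function":
            # Skip placeholder, get_attr, and output nodes.
            continue
        name = getattr(node.target, "_name", None)
        if name is None:
            # Target does not carry a _name attribute; ignore it here.
            continue
        freq[name] += 1
    return dict(freq)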
backends/qualcomm/tests/utils.py

Lines changed: 0 additions & 1 deletion
@@ -350,7 +350,6 @@ def lower_module_and_test_output(
             # Therefore, won't want to pre-allocate
             # by memory manager in runtime.
             memory_planning_pass=MemoryPlanningPass(
-                memory_planning_algo="greedy",
                 alloc_graph_input=not self.shared_buffer,
                 alloc_graph_output=not self.shared_buffer,
             ),
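With the memory_planning_algo argument dropped, the pass is constructed from the allocation flags alone and relies on its default planning algorithm. A minimal sketch of that configuration, assuming ExecuTorch's MemoryPlanningPass and ExecutorchBackendConfig APIs; shared_buffer below is an illustrative stand-in for the test class's self.shared_buffer:

from executorch.exir import ExecutorchBackendConfig
from executorch.exir.passes import MemoryPlanningPass

shared_buffer = True  # illustrative stand-in for self.shared_buffer

backend_config = ExecutorchBackendConfig(
    memory_planning_pass=MemoryPlanningPass(
        # No memory_planning_algo argument: the pass uses its default
        # planning algorithm. When buffers are shared with the runtime,
        # graph inputs/outputs are not pre-allocated by the planner.
        alloc_graph_input=not shared_buffer,
        alloc_graph_output=not shared_buffer,
    ),
)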
