Skip to content

Commit f3b7b64

Browse files
authored
Merge branch 'main' into fix/112208
2 parents 047dada + f87f3ad commit f3b7b64

File tree

8 files changed

+258
-25
lines changed

8 files changed

+258
-25
lines changed

.github/workflows/containers/github-action-ci/stage1.Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ FROM docker.io/library/ubuntu:22.04 as base
22
ENV LLVM_SYSROOT=/opt/llvm
33

44
FROM base as stage1-toolchain
5-
ENV LLVM_VERSION=18.1.8
5+
ENV LLVM_VERSION=19.1.2
66

77
RUN apt-get update && \
88
apt-get install -y \

lldb/include/lldb/Core/Module.h

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030

3131
#include "llvm/ADT/DenseSet.h"
3232
#include "llvm/ADT/STLFunctionalExtras.h"
33+
#include "llvm/ADT/StableHashing.h"
3334
#include "llvm/ADT/StringRef.h"
3435
#include "llvm/Support/Chrono.h"
3536

@@ -1057,8 +1058,11 @@ class Module : public std::enable_shared_from_this<Module>,
10571058
/// time for the symbol tables can be aggregated here.
10581059
StatsDuration m_symtab_index_time;
10591060

1060-
std::once_flag m_optimization_warning;
1061-
std::once_flag m_language_warning;
1061+
/// A set of hashes of all warnings and errors, to avoid reporting them
1062+
/// multiple times to the same Debugger.
1063+
llvm::DenseMap<llvm::stable_hash, std::unique_ptr<std::once_flag>>
1064+
m_shown_diagnostics;
1065+
std::recursive_mutex m_diagnostic_mutex;
10621066

10631067
void SymbolIndicesToSymbolContextList(Symtab *symtab,
10641068
std::vector<uint32_t> &symbol_indexes,
@@ -1086,6 +1090,7 @@ class Module : public std::enable_shared_from_this<Module>,
10861090
void ReportWarning(const llvm::formatv_object_base &payload);
10871091
void ReportError(const llvm::formatv_object_base &payload);
10881092
void ReportErrorIfModifyDetected(const llvm::formatv_object_base &payload);
1093+
std::once_flag *GetDiagnosticOnceFlag(llvm::StringRef msg);
10891094
};
10901095

10911096
} // namespace lldb_private

lldb/source/Core/Module.cpp

Lines changed: 19 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1093,8 +1093,8 @@ void Module::ReportWarningOptimization(
10931093
ss << file_name
10941094
<< " was compiled with optimization - stepping may behave "
10951095
"oddly; variables may not be available.";
1096-
Debugger::ReportWarning(std::string(ss.GetString()), debugger_id,
1097-
&m_optimization_warning);
1096+
llvm::StringRef msg = ss.GetString();
1097+
Debugger::ReportWarning(msg.str(), debugger_id, GetDiagnosticOnceFlag(msg));
10981098
}
10991099

11001100
void Module::ReportWarningUnsupportedLanguage(
@@ -1104,8 +1104,8 @@ void Module::ReportWarningUnsupportedLanguage(
11041104
<< Language::GetNameForLanguageType(language)
11051105
<< "\". "
11061106
"Inspection of frame variables will be limited.";
1107-
Debugger::ReportWarning(std::string(ss.GetString()), debugger_id,
1108-
&m_language_warning);
1107+
llvm::StringRef msg = ss.GetString();
1108+
Debugger::ReportWarning(msg.str(), debugger_id, GetDiagnosticOnceFlag(msg));
11091109
}
11101110

11111111
void Module::ReportErrorIfModifyDetected(
@@ -1125,20 +1125,29 @@ void Module::ReportErrorIfModifyDetected(
11251125
}
11261126
}
11271127

1128+
std::once_flag *Module::GetDiagnosticOnceFlag(llvm::StringRef msg) {
1129+
std::lock_guard<std::recursive_mutex> guard(m_diagnostic_mutex);
1130+
auto &once_ptr = m_shown_diagnostics[llvm::stable_hash_name(msg)];
1131+
if (!once_ptr)
1132+
once_ptr = std::make_unique<std::once_flag>();
1133+
return once_ptr.get();
1134+
}
1135+
11281136
void Module::ReportError(const llvm::formatv_object_base &payload) {
11291137
StreamString strm;
11301138
GetDescription(strm.AsRawOstream(), lldb::eDescriptionLevelBrief);
1131-
strm.PutChar(' ');
1132-
strm.PutCString(payload.str());
1133-
Debugger::ReportError(strm.GetString().str());
1139+
std::string msg = payload.str();
1140+
strm << ' ' << msg;
1141+
Debugger::ReportError(strm.GetString().str(), {}, GetDiagnosticOnceFlag(msg));
11341142
}
11351143

11361144
void Module::ReportWarning(const llvm::formatv_object_base &payload) {
11371145
StreamString strm;
11381146
GetDescription(strm.AsRawOstream(), lldb::eDescriptionLevelFull);
1139-
strm.PutChar(' ');
1140-
strm.PutCString(payload.str());
1141-
Debugger::ReportWarning(std::string(strm.GetString()));
1147+
std::string msg = payload.str();
1148+
strm << ' ' << msg;
1149+
Debugger::ReportWarning(strm.GetString().str(), {},
1150+
GetDiagnosticOnceFlag(msg));
11421151
}
11431152

11441153
void Module::LogMessage(Log *log, const llvm::formatv_object_base &payload) {

lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2069,13 +2069,15 @@ void SymbolFileDWARF::UpdateExternalModuleListIfNeeded() {
20692069
Status error = ModuleList::GetSharedModule(dwo_module_spec, module_sp,
20702070
nullptr, nullptr, nullptr);
20712071
if (!module_sp) {
2072+
// ReportWarning also rate-limits based on the warning string,
2073+
// but in a -gmodules build, each object file has a similar DAG
2074+
// of module dependencies that would all be listed here.
20722075
GetObjectFile()->GetModule()->ReportWarning(
2073-
"{0:x16}: unable to locate module needed for external types: "
2074-
"{1}\nerror: {2}\nDebugging will be degraded due to missing "
2075-
"types. Rebuilding the project will regenerate the needed "
2076-
"module files.",
2077-
die.GetOffset(), dwo_module_spec.GetFileSpec().GetPath().c_str(),
2078-
error.AsCString("unknown error"));
2076+
"{0}", error.AsCString("unknown error"));
2077+
GetObjectFile()->GetModule()->ReportWarning(
2078+
"Unable to locate module needed for external types.\n"
2079+
"Debugging will be degraded due to missing types. Rebuilding the "
2080+
"project will regenerate the needed module files.");
20792081
continue;
20802082
}
20812083

@@ -2095,12 +2097,11 @@ void SymbolFileDWARF::UpdateExternalModuleListIfNeeded() {
20952097

20962098
if (dwo_id != dwo_dwo_id) {
20972099
GetObjectFile()->GetModule()->ReportWarning(
2098-
"{0:x16}: Module {1} is out-of-date (hash mismatch). Type "
2099-
"information "
2100-
"from this module may be incomplete or inconsistent with the rest of "
2101-
"the program. Rebuilding the project will regenerate the needed "
2102-
"module files.",
2103-
die.GetOffset(), dwo_module_spec.GetFileSpec().GetPath().c_str());
2100+
"Module {0} is out-of-date (hash mismatch).\n"
2101+
"Type information from this module may be incomplete or inconsistent "
2102+
"with the rest of the program. Rebuilding the project will "
2103+
"regenerate the needed module files.",
2104+
dwo_module_spec.GetFileSpec().GetPath());
21042105
}
21052106
}
21062107
}
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
# REQUIRES: system-darwin
2+
# Test the rate-limiting of module not found warnings.
3+
# RUN: rm -rf %t
4+
# RUN: mkdir -p %t
5+
6+
# RUN: echo 'module "C" { header "c.h" }' >%t/module.modulemap
7+
# RUN: echo 'struct c {};' >>%t/c.h
8+
# RUN: echo '@import C;' >%t/a.m
9+
# RUN: echo 'struct a { struct c c; } a;' >>%t/a.m
10+
# RUN: echo '@import C;' >%t/b.m
11+
# RUN: echo 'struct b { struct c c; } b;' >>%t/b.m
12+
# RUN: echo 'int main() {}' >>%t/b.m
13+
14+
# RUN: %clang_host -fmodules -Xclang -fmodules-cache-path=%t/cache -I%t -g -gmodules %t/a.m -o %t/a.o -c
15+
# RUN: %clang_host -fmodules -Xclang -fmodules-cache-path=%t/cache -I%t -g -gmodules %t/b.m -o %t/b.o -c
16+
# RUN: %clang_host %t/a.o %t/b.o -o %t/a.out
17+
# RUN: rm -rf %t/cache
18+
# RUN: %lldb %t/a.out -o "b main" -o run -o "p a" -o "p b" -o q 2>&1 | FileCheck %s
19+
# CHECK: {{[ab]}}.o{{.*}}/cache/{{.*}}/C-{{.*}}.pcm' does not exist
20+
# CHECK-NOT: /cache/{{.*}}/C-{{.*}}.pcm' does not exist
21+
# CHECK: {{[ab]}}.o{{.*}}/cache/{{.*}}/C-{{.*}}.pcm' does not exist
22+
# CHECK-NOT: /cache/{{.*}}/C-{{.*}}.pcm' does not exist

mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml

Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3114,6 +3114,143 @@ structured_op: !LinalgStructuredOpConfig
31143114
- !ScalarExpression
31153115
scalar_arg: KZp
31163116
--- !LinalgOpConfig
3117+
metadata: !LinalgOpMetadata
3118+
name: conv_2d_nchw_fchw_q
3119+
cpp_class_name: Conv2DNchwFchwQOp
3120+
doc: |-
3121+
Performs 2-D convolution with zero point offsets.
3122+
3123+
Layout:
3124+
* Input: NCHW.
3125+
* Kernel: FCHW.
3126+
3127+
Numeric casting is performed on the operands to the inner multiply, promoting
3128+
them to the same data type as the accumulator/output. This includes the zero
3129+
point offsets common to quantized operations.
3130+
implements:
3131+
- LinalgConvolutionOpInterface
3132+
structured_op: !LinalgStructuredOpConfig
3133+
args:
3134+
- !LinalgOperandDefConfig
3135+
name: I
3136+
kind: input_tensor
3137+
type_var: T1
3138+
shape_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> (s0,
3139+
s1, s2 * s3 + s4 * s5, s6 * s7 + s8 * s9)>
3140+
- !LinalgOperandDefConfig
3141+
name: K
3142+
kind: input_tensor
3143+
type_var: T2
3144+
shape_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> (s10,
3145+
s1, s4, s8)>
3146+
- !LinalgOperandDefConfig
3147+
name: IZp
3148+
kind: scalar
3149+
type_var: I32
3150+
- !LinalgOperandDefConfig
3151+
name: KZp
3152+
kind: scalar
3153+
type_var: I32
3154+
- !LinalgOperandDefConfig
3155+
name: O
3156+
kind: output_tensor
3157+
type_var: U
3158+
shape_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> (s0,
3159+
s10, s2, s6)>
3160+
- !LinalgOperandDefConfig
3161+
name: strides
3162+
kind: index_attr
3163+
index_attr_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] ->
3164+
(s3, s7)>
3165+
default_indices:
3166+
- 1
3167+
- 1
3168+
- !LinalgOperandDefConfig
3169+
name: dilations
3170+
kind: index_attr
3171+
index_attr_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] ->
3172+
(s5, s9)>
3173+
default_indices:
3174+
- 1
3175+
- 1
3176+
indexing_maps: !LinalgIndexingMapsConfig
3177+
static_indexing_maps:
3178+
- affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8,
3179+
s9, s10] -> (d0, d4, d2 * s3 + d5 * s5, d3 * s7 + d6 * s9)>
3180+
- affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8,
3181+
s9, s10] -> (d1, d4, d5, d6)>
3182+
- affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8,
3183+
s9, s10] -> ()>
3184+
- affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8,
3185+
s9, s10] -> ()>
3186+
- affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8,
3187+
s9, s10] -> (d0, d1, d2, d3)>
3188+
iterator_types:
3189+
- parallel
3190+
- parallel
3191+
- parallel
3192+
- parallel
3193+
- reduction
3194+
- reduction
3195+
- reduction
3196+
assignments:
3197+
- !ScalarAssign
3198+
arg: O
3199+
value: !ScalarExpression
3200+
scalar_fn:
3201+
kind: binary
3202+
fn_name: add
3203+
operands:
3204+
- !ScalarExpression
3205+
scalar_arg: O
3206+
- !ScalarExpression
3207+
scalar_fn:
3208+
kind: binary
3209+
fn_name: mul
3210+
operands:
3211+
- !ScalarExpression
3212+
scalar_fn:
3213+
kind: binary
3214+
fn_name: sub
3215+
operands:
3216+
- !ScalarExpression
3217+
scalar_fn:
3218+
kind: type
3219+
fn_name: cast_signed
3220+
type_var: U
3221+
operands:
3222+
- !ScalarExpression
3223+
scalar_arg: I
3224+
- !ScalarExpression
3225+
scalar_fn:
3226+
kind: type
3227+
fn_name: cast_signed
3228+
type_var: U
3229+
operands:
3230+
- !ScalarExpression
3231+
scalar_arg: IZp
3232+
- !ScalarExpression
3233+
scalar_fn:
3234+
kind: binary
3235+
fn_name: sub
3236+
operands:
3237+
- !ScalarExpression
3238+
scalar_fn:
3239+
kind: type
3240+
fn_name: cast_signed
3241+
type_var: U
3242+
operands:
3243+
- !ScalarExpression
3244+
scalar_arg: K
3245+
- !ScalarExpression
3246+
scalar_fn:
3247+
kind: type
3248+
fn_name: cast_signed
3249+
type_var: U
3250+
operands:
3251+
- !ScalarExpression
3252+
scalar_arg: KZp
3253+
--- !LinalgOpConfig
31173254
metadata: !LinalgOpMetadata
31183255
name: conv_2d_nchw_fchw
31193256
cpp_class_name: Conv2DNchwFchwOp

mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -876,6 +876,35 @@ def conv_2d_nhwc_fhwc_q(
876876
) * (TypeFn.cast_signed(U, K[D.f, D.kh, D.kw, D.c]) - TypeFn.cast_signed(U, KZp))
877877

878878

879+
@linalg_structured_op
880+
def conv_2d_nchw_fchw_q(
881+
I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW),
882+
K=TensorDef(T2, S.F, S.C, S.KH, S.KW),
883+
IZp=ScalarDef(I32),
884+
KZp=ScalarDef(I32),
885+
O=TensorDef(U, S.N, S.F, S.OH, S.OW, output=True),
886+
strides=IndexAttrDef(S.SH, S.SW, default=[1, 1]),
887+
dilations=IndexAttrDef(S.DH, S.DW, default=[1, 1]),
888+
):
889+
"""Performs 2-D convolution with zero point offsets.
890+
891+
Layout:
892+
* Input: NCHW.
893+
* Kernel: FCHW.
894+
895+
Numeric casting is performed on the operands to the inner multiply, promoting
896+
them to the same data type as the accumulator/output. This includes the zero
897+
point offsets common to quantized operations.
898+
"""
899+
implements(ConvolutionOpInterface)
900+
domain(D.n, D.f, D.oh, D.ow, D.c, D.kh, D.kw)
901+
O[D.n, D.f, D.oh, D.ow] += (
902+
TypeFn.cast_signed(
903+
U, I[D.n, D.c, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW]
904+
)
905+
- TypeFn.cast_signed(U, IZp)
906+
) * (TypeFn.cast_signed(U, K[D.f, D.c, D.kh, D.kw]) - TypeFn.cast_signed(U, KZp))
907+
879908
@linalg_structured_op
880909
def conv_2d_nchw_fchw(
881910
I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW),

mlir/test/Dialect/Linalg/roundtrip.mlir

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -664,3 +664,33 @@ func.func @winograd_output_dyn(%arg0: tensor<6x6x?x?x?x?xf32>, %arg1: tensor<?x?
664664

665665
// CHECK-LABEL: func @winograd_output_dyn
666666
// CHECK: linalg.winograd_output_transform m(4) r(3) ins(%arg0 : tensor<6x6x?x?x?x?xf32>) outs(%arg1 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
667+
668+
// -----
669+
670+
func.func @conv2d_channel_first_q(%img: tensor<100x3x224x224xi32>, %filt: tensor<64x3x5x5xi32>, %a: i32, %b: i32) -> tensor<100x64x220x220xi32> {
671+
%init = arith.constant dense<0> : tensor<100x64x220x220xi32>
672+
%1 = linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>,
673+
strides = dense<1> : tensor<2xi64>}
674+
ins(%img, %filt, %a, %b : tensor<100x3x224x224xi32>, tensor<64x3x5x5xi32>, i32, i32)
675+
outs(%init : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32>
676+
return %1 : tensor<100x64x220x220xi32>
677+
}
678+
679+
// CHECK-LABEL: func @conv2d_channel_first_q(
680+
// CHECK: %[[arg0:[a-zA-Z0-9]*]]: tensor<100x3x224x224xi32>, %[[arg1:[a-zA-Z0-9]*]]: tensor<64x3x5x5xi32>, %[[arg2:[a-zA-Z0-9]*]]: i32, %[[arg3:[a-zA-Z0-9]*]]: i32)
681+
// CHECK: linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%[[arg0]], %[[arg1]], %[[arg2]], %[[arg3]] : tensor<100x3x224x224xi32>, tensor<64x3x5x5xi32>, i32, i32) outs(%{{.*}} : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32>
682+
683+
// -----
684+
685+
func.func @conv2d_channel_first_q_promote(%img: tensor<100x3x224x224xi8>, %filt: tensor<64x3x5x5xi8>, %a: i8, %b: i8) -> tensor<100x64x220x220xi32> {
686+
%init = arith.constant dense<0> : tensor<100x64x220x220xi32>
687+
%1 = linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>,
688+
strides = dense<1> : tensor<2xi64>}
689+
ins(%img, %filt, %a, %b : tensor<100x3x224x224xi8>, tensor<64x3x5x5xi8>, i8, i8)
690+
outs(%init : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32>
691+
return %1 : tensor<100x64x220x220xi32>
692+
}
693+
694+
// CHECK-LABEL: func @conv2d_channel_first_q_promote(
695+
// CHECK: %[[arg0:[a-zA-z0-9]*]]: tensor<100x3x224x224xi8>, %[[arg1:[a-zA-z0-9]*]]: tensor<64x3x5x5xi8>, %[[arg2:[a-zA-z0-9]*]]: i8, %[[arg3:[a-zA-z0-9]*]]: i8)
696+
// CHECK: linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%[[arg0]], %[[arg1]], %[[arg2]], %[[arg3]] : tensor<100x3x224x224xi8>, tensor<64x3x5x5xi8>, i8, i8) outs(%{{.*}} : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32>

0 commit comments

Comments
 (0)