Skip to content

Commit e377436

Browse files
committed
Merge branch 'master' of github.com:apple/swift into autodiff_upstream_conformances_derivatives_master
2 parents da36555 + f27f1cd commit e377436

File tree

351 files changed

+16359
-2436
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

351 files changed

+16359
-2436
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
|**[Android](https://github.com/apple/swift-community-hosted-continuous-integration/blob/master/nodes/x86_64_ubuntu_16_04_LTS_android.json)** | ARMv7 |[![Build Status](https://ci-external.swift.org/job/oss-swift-RA-linux-ubuntu-16.04-android/lastCompletedBuild/badge/icon)](https://ci-external.swift.org/job/oss-swift-RA-linux-ubuntu-16.04-android)|
2020
|**[Android](https://github.com/apple/swift-community-hosted-continuous-integration/blob/master/nodes/x86_64_ubuntu_16_04_LTS_android.json)** | AArch64 |[![Build Status](https://ci-external.swift.org/job/oss-swift-RA-linux-ubuntu-16.04-android-arm64/lastCompletedBuild/badge/icon)](https://ci-external.swift.org/job/oss-swift-RA-linux-ubuntu-16.04-android-arm64)|
2121
|**[Windows 2019 (VS 2017)](https://github.com/apple/swift-community-hosted-continuous-integration/blob/master/nodes/x86_64_windows_2019.json)** | x86_64 | [![Build Status](https://ci-external.swift.org/job/oss-swift-windows-x86_64/lastCompletedBuild/badge/icon)](https://ci-external.swift.org/job/oss-swift-windows-x86_64)|
22-
|**[Windows 2019 (VS 2019)](https://github.com/apple/swift-community-hosted-continuous-integration/blob/master/nodes/x86_64_windows_2019_VS2019.json)** | x86_64 | [![Build Status](https://ci-external.swift.org/job/oss-swift-windows-x86_64-vs2019/lastCompletedBuild/badge/icon)](https://ci-external.swift.org/job/ooss-swift-windows-x86_64-vs2019)|
22+
|**[Windows 2019 (VS 2019)](https://github.com/apple/swift-community-hosted-continuous-integration/blob/master/nodes/x86_64_windows_2019_VS2019.json)** | x86_64 | [![Build Status](https://ci-external.swift.org/job/oss-swift-windows-x86_64-vs2019/lastCompletedBuild/badge/icon)](https://ci-external.swift.org/job/oss-swift-windows-x86_64-vs2019)|
2323

2424
**Swift TensorFlow Community-Hosted CI Platforms**
2525

benchmark/CMakeLists.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,9 @@ endif()
227227
set(SWIFT_BENCHMARK_EXTRA_FLAGS "" CACHE STRING
228228
"Extra options to pass to swiftc when building the benchmarks")
229229

230+
set(SWIFT_BENCHMARK_UNOPTIMIZED_DRIVER NO CACHE BOOL
231+
"Build the benchmark driver utilities without optimization (default: no)")
232+
230233
if (SWIFT_BENCHMARK_BUILT_STANDALONE)
231234
# This option's value must match the value of the same option used when
232235
# building the swift runtime.

benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -379,10 +379,13 @@ function (swift_benchmark_compile_archopts)
379379

380380
set(common_swift4_options ${common_options} "-swift-version" "4")
381381

382-
# Always optimize the driver modules.
383-
# Note that we compile the driver for Osize also with -Osize
384-
# (and not with -O), because of <rdar://problem/19614516>.
385-
string(REPLACE "Onone" "O" driver_opt "${optflag}")
382+
# Always optimize the driver modules, unless we're building benchmarks for
383+
# debugger testing.
384+
if(NOT SWIFT_BENCHMARK_UNOPTIMIZED_DRIVER)
385+
# Note that we compile the driver for Osize also with -Osize
386+
# (and not with -O), because of <rdar://problem/19614516>.
387+
string(REPLACE "Onone" "O" driver_opt "${optflag}")
388+
endif()
386389

387390
set(common_options_driver
388391
"-c"

benchmark/scripts/Benchmark_DTrace.in

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
103103
stdout=subprocess.PIPE,
104104
stderr=open("/dev/null", "w"),
105105
env=e,
106+
universal_newlines=True,
106107
)
107108
results = [x for x in p.communicate()[0].split("\n") if len(x) > 0]
108109
return [
@@ -136,7 +137,9 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
136137
results.append(result_3)
137138
results.append(single_iter)
138139

139-
return DTraceResult(test_name, int(not foundInstability), results)
140+
return DTraceResult(
141+
test_name, int(not foundInstability), results, self.csv_output
142+
)
140143

141144

142145
SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))

benchmark/scripts/Benchmark_Driver

Lines changed: 39 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ class `BenchmarkDoctor` analyzes performance tests, implements `check` COMMAND.
2626
"""
2727

2828
import argparse
29+
import functools
2930
import glob
3031
import logging
3132
import math
@@ -64,7 +65,9 @@ class BenchmarkDriver(object):
6465
os.environ["SWIFT_DETERMINISTIC_HASHING"] = "1"
6566

6667
def _invoke(self, cmd):
67-
return self._subprocess.check_output(cmd, stderr=self._subprocess.STDOUT)
68+
return self._subprocess.check_output(
69+
cmd, stderr=self._subprocess.STDOUT, universal_newlines=True
70+
)
6871

6972
@property
7073
def test_harness(self):
@@ -165,7 +168,7 @@ class BenchmarkDriver(object):
165168
)
166169
output = self._invoke(cmd)
167170
results = self.parser.results_from_string(output)
168-
return results.items()[0][1] if test else results
171+
return list(results.items())[0][1] if test else results
169172

170173
def _cmd_run(
171174
self,
@@ -207,7 +210,7 @@ class BenchmarkDriver(object):
207210
a.merge(b)
208211
return a
209212

210-
return reduce(
213+
return functools.reduce(
211214
merge_results,
212215
[
213216
self.run(test, measure_memory=True, num_iters=1, quantile=20)
@@ -249,19 +252,21 @@ class BenchmarkDriver(object):
249252
print(format(values))
250253

251254
def result_values(r):
252-
return map(
253-
str,
254-
[
255-
r.test_num,
256-
r.name,
257-
r.num_samples,
258-
r.min,
259-
r.samples.q1,
260-
r.median,
261-
r.samples.q3,
262-
r.max,
263-
r.max_rss,
264-
],
255+
return list(
256+
map(
257+
str,
258+
[
259+
r.test_num,
260+
r.name,
261+
r.num_samples,
262+
r.min,
263+
r.samples.q1,
264+
r.median,
265+
r.samples.q3,
266+
r.max,
267+
r.max_rss,
268+
],
269+
)
265270
)
266271

267272
header = [
@@ -370,7 +375,12 @@ class MarkdownReportHandler(logging.StreamHandler):
370375
msg = self.format(record)
371376
stream = self.stream
372377
try:
373-
if isinstance(msg, unicode) and getattr(stream, "encoding", None):
378+
# In Python 2 Unicode strings have a special type
379+
unicode_type = unicode
380+
except NameError:
381+
unicode_type = str
382+
try:
383+
if isinstance(msg, unicode_type) and getattr(stream, "encoding", None):
374384
stream.write(msg.encode(stream.encoding))
375385
else:
376386
stream.write(msg)
@@ -487,16 +497,14 @@ class BenchmarkDoctor(object):
487497
name = measurements["name"]
488498
setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
489499
setup = 0 if ratio < 0.05 else setup
490-
runtime = min(
491-
[
492-
(result.samples.min - correction)
493-
for i_series in [
494-
BenchmarkDoctor._select(measurements, num_iters=i)
495-
for correction in [(setup / i) for i in [1, 2]]
496-
]
497-
for result in i_series
498-
]
499-
)
500+
501+
runtimes = []
502+
for i in range(1, 3):
503+
correction = setup / i
504+
i_series = BenchmarkDoctor._select(measurements, num_iters=i)
505+
for result in i_series:
506+
runtimes.append(result.samples.min - correction)
507+
runtime = min(runtimes)
500508

501509
threshold = 1000
502510
if threshold < runtime:
@@ -572,7 +580,9 @@ class BenchmarkDoctor(object):
572580

573581
@staticmethod
574582
def _reasonable_setup_time(measurements):
575-
setup = min([result.setup for result in BenchmarkDoctor._select(measurements)])
583+
setup = min(
584+
[result.setup or 0 for result in BenchmarkDoctor._select(measurements)]
585+
)
576586
if 200000 < setup: # 200 ms
577587
BenchmarkDoctor.log_runtime.error(
578588
"'%s' setup took at least %d μs.", measurements["name"], setup
@@ -857,6 +867,7 @@ def parse_args(args):
857867
help="See COMMAND -h for additional arguments",
858868
metavar="COMMAND",
859869
)
870+
subparsers.required = True
860871

861872
shared_benchmarks_parser = argparse.ArgumentParser(add_help=False)
862873
benchmarks_group = shared_benchmarks_parser.add_mutually_exclusive_group()

benchmark/scripts/Benchmark_QuickCheck.in

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,7 @@ class QuickCheckBenchmarkDriver(perf_test_driver.BenchmarkDriver):
6363
],
6464
stdout=subprocess.PIPE,
6565
stderr=subprocess.PIPE,
66+
universal_newlines=True,
6667
)
6768
error_out = p.communicate()[1].split("\n")
6869
result = p.returncode
@@ -76,7 +77,7 @@ class QuickCheckBenchmarkDriver(perf_test_driver.BenchmarkDriver):
7677
try:
7778
args = [data, num_iters]
7879
perf_test_driver.run_with_timeout(self.run_test_inner, args)
79-
except Exception, e:
80+
except Exception as e:
8081
sys.stderr.write(
8182
"Child Process Failed! (%s,%s). Error: %s\n"
8283
% (data["path"], data["test_name"], e)

benchmark/scripts/Benchmark_RuntimeLeaksRunner.in

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
8989
],
9090
stdout=subprocess.PIPE,
9191
stderr=subprocess.PIPE,
92+
universal_newlines=True,
9293
)
9394
error_out = p.communicate()[1].split("\n")
9495
result = p.returncode
@@ -102,7 +103,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
102103
try:
103104
args = [data, num_iters]
104105
result = perf_test_driver.run_with_timeout(self.run_test_inner, args)
105-
except Exception, e:
106+
except Exception as e:
106107
sys.stderr.write(
107108
"Child Process Failed! (%s,%s). Error: %s\n"
108109
% (data["path"], data["test_name"], e)

benchmark/scripts/compare_perf_tests.py

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ class `ReportFormatter` creates the test comparison report in specified format.
3030
from __future__ import print_function
3131

3232
import argparse
33+
import functools
3334
import re
3435
import sys
3536
from bisect import bisect, bisect_left, bisect_right
@@ -142,7 +143,7 @@ def num_samples(self):
142143
@property
143144
def all_samples(self):
144145
"""List of all samples in ascending order."""
145-
return sorted(self.samples + self.outliers, key=lambda s: s.i)
146+
return sorted(self.samples + self.outliers, key=lambda s: s.i or -1)
146147

147148
@property
148149
def min(self):
@@ -189,13 +190,16 @@ def sd(self):
189190
return 0 if self.count < 2 else sqrt(self.S_runtime / (self.count - 1))
190191

191192
@staticmethod
192-
def running_mean_variance((k, M_, S_), x):
193+
def running_mean_variance(stats, x):
193194
"""Compute running variance, B. P. Welford's method.
194195
195196
See Knuth TAOCP vol 2, 3rd edition, page 232, or
196197
https://www.johndcook.com/blog/standard_deviation/
197198
M is mean, Standard Deviation is defined as sqrt(S/k-1)
198199
"""
200+
201+
(k, M_, S_) = stats
202+
199203
k = float(k + 1)
200204
M = M_ + (x - M_) / k
201205
S = S_ + (x - M_) * (x - M)
@@ -247,7 +251,7 @@ def __init__(self, csv_row, quantiles=False, memory=False, delta=False, meta=Fal
247251
runtimes = csv_row[3:mem_index] if memory or meta else csv_row[3:]
248252
if delta:
249253
runtimes = [int(x) if x else 0 for x in runtimes]
250-
runtimes = reduce(
254+
runtimes = functools.reduce(
251255
lambda l, x: l.append(l[-1] + x) or l if l else [x],  # running sum
252256
runtimes,
253257
None,
@@ -315,7 +319,8 @@ def merge(self, r):
315319
"""
316320
# Statistics
317321
if self.samples and r.samples:
318-
map(self.samples.add, r.samples.samples)
322+
for sample in r.samples.samples:
323+
self.samples.add(sample)
319324
sams = self.samples
320325
self.num_samples = sams.num_samples
321326
self.min, self.max, self.median, self.mean, self.sd = (
@@ -490,7 +495,7 @@ def add_or_merge(names, r):
490495
names[r.name].merge(r)
491496
return names
492497

493-
return reduce(add_or_merge, tests, dict())
498+
return functools.reduce(add_or_merge, tests, dict())
494499

495500
@staticmethod
496501
def results_from_string(log_contents):
@@ -544,10 +549,12 @@ def __init__(self, old_results, new_results, delta_threshold):
544549
def compare(name):
545550
return ResultComparison(old_results[name], new_results[name])
546551

547-
comparisons = map(compare, comparable_tests)
552+
comparisons = list(map(compare, comparable_tests))
548553

549554
def partition(l, p):
550-
return reduce(lambda x, y: x[not p(y)].append(y) or x, l, ([], []))
555+
return functools.reduce(
556+
lambda x, y: x[not p(y)].append(y) or x, l, ([], [])
557+
)
551558

552559
decreased, not_decreased = partition(
553560
comparisons, lambda c: c.ratio < (1 - delta_threshold)
@@ -668,7 +675,7 @@ def _column_widths(self):
668675
def max_widths(maximum, widths):
669676
return map(max, zip(maximum, widths))
670677

671-
return reduce(max_widths, widths, [0] * 5)
678+
return list(functools.reduce(max_widths, widths, [0] * 5))
672679

673680
def _formatted_text(
674681
self, label_formatter, COLUMN_SEPARATOR, DELIMITER_ROW, SEPARATOR, SECTION

benchmark/scripts/perf_test_driver/perf_test_driver.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,8 @@ def process_input(self, data):
111111
def run_for_opt_level(self, binary, opt_level, test_filter):
112112
print("testing driver at path: %s" % binary)
113113
names = []
114-
for l in subprocess.check_output([binary, "--list"]).split("\n")[1:]:
114+
output = subprocess.check_output([binary, "--list"], universal_newlines=True)
115+
for l in output.split("\n")[1:]:
115116
m = BENCHMARK_OUTPUT_RE.match(l)
116117
if m is None:
117118
continue

0 commit comments

Comments
 (0)