% ai.bib — BibTeX bibliography (GitHub web-page chrome and line-number gutter removed from scraped copy)
% Encoding: UTF-8
@Article{hofstadter1994copycat,
  author    = {Hofstadter, Douglas R and Mitchell, Melanie},
  title     = {The {Copycat} Project: A Model of Mental Fluidity and Analogy-Making},
  journal   = {Advances in Connectionist and Neural Computation Theory},
  year      = {1994},
  publisher = {Ablex Publishing},
}
@Article{kim2018not,
  author    = {Kim, Junkyung and Ricci, Matthew and Serre, Thomas},
  title     = {{Not-So-CLEVR}: Learning Same--Different Relations Strains Feedforward Neural Networks},
  journal   = {Interface Focus},
  year      = {2018},
  volume    = {8},
  number    = {4},
  pages     = {20180011},
  publisher = {The Royal Society},
}
@InProceedings{zhang2019raven,
  author    = {Zhang, Chi and Gao, Feng and Jia, Baoxiong and Zhu, Yixin and Zhu, Song-Chun},
  title     = {{RAVEN}: A Dataset for Relational and Analogical Visual Reasoning},
  booktitle = {Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
  year      = {2019},
  pages     = {5317--5327},
}
@InProceedings{alcorn2019strike,
  author    = {Alcorn, Michael A and Li, Qi and Gong, Zhitao and Wang, Chengfei and Mai, Long and Ku, Wei-Shinn and Nguyen, Anh},
  title     = {Strike (with) a Pose: Neural Networks Are Easily Fooled by Strange Poses of Familiar Objects},
  booktitle = {Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
  year      = {2019},
  pages     = {4845--4854},
}
@InProceedings{eykholt2018robust,
  author    = {Eykholt, Kevin and Evtimov, Ivan and Fernandes, Earlence and Li, Bo and Rahmati, Amir and Xiao, Chaowei and Prakash, Atul and Kohno, Tadayoshi and Song, Dawn},
  title     = {Robust Physical-World Attacks on Deep Learning Visual Classification},
  booktitle = {Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
  year      = {2018},
  pages     = {1625--1634},
}
@Article{kansky2017schema,
  author        = {Kansky, Ken and Silver, Tom and M{\'e}ly, David A and Eldawy, Mohamed and L{\'a}zaro-Gredilla, Miguel and Lou, Xinghua and Dorfman, Nimrod and Sidor, Szymon and Phoenix, Scott and George, Dileep},
  title         = {Schema Networks: Zero-Shot Transfer with a Generative Causal Model of Intuitive Physics},
  journal       = {arXiv preprint arXiv:1706.04317},
  year          = {2017},
  archiveprefix = {arXiv},
  eprint        = {1706.04317},
}
@InProceedings{levesque2012winograd,
  author    = {Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
  title     = {The {Winograd} Schema Challenge},
  booktitle = {Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
  year      = {2012},
}
@Article{mccarthy2006proposal,
  author  = {McCarthy, John and Minsky, Marvin L and Rochester, Nathaniel and Shannon, Claude E},
  title   = {A Proposal for the {Dartmouth} Summer Research Project on Artificial Intelligence, {August} 31, 1955},
  journal = {{AI} Magazine},
  year    = {2006},
  volume  = {27},
  number  = {4},
  pages   = {12--12},
}
@Article{mnih2015human,
  author    = {Mnih, Volodymyr and Kavukcuoglu, Koray and Silver, David and Rusu, Andrei A and Veness, Joel and Bellemare, Marc G and Graves, Alex and Riedmiller, Martin and Fidjeland, Andreas K and Ostrovski, Georg and others},
  title     = {Human-Level Control through Deep Reinforcement Learning},
  journal   = {Nature},
  year      = {2015},
  volume    = {518},
  number    = {7540},
  pages     = {529--533},
  publisher = {Nature Publishing Group},
}
@InProceedings{sharif2016accessorize,
  author    = {Sharif, Mahmood and Bhagavatula, Sruti and Bauer, Lujo and Reiter, Michael K},
  title     = {Accessorize to a Crime: Real and Stealthy Attacks on State-of-the-Art Face Recognition},
  booktitle = {Proceedings of the 2016 {ACM} {SIGSAC} Conference on Computer and Communications Security},
  year      = {2016},
  pages     = {1528--1540},
}
@Article{szegedy2013intriguing,
  author        = {Szegedy, Christian and Zaremba, Wojciech and Sutskever, Ilya and Bruna, Joan and Erhan, Dumitru and Goodfellow, Ian and Fergus, Rob},
  title         = {Intriguing Properties of Neural Networks},
  journal       = {arXiv preprint arXiv:1312.6199},
  year          = {2013},
  archiveprefix = {arXiv},
  eprint        = {1312.6199},
}
@Article{Chollet2019OnTM,
  author        = {Chollet, Fran{\c{c}}ois},
  title         = {On the Measure of Intelligence},
  journal       = {arXiv preprint arXiv:1911.01547},
  year          = {2019},
  archiveprefix = {arXiv},
  eprint        = {1911.01547},
  abstract      = {To make deliberate progress towards more intelligent and more human-like artificial systems, we need to be following an appropriate feedback signal: we need to be able to define and evaluate intelligence in a way that enables comparisons between two systems, as well as comparisons with humans. Over the past hundred years, there has been an abundance of attempts to define and measure intelligence, across both the fields of psychology and AI. We summarize and critically assess these definitions and evaluation approaches, while making apparent the two historical conceptions of intelligence that have implicitly guided them. We note that in practice, the contemporary AI community still gravitates towards benchmarking intelligence by comparing the skill exhibited by AIs and humans at specific tasks such as board games and video games. We argue that solely measuring skill at any given task falls short of measuring intelligence, because skill is heavily modulated by prior knowledge and experience: unlimited priors or unlimited training data allow experimenters to "buy" arbitrary levels of skills for a system, in a way that masks the system's own generalization power. We then articulate a new formal definition of intelligence based on Algorithmic Information Theory, describing intelligence as skill-acquisition efficiency and highlighting the concepts of scope, generalization difficulty, priors, and experience. Using this definition, we propose a set of guidelines for what a general AI benchmark should look like. Finally, we present a benchmark closely following these guidelines, the Abstraction and Reasoning Corpus (ARC), built upon an explicit set of priors designed to be as close as possible to innate human priors. We argue that ARC can be used to measure a human-like form of general fluid intelligence and that it enables fair general intelligence comparisons between AI systems and humans},
}
@Comment{jabref-meta: databaseType:bibtex;}