-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.html
More file actions
825 lines (788 loc) · 57 KB
/
index.html
File metadata and controls
825 lines (788 loc) · 57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"><head>
<meta charset="utf-8">
<meta name="generator" content="quarto-1.7.32">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
<meta name="author" content="Victor De Lima">
<meta name="dcterms.date" content="2023-04-27">
<title>Applying Reinforcement Learning to the Traveling Salesman Problem</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
div.columns{display: flex; gap: min(4vw, 1.5em);}
div.column{flex: auto; overflow-x: auto;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
ul.task-list li input[type="checkbox"] {
width: 0.8em;
margin: 0 0.8em 0.2em -1em; /* quarto-specific, see https://github.com/quarto-dev/quarto-cli/issues/4556 */
vertical-align: middle;
}
/* CSS for citations */
div.csl-bib-body { }
div.csl-entry {
clear: both;
}
.hanging-indent div.csl-entry {
margin-left:2em;
text-indent:-2em;
}
div.csl-left-margin {
min-width:2em;
float:left;
}
div.csl-right-inline {
margin-left:2em;
padding-left:1em;
}
div.csl-indent {
margin-left: 2em;
}</style>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.min.js" integrity="sha512-bLT0Qm9VnAYZDflyKcBaQ2gg0hSYNQrJ8RilYldYQ1FxQYoCLtUjuuRuZo+fjqhx/qtq/1itJ0C2ejDxltZVFg==" crossorigin="anonymous"></script><script src="index_files/libs/clipboard/clipboard.min.js"></script>
<script src="index_files/libs/quarto-html/quarto.js" type="module"></script>
<script src="index_files/libs/quarto-html/tabsets/tabsets.js" type="module"></script>
<script src="index_files/libs/quarto-html/popper.min.js"></script>
<script src="index_files/libs/quarto-html/tippy.umd.min.js"></script>
<script src="index_files/libs/quarto-html/anchor.min.js"></script>
<link href="index_files/libs/quarto-html/tippy.css" rel="stylesheet">
<link href="index_files/libs/quarto-html/quarto-syntax-highlighting-234273d1456647dabc34a594ac50e507.css" rel="stylesheet" id="quarto-text-highlighting-styles">
<script src="index_files/libs/bootstrap/bootstrap.min.js"></script>
<link href="index_files/libs/bootstrap/bootstrap-icons.css" rel="stylesheet">
<link href="index_files/libs/bootstrap/bootstrap-6504546882648b335aa8eca10682d9d9.min.css" rel="stylesheet" append-hash="true" id="quarto-bootstrap" data-mode="light">
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.6/require.min.js" integrity="sha512-c3Nl8+7g4LMSTdrm621y7kf9v3SDPnhxLNhcjFJbKECVnmZHTdo+IRO05sNLTH/D3vA6u1X32ehoLC7WFVdheg==" crossorigin="anonymous"></script>
<script type="application/javascript">define('jquery', [],function() {return window.jQuery;})</script>
<script src="https://cdnjs.cloudflare.com/polyfill/v3/polyfill.min.js?features=es6"></script>
<script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml-full.js" type="text/javascript"></script>
<script type="text/javascript">
// Expose a math-typesetting helper on window.Quarto so dynamically
// revealed content (tabs, tooltips) can be re-typeset on demand.
const typesetMath = (el) => {
  if (window.MathJax) {
    // MathJax path: typeset only the given element's subtree.
    window.MathJax.typeset([el]);
  } else if (window.katex) {
    // KaTeX fallback: render every inline/display math span under `el`.
    const macros = [];
    for (const mathEl of el.getElementsByClassName("math")) {
      // Only SPAN elements hold renderable TeX source.
      if (mathEl.tagName != "SPAN") continue;
      const texText = mathEl.firstChild;
      window.katex.render(texText.data, mathEl, {
        displayMode: mathEl.classList.contains('display'),
        throwOnError: false,
        macros: macros,
        fleqn: false
      });
    }
  }
};
window.Quarto = {
  typesetMath
};
</script>
<link rel="stylesheet" href="assets/custom.css">
</head>
<body class="quarto-light">
<div id="quarto-content" class="page-columns page-rows-contents page-layout-article">
<div id="quarto-margin-sidebar" class="sidebar margin-sidebar">
<nav id="TOC" role="doc-toc" class="toc-active">
<h2 id="toc-title">Table of contents</h2>
<ul>
<li><a href="#abstract" id="toc-abstract" class="nav-link active" data-scroll-target="#abstract">Abstract</a></li>
<li><a href="#introduction" id="toc-introduction" class="nav-link" data-scroll-target="#introduction"><span class="header-section-number">1</span> Introduction</a></li>
<li><a href="#related-work" id="toc-related-work" class="nav-link" data-scroll-target="#related-work"><span class="header-section-number">2</span> Related Work</a></li>
<li><a href="#methodology" id="toc-methodology" class="nav-link" data-scroll-target="#methodology"><span class="header-section-number">3</span> Methodology</a>
<ul class="collapse">
<li><a href="#problem-formulation" id="toc-problem-formulation" class="nav-link" data-scroll-target="#problem-formulation"><span class="header-section-number">3.1</span> Problem Formulation</a></li>
<li><a href="#data" id="toc-data" class="nav-link" data-scroll-target="#data"><span class="header-section-number">3.2</span> Data</a></li>
</ul></li>
<li><a href="#experiments-and-results" id="toc-experiments-and-results" class="nav-link" data-scroll-target="#experiments-and-results"><span class="header-section-number">4</span> Experiments and Results</a></li>
<li><a href="#conclusions" id="toc-conclusions" class="nav-link" data-scroll-target="#conclusions"><span class="header-section-number">5</span> Conclusions</a></li>
<li><a href="#references" id="toc-references" class="nav-link" data-scroll-target="#references">References</a></li>
</ul>
</nav>
</div>
<main class="content" id="quarto-document-content">
<header id="title-block-header" class="quarto-title-block default">
<div class="quarto-title">
<h1 class="title">Applying Reinforcement Learning to the Traveling Salesman Problem</h1>
</div>
<div class="quarto-title-meta-author">
<div class="quarto-title-meta-heading">Author</div>
<div class="quarto-title-meta-heading">Affiliation</div>
<div class="quarto-title-meta-contents">
<p class="author">Victor De Lima</p>
</div>
<div class="quarto-title-meta-contents">
<p class="affiliation">Georgetown University</p>
</div>
</div>
<div class="quarto-title-meta">
<div>
<div class="quarto-title-meta-heading">Published</div>
<div class="quarto-title-meta-contents">
<p class="date">April 27, 2023</p>
</div>
</div>
<div>
<div class="quarto-title-meta-heading">Project URL</div>
<div class="quarto-title-meta-contents">
<p class="project-url">
<a href="https://purl.org/victordelima/tsb_with_reinforcement_learning">https://purl.org/victordelima/tsb_with_reinforcement_learning</a>
</p>
</div>
</div>
</div>
</header>
<ul>
<li>Visit my <a href="https://www.victordelima.com/">website</a>.</li>
</ul>
<div class="callout callout-style-simple callout-note">
<div class="callout-body d-flex">
<div class="callout-icon-container">
<i class="callout-icon"></i>
</div>
<div class="callout-body-container">
<p>All code used in this report is publicly available on <a href="https://github.com/vdelimad/tsb-with-reinforcement-learning">GitHub</a>.</p>
</div>
</div>
</div>
<section id="abstract" class="level3 unnumbered">
<h3 class="unnumbered anchored" data-anchor-id="abstract">Abstract</h3>
<p>The Traveling Salesman Problem (TSP) is a well-studied optimization problem with a wide variety of applications and approaches to solving it. The methods and tools of Reinforcement Learning (RL) offer a distinctive strategy for approaching the TSP due to the ease with which its reward structure can be modified. Using data obtained from Skyscanner, this study examines how Q-learning, an RL technique, can optimize a travel route through 42 Asian cities. We first provide a theoretical background into the tools discussed, followed by an exposition of the methodology and experiments. The study’s results show that Q-learning is a very effective method for solving the TSP. We also discuss the model’s limitations and how it can be extended in future work.</p>
<div id="fig-travel-agent" class="quarto-float quarto-figure quarto-figure-center anchored" data-fig-align="center">
<figure class="quarto-float quarto-float-fig figure">
<div aria-describedby="fig-travel-agent-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<img src="assets/resources/Images/TravelAgents/travel_agent_1.jpeg" class="img-fluid quarto-figure quarto-figure-center figure-img" style="width:75.0%">
</div>
<figcaption class="quarto-float-caption-bottom quarto-float-caption quarto-float-fig" id="fig-travel-agent-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
Figure 1: Picture of a robot travel agent generated with Stable Diffusion <span class="citation" data-cites="stability_ai_stable_2023">[<a href="#ref-stability_ai_stable_2023" role="doc-biblioref">1</a>]</span> using prompt “<em>a friendly-looking humanoid robot travel agent greeting you on its work desk and a map hanging in the back wall</em>”
</figcaption>
</figure>
</div>
</section>
<section id="introduction" class="level2" data-number="1">
<h2 data-number="1" class="anchored" data-anchor-id="introduction"><span class="header-section-number">1</span> Introduction</h2>
<p>Anyone who has planned a trip that involves visiting several cities will have undoubtedly found themselves wondering what is the best way to visit each location while minimizing the cost of the trip. Each location has several considerations, such as distance, cost, and the importance of visiting specific places. The question can become overwhelming with the realization that we would have to consider each city as both a potential starting point and a potential next stop from any other city. We can analyze such a scenario through the framework of the Traveling Salesman Problem (TSP). Karl Menger, one of the first to study the problem mathematically, defines the TSP as “the task to find, for finitely many points whose pairwise distances are known, the shortest route connecting the points” <span class="citation" data-cites="schrijver_history_2005">[<a href="#ref-schrijver_history_2005" role="doc-biblioref">2</a>]</span>. In other words, the goal of the TSP is to find the shortest possible route that visits every point once and then returns to the origin. The TSP is an NP-hard combinatorial optimization problem. It is simple to solve by brute force for five stops by trying all combinations (5! problem) but already impossible for 50 (50! problem).</p>
<p>The TSP is a well-known and widely studied problem in mathematics and computer science. Beyond travel, the TSP has applications in many fields, including manufacturing, transportation, and engineering. For example, some applications that successfully employ the TSP include overhauling gas turbine engines <span class="citation" data-cites="plante_product_1987">[<a href="#ref-plante_product_1987" role="doc-biblioref">3</a>]</span>, X-Ray crystallography <span class="citation" data-cites="bland_large_1989">[<a href="#ref-bland_large_1989" role="doc-biblioref">4</a>]</span>, and computer wiring <span class="citation" data-cites="lenstra_simple_1975">[<a href="#ref-lenstra_simple_1975" role="doc-biblioref">5</a>]</span>. Although researchers have studied the TSP extensively and developed several discrete optimization techniques to solve it, Reinforcement Learning (RL) offers many advantages compared to classical optimization methods. Most importantly, RL offers a framework in which it is relatively simple to change the reward structure to adjust the optimization problem under changing circumstances.</p>
<p>In this paper, we approach the TSP from an RL perspective and utilize Q-Learning to optimize a 42-city travel route using real prices obtained from SkyScanner. First, we discuss the theoretical background that motivates the experiment. Then, we explore the methodology and data on which we built the analysis. Next, we review the results, which show that Q-learning is a very effective method for solving the TSP. Lastly, we offer a discussion on the model’s limitations and the ways to extend the model in future research.</p>
</section>
<section id="related-work" class="level2" data-number="2">
<h2 data-number="2" class="anchored" data-anchor-id="related-work"><span class="header-section-number">2</span> Related Work</h2>
<p>The TSP is an extension of the Hamiltonian Circuit Problem, a formulation that asks whether a graph contains a cycle that visits each node exactly once, which is itself an NP-complete problem <span class="citation" data-cites="miller_reducibility_1972">[<a href="#ref-miller_reducibility_1972" role="doc-biblioref">6</a>]</span>. The TSP builds on this theory and asks what is the shortest possible Hamiltonian Circuit given a weighted graph. Furthermore, we can broadly divide the TSP into symmetric and asymmetric subcategories in the single-agent case. Given two nodes, A and B, if the costs associated with traveling from A to B are the same as those of traveling from B to A, then the TSP is symmetric. However, if the cost of traveling from A to B differs from that of traveling from B to A, then the TSP is asymmetric <span class="citation" data-cites="davendra_traveling_2010">[<a href="#ref-davendra_traveling_2010" role="doc-biblioref">7</a>]</span>.</p>
<p>The body of literature discussing methods for solving the TSP is extensive. The base method is by brute force (also known as the Naive Approach), which consists of finding all possible route permutations. This approach becomes unfeasible as the number of locations increases. The Branch and Bound method attempts to find a solution by breaking the problem into subproblems. Each subproblem is recursively solved to find an optimal solution <span class="citation" data-cites="little_algorithm_1963">[<a href="#ref-little_algorithm_1963" role="doc-biblioref">8</a>]</span>. The Nearest Neighbor Method is also a widely used heuristic method, which relies on going to the closest location at every step and returning to the starting one once all locations have been visited <span class="citation" data-cites="johnson_traveling_1997">[<a href="#ref-johnson_traveling_1997" role="doc-biblioref">9</a>]</span>.</p>
<p>Additionally, there are extensively studied approximate approaches, such as Simulated Annealing (SA), which is inspired by the materials science process of annealing. <span class="citation" data-cites="kirkpatrick_optimization_1983">[<a href="#ref-kirkpatrick_optimization_1983" role="doc-biblioref">10</a>]</span> initially conceived SA and consists of randomly generating an initial solution and iteratively improving it by selecting a neighboring alternative. At each step, the procedure decides whether to accept or reject the selection based on a specified criterion. Lastly, genetic algorithms (GAs) are also very popular. Starting from <span class="citation" data-cites="brady_optimization_1985">[<a href="#ref-brady_optimization_1985" role="doc-biblioref">11</a>]</span>, GAs build on the idea of natural selection to start with a set of candidate solutions that are evaluated for “fitness.” Then, the process iterates until meeting a stopping criterion.</p>
<p>Lastly, the TSP has also received attention from the rapid increase in reinforcement learning applications witnessed in recent decades. Some examples worth mentioning include applying Q-learning and SARSA to optimize vehicle routes with fuel constraints <span class="citation" data-cites="ottoni_reinforcement_2022">[<a href="#ref-ottoni_reinforcement_2022" role="doc-biblioref">12</a>]</span>, and TSP involving truck-drone fleets coordination using deep reinforcement learning <span class="citation" data-cites="bogyrbayeva_deep_2023">[<a href="#ref-bogyrbayeva_deep_2023" role="doc-biblioref">13</a>]</span>. This study builds on this body of literature to apply Q-Learning to a TSP flight route optimization problem in the asymmetric case.</p>
</section>
<section id="methodology" class="level2" data-number="3">
<h2 data-number="3" class="anchored" data-anchor-id="methodology"><span class="header-section-number">3</span> Methodology</h2>
<section id="problem-formulation" class="level3" data-number="3.1">
<h3 data-number="3.1" class="anchored" data-anchor-id="problem-formulation"><span class="header-section-number">3.1</span> Problem Formulation</h3>
<p>In this paper, we employ the asymmetric case of the TSP since the costs associated with traveling from airport A to airport B are not the same as the costs of traveling from B to A, given differing fare rates. Furthermore, itineraries might include varying stopovers that lead to travel distances between the points that are not symmetrical in the model. We formulate the TSP as a set of locations <span class="math inline">\(V=\left\{v_1, \ldots, v_n\right\}\)</span>, a set of edges <span class="math inline">\(A=\{(r, s): r, s \in V\}\)</span>, and a set of costs <span class="math inline">\(d_{r s}\)</span>, where each cost is associated with an edge <span class="math inline">\((r, s) \in A\)</span>. Since this is the asymmetric case, we allow for <span class="math inline">\(d_{r s} \neq d_{s r}\)</span> for any edge <span class="math inline">\((r, s)\)</span> <span class="citation" data-cites="davendra_traveling_2010">[<a href="#ref-davendra_traveling_2010" role="doc-biblioref">7</a>]</span>.</p>
<p>In the Q-learning framework, we aim to learn which action to take in each location (the state). To this end, the model learns the value associated with each transition from one location to another (referred to as Q-values) by sampling the environment and repeatedly applying the update rule for the Q-learning algorithm:</p>
<p><span id="eq-q-update"><span class="math display">\[
Q(s, a) = Q(s, a) + \alpha \left( r + \gamma \max_{a'} Q(s', a') - Q(s, a) \right)
\tag{1}\]</span></span></p>
<p>where <span class="math inline">\(Q(s, a)\)</span> is the current estimate of the value of taking action <span class="math inline">\(a\)</span> in state <span class="math inline">\(s\)</span>, <span class="math inline">\(r\)</span> is the reward (cost) from taking an action, <span class="math inline">\(\alpha\)</span> is a learning rate hyperparameter, and <span class="math inline">\(\gamma\)</span> is a discount rate hyperparameter. States and actions denoted with the <span class="math inline">\(\prime\)</span> symbol denote the next state or action. By applying <a href="#eq-q-update" class="quarto-xref">Equation 1</a>, the model can update the existing estimates of <span class="math inline">\(Q(s, a)\)</span>. Then, the agent takes the action with the highest Q-value at every step <span class="citation" data-cites="bilgin_mastering_2020">[<a href="#ref-bilgin_mastering_2020" role="doc-biblioref">14</a>]</span>.</p>
</section>
<section id="data" class="level3" data-number="3.2">
<h3 data-number="3.2" class="anchored" data-anchor-id="data"><span class="header-section-number">3.2</span> Data</h3>
<p>We obtained the initial list of target cities and coordinate data from <span class="citation" data-cites="flagpicturescom_list_2023">[<a href="#ref-flagpicturescom_list_2023" role="doc-biblioref">15</a>]</span>. Pyongyang, Sanaa, Rangoon, Damascus, Kabul, and Thimphu were removed from the list due to limited flight availability, resulting in 42 cities for the study. <a href="#fig-airport-locations" class="quarto-xref">Figure 2</a> shows the airport locations in each city.</p>
<div id="cell-fig-airport-locations" class="cell" data-execution_count="2">
<div id="fig-airport-locations" class="cell-output cell-output-display quarto-float quarto-figure quarto-figure-center anchored" data-execution_count="152">
<figure class="quarto-float quarto-float-fig figure">
<div aria-describedby="fig-airport-locations-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<iframe width="100%" height="500" src="assets/resources/Outputs/Plots/map_of_airport_locations.html" frameborder="0" allowfullscreen=""></iframe>
</div>
<figcaption class="quarto-float-caption-bottom quarto-float-caption quarto-float-fig" id="fig-airport-locations-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
Figure 2: Interactive Folium world map with overlay over Asia showing the cities of interest, with a tile by <span class="citation" data-cites="national_geographic_esrinatgeoworldmap_2023">[<a href="#ref-national_geographic_esrinatgeoworldmap_2023" role="doc-biblioref">16</a>]</span> and administrative boundaries from <span class="citation" data-cites="opendatasoft_world_2022">[<a href="#ref-opendatasoft_world_2022" role="doc-biblioref">17</a>]</span>. <strong>Note</strong>: VPNs may prevent correct map rendering.
</figcaption>
</figure>
</div>
</div>
<p>We collected these cities’ flight prices, duration, and stopover data using the SkyScanner API <span class="citation" data-cites="skyscanner_api_2023">[<a href="#ref-skyscanner_api_2023" role="doc-biblioref">18</a>]</span>. We made several simplifying assumptions in the data collection. We only obtained data for the single, arbitrary date of 22 May 2023. Also, the data does not include the layover time between segments, only the in-flight time<a href="#fn1" class="footnote-ref" id="fnref1" role="doc-noteref"><sup>1</sup></a>. Lastly, the data collected only corresponds to economy cabin class. Relaxation of these assumptions can be explored in future work.</p>
<p>Given the 42 airports, we completed three transition matrices with 1,722 non-zero entries, each matrix corresponding to either flight price, duration, or stopover information. The entries correspond to each of the <span class="math inline">\(42 \times 42\)</span> transitions minus the diagonal, which would correspond to traveling from an airport to itself (<span class="math inline">\(42 * 42 - 42 = 1,722\)</span>). The search resulted in 38,236 segments, narrowed to 15,396 itineraries, and narrowed to the 1,722 ‘best’ flights. To choose the ‘best’ flight, we used the heuristic of valuing each flight hour at $20 and every stopover at $50, resulting in:</p>
<p><span id="eq-best-flight"><span class="math display">\[
\text{best price} = \text{fare} + \text{flight time} \times 20 + \text{stops} \times 50
\tag{2}\]</span></span></p>
<p><a href="#fig-airport-links" class="quarto-xref">Figure 3</a> shows color-coded prices between airports.</p>
<div id="cell-fig-airport-links" class="cell" data-execution_count="3">
<div id="fig-airport-links" class="cell-output cell-output-display quarto-float quarto-figure quarto-figure-center anchored" data-execution_count="153">
<figure class="quarto-float quarto-float-fig figure">
<div aria-describedby="fig-airport-links-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<iframe width="100%" height="500" src="assets/resources/Outputs/Plots/map_of_airport_links.html" frameborder="0" allowfullscreen=""></iframe>
</div>
<figcaption class="quarto-float-caption-bottom quarto-float-caption quarto-float-fig" id="fig-airport-links-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
Figure 3: Interactive Folium world map showing the links between airports with color-coded prices. <strong>Note</strong>: VPNs may prevent correct map rendering.
</figcaption>
</figure>
</div>
</div>
</section>
</section>
<section id="experiments-and-results" class="level2" data-number="4">
<h2 data-number="4" class="anchored" data-anchor-id="experiments-and-results"><span class="header-section-number">4</span> Experiments and Results</h2>
<p>We implemented the model in Python using custom classes and functions. The model was based on the <span class="citation" data-cites="alves_da_costa_delivery_2019">[<a href="#ref-alves_da_costa_delivery_2019" role="doc-biblioref">19</a>]</span> implementation, particularly on the method for tracking previously visited locations and preventing revisiting them. The model constructed a matrix of Q-values of size <span class="math inline">\(42 \times 42\)</span> and employed an e-greedy strategy with epsilon decay at a rate of 0.999. Finally, we ran the model through 200 episodes of Q-learning, with results available in <a href="#fig-experiment-results" class="quarto-xref">Figure 4</a>.</p>
<div id="fig-experiment-results" class="quarto-float quarto-figure quarto-figure-center anchored">
<figure class="quarto-float quarto-float-fig figure">
<div aria-describedby="fig-experiment-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<div id="fig-score-results" class="quarto-float quarto-figure quarto-figure-center anchored" data-fig-align="center">
<figure class="quarto-float quarto-subfloat-fig figure">
<div aria-describedby="fig-score-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<img src="assets/resources/Outputs/Plots/flight_score.png" class="img-fluid quarto-figure quarto-figure-center figure-img" style="width:85.0%" data-ref-parent="fig-experiment-results">
</div>
<figcaption class="quarto-float-caption-bottom quarto-subfloat-caption quarto-subfloat-fig" id="fig-score-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
(a) Flight Score Results
</figcaption>
</figure>
</div>
<div id="fig-expense-results" class="quarto-float quarto-figure quarto-figure-center anchored" data-fig-align="center">
<figure class="quarto-float quarto-subfloat-fig figure">
<div aria-describedby="fig-expense-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<img src="assets/resources/Outputs/Plots/fares_in_usd.png" class="img-fluid quarto-figure quarto-figure-center figure-img" style="width:85.0%" data-ref-parent="fig-experiment-results">
</div>
<figcaption class="quarto-float-caption-bottom quarto-subfloat-caption quarto-subfloat-fig" id="fig-expense-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
(b) Fare Expense Results
</figcaption>
</figure>
</div>
<div id="fig-time-results" class="quarto-float quarto-figure quarto-figure-center anchored" data-fig-align="center">
<figure class="quarto-float quarto-subfloat-fig figure">
<div aria-describedby="fig-time-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<img src="assets/resources/Outputs/Plots/flight_time.png" class="img-fluid quarto-figure quarto-figure-center figure-img" style="width:85.0%" data-ref-parent="fig-experiment-results">
</div>
<figcaption class="quarto-float-caption-bottom quarto-subfloat-caption quarto-subfloat-fig" id="fig-time-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
(c) Flight Time Results
</figcaption>
</figure>
</div>
<div id="fig-stops-results" class="quarto-float quarto-figure quarto-figure-center anchored" data-fig-align="center">
<figure class="quarto-float quarto-subfloat-fig figure">
<div aria-describedby="fig-stops-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<img src="assets/resources/Outputs/Plots/flight_stops.png" class="img-fluid quarto-figure quarto-figure-center figure-img" style="width:85.0%" data-ref-parent="fig-experiment-results">
</div>
<figcaption class="quarto-float-caption-bottom quarto-subfloat-caption quarto-subfloat-fig" id="fig-stops-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
(d) Flight Stops Results
</figcaption>
</figure>
</div>
</div>
<figcaption class="quarto-float-caption-bottom quarto-float-caption quarto-float-fig" id="fig-experiment-results-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
Figure 4: Experiment Results
</figcaption>
</figure>
</div>
<p>The results obtained had several important improvements from the initial randomness, such as:</p>
<ul>
<li>Decreasing overall flight expenditure from around $18,000 to $8000</li>
<li>Decreasing overall flight time from around 20,000 minutes (333.3 hours or 7.93 average hours per flight) to 9,000 minutes (133.3 hours or 3.17 average hours per flight)</li>
<li>Decreasing overall stopovers from around 100 to 60.</li>
</ul>
<p>Furthermore, <a href="#fig-first-and-last-episodes" class="quarto-xref">Figure 5</a> shows the chosen trajectories for the first and last episodes, allowing us to observe the choice changes from the untrained and trained models. It is challenging to discern the disparities between these figures through mere observation, underscoring the model’s proficiency in revealing mathematical advantages that may otherwise be arduous to infer by visual inspection alone.</p>
<div id="fig-first-and-last-episodes" class="quarto-float quarto-figure quarto-figure-center anchored" data-layout-ncols="1">
<figure class="quarto-float quarto-float-fig figure">
<div aria-describedby="fig-first-and-last-episodes-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<div id="cell-fig-first-episode-links" class="cell" data-execution_count="4">
<div id="fig-first-episode-links" class="cell-output cell-output-display quarto-float quarto-figure quarto-figure-center anchored" data-execution_count="154">
<figure class="quarto-float quarto-subfloat-fig figure">
<div aria-describedby="fig-first-episode-links-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<iframe width="100%" height="500" src="assets/resources/Outputs/Plots/first_episode_links.html" title="Interactive map of the first episode's flight links" frameborder="0" allowfullscreen=""></iframe>
</div>
<figcaption class="quarto-float-caption-bottom quarto-subfloat-caption quarto-subfloat-fig" id="fig-first-episode-links-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
(a) Shows the first episode’s links. <strong>Note</strong>: VPNs may prevent correct map rendering.
</figcaption>
</figure>
</div>
</div>
<div id="cell-fig-last-episode-links" class="cell" data-execution_count="5">
<div id="fig-last-episode-links" class="cell-output cell-output-display quarto-float quarto-figure quarto-figure-center anchored" data-execution_count="155">
<figure class="quarto-float quarto-subfloat-fig figure">
<div aria-describedby="fig-last-episode-links-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
<iframe width="100%" height="500" src="assets/resources/Outputs/Plots/last_episode_links.html" title="Interactive map of the last episode's flight links" frameborder="0" allowfullscreen=""></iframe>
</div>
<figcaption class="quarto-float-caption-bottom quarto-subfloat-caption quarto-subfloat-fig" id="fig-last-episode-links-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
(b) Shows the last episode’s links. <strong>Note</strong>: VPNs may prevent correct map rendering.
</figcaption>
</figure>
</div>
</div>
</div>
<figcaption class="quarto-float-caption-bottom quarto-float-caption quarto-float-fig" id="fig-first-and-last-episodes-caption-0ceaefa1-69ba-4598-a22c-09a6ac19f8ca">
Figure 5: Trajectories of the first and last episodes.
</figcaption>
</figure>
</div>
</section>
<section id="conclusions" class="level2" data-number="5">
<h2 data-number="5" class="anchored" data-anchor-id="conclusions"><span class="header-section-number">5</span> Conclusions</h2>
<p>This study began by explaining the TSP and motivated the use of RL as a method for solving it. Then we reviewed the TSP literature and the wide variety of existing solutions, including heuristic, approximate, and RL methods. We then formalized the problem and explained how the Q-learning process works. Next, we covered how the data for the experiment was collected, the assumptions made, and visualized the search space. Lastly, we discussed the results from the experiment showing that Q-learning is a very effective method for solving the TSP, achieving significant gains in cost reduction and time minimization for the desired route through the Asian cities.</p>
<p>Future work may include the relaxation of several assumptions made in this implementation. For example, future implementations may include additional data to capture varying prices across the expected duration of the trip rather than utilizing prices from a single date. Also, more complicated choices of ‘best’ flight may be considered to explore their impact on the model.</p>
</section>
<section id="references" class="level2 unnumbered">
</section>
<div id="quarto-appendix" class="default"><section class="quarto-appendix-contents" role="doc-bibliography" id="quarto-bibliography"><h2 class="anchored quarto-appendix-heading">References</h2><div id="refs" class="references csl-bib-body" role="list">
<div id="ref-stability_ai_stable_2023" class="csl-entry" role="listitem">
<div class="csl-left-margin">[1] </div><div class="csl-right-inline">Stability AI. Stable <span>Diffusion</span> 2-1. Hugging Face. 2023 [accessed 2023 Apr 25]. <a href="https://huggingface.co/spaces/stabilityai/stable-diffusion">https://huggingface.co/spaces/stabilityai/stable-diffusion</a></div>
</div>
<div id="ref-schrijver_history_2005" class="csl-entry" role="listitem">
<div class="csl-left-margin">[2] </div><div class="csl-right-inline">Schrijver A. On the <span>History</span> of <span>Combinatorial</span> <span>Optimization</span> (<span>Till</span> 1960). In: Handbooks in <span>Operations</span> <span>Research</span> and <span>Management</span> <span>Science</span>. Vol. 12. Elsevier; 2005. p. 1–68. <a href="https://linkinghub.elsevier.com/retrieve/pii/S0927050705120015">https://linkinghub.elsevier.com/retrieve/pii/S0927050705120015</a>. doi:<a href="https://doi.org/10.1016/S0927-0507(05)12001-5">10.1016/S0927-0507(05)12001-5</a></div>
</div>
<div id="ref-plante_product_1987" class="csl-entry" role="listitem">
<div class="csl-left-margin">[3] </div><div class="csl-right-inline">Plante RD, Lowe TJ, Chandrasekaran R. The <span>Product</span> <span>Matrix</span> <span>Traveling</span> <span>Salesman</span> <span>Problem</span>: <span>An</span> <span>Application</span> and <span>Solution</span> <span>Heuristic</span>. Operations Research. 1987 [accessed 2023 Apr 30];35(5):772–783. <a href="https://www.jstor.org/stable/171228">https://www.jstor.org/stable/171228</a></div>
</div>
<div id="ref-bland_large_1989" class="csl-entry" role="listitem">
<div class="csl-left-margin">[4] </div><div class="csl-right-inline">Bland RG, Shallcross DF. Large travelling salesman problems arising from experiments in <span>X</span>-ray crystallography: <span>A</span> preliminary report on computation. Operations Research Letters. 1989 [accessed 2023 Apr 30];8(3):125–128. <a href="https://linkinghub.elsevier.com/retrieve/pii/0167637789900370">https://linkinghub.elsevier.com/retrieve/pii/0167637789900370</a>. doi:<a href="https://doi.org/10.1016/0167-6377(89)90037-0">10.1016/0167-6377(89)90037-0</a></div>
</div>
<div id="ref-lenstra_simple_1975" class="csl-entry" role="listitem">
<div class="csl-left-margin">[5] </div><div class="csl-right-inline">Lenstra JK, Kan AHGR. Some <span>Simple</span> <span>Applications</span> of the <span>Travelling</span> <span>Salesman</span> <span>Problem</span>. Journal of the Operational Research Society. 1975 [accessed 2023 Apr 30];26(4):717–733. <a href="https://www.tandfonline.com/doi/full/10.1057/jors.1975.151">https://www.tandfonline.com/doi/full/10.1057/jors.1975.151</a>. doi:<a href="https://doi.org/10.1057/jors.1975.151">10.1057/jors.1975.151</a></div>
</div>
<div id="ref-miller_reducibility_1972" class="csl-entry" role="listitem">
<div class="csl-left-margin">[6] </div><div class="csl-right-inline">Karp RM. Reducibility among <span>Combinatorial</span> <span>Problems</span>. In: Miller RE, Thatcher JW, Bohlinger JD, editors. Complexity of <span>Computer</span> <span>Computations</span>. Boston, MA: Springer US; 1972. p. 85–103. <a href="http://link.springer.com/10.1007/978-1-4684-2001-2_9">http://link.springer.com/10.1007/978-1-4684-2001-2_9</a>. doi:<a href="https://doi.org/10.1007/978-1-4684-2001-2_9">10.1007/978-1-4684-2001-2_9</a></div>
</div>
<div id="ref-davendra_traveling_2010" class="csl-entry" role="listitem">
<div class="csl-left-margin">[7] </div><div class="csl-right-inline">Matai R, Singh S, Lal M. Traveling <span>Salesman</span> <span>Problem</span>: An <span>Overview</span> of <span>Applications</span>, <span>Formulations</span>, and <span>Solution</span> <span>Approaches</span>. In: Davendra D, editor. Traveling <span>Salesman</span> <span>Problem</span>, <span>Theory</span> and <span>Applications</span>. InTech; 2010. <a href="http://www.intechopen.com/books/traveling-salesman-problem-theory-and-applications/traveling-salesman-problem-an-overview-of-applications-formulations-and-solution-approaches">http://www.intechopen.com/books/traveling-salesman-problem-theory-and-applications/traveling-salesman-problem-an-overview-of-applications-formulations-and-solution-approaches</a>. doi:<a href="https://doi.org/10.5772/12909">10.5772/12909</a></div>
</div>
<div id="ref-little_algorithm_1963" class="csl-entry" role="listitem">
<div class="csl-left-margin">[8] </div><div class="csl-right-inline">Little JDC, Murty KG, Sweeney DW, Karel C. An <span>Algorithm</span> for the <span>Traveling</span> <span>Salesman</span> <span>Problem</span>. Operations Research. 1963 [accessed 2023 May 1];11(6):972–989. <a href="https://pubsonline.informs.org/doi/10.1287/opre.11.6.972">https://pubsonline.informs.org/doi/10.1287/opre.11.6.972</a>. doi:<a href="https://doi.org/10.1287/opre.11.6.972">10.1287/opre.11.6.972</a></div>
</div>
<div id="ref-johnson_traveling_1997" class="csl-entry" role="listitem">
<div class="csl-left-margin">[9] </div><div class="csl-right-inline">Johnson DS, McGeoch LA. The traveling salesman problem: <span>A</span> case study in local optimization. In: Local <span>Search</span> in <span>Combinatorial</span> <span>Optimization</span>. John Wiley; Sons; 1997.</div>
</div>
<div id="ref-kirkpatrick_optimization_1983" class="csl-entry" role="listitem">
<div class="csl-left-margin">[10] </div><div class="csl-right-inline">Kirkpatrick S, Gelatt CD, Vecchi MP. Optimization by <span>Simulated</span> <span>Annealing</span>. Science. 1983 [accessed 2023 May 1];220(4598):671–680. <a href="https://www.science.org/doi/10.1126/science.220.4598.671">https://www.science.org/doi/10.1126/science.220.4598.671</a>. doi:<a href="https://doi.org/10.1126/science.220.4598.671">10.1126/science.220.4598.671</a></div>
</div>
<div id="ref-brady_optimization_1985" class="csl-entry" role="listitem">
<div class="csl-left-margin">[11] </div><div class="csl-right-inline">Brady RM. Optimization strategies gleaned from biological evolution. Nature. 1985 [accessed 2023 May 1];317(6040):804–806. <a href="http://www.nature.com/articles/317804a0">http://www.nature.com/articles/317804a0</a>. doi:<a href="https://doi.org/10.1038/317804a0">10.1038/317804a0</a></div>
</div>
<div id="ref-ottoni_reinforcement_2022" class="csl-entry" role="listitem">
<div class="csl-left-margin">[12] </div><div class="csl-right-inline">Ottoni ALC, Nepomuceno EG, Oliveira MSD, Oliveira DCRD. Reinforcement learning for the traveling salesman problem with refueling. Complex & Intelligent Systems. 2022 [accessed 2023 May 1];8(3):2001–2015. <a href="https://link.springer.com/10.1007/s40747-021-00444-4">https://link.springer.com/10.1007/s40747-021-00444-4</a>. doi:<a href="https://doi.org/10.1007/s40747-021-00444-4">10.1007/s40747-021-00444-4</a></div>
</div>
<div id="ref-bogyrbayeva_deep_2023" class="csl-entry" role="listitem">
<div class="csl-left-margin">[13] </div><div class="csl-right-inline">Bogyrbayeva A, Yoon T, Ko H, Lim S, Yun H, Kwon C. A deep reinforcement learning approach for solving the <span>Traveling</span> <span>Salesman</span> <span>Problem</span> with <span>Drone</span>. Transportation Research Part C: Emerging Technologies. 2023 [accessed 2023 May 1];148:103981. <a href="https://linkinghub.elsevier.com/retrieve/pii/S0968090X22003941">https://linkinghub.elsevier.com/retrieve/pii/S0968090X22003941</a>. doi:<a href="https://doi.org/10.1016/j.trc.2022.103981">10.1016/j.trc.2022.103981</a></div>
</div>
<div id="ref-bilgin_mastering_2020" class="csl-entry" role="listitem">
<div class="csl-left-margin">[14] </div><div class="csl-right-inline">Bilgin E. Mastering <span>Reinforcement</span> <span>Learning</span> with <span>Python</span>. Packt Publishing; 2020.</div>
</div>
<div id="ref-flagpicturescom_list_2023" class="csl-entry" role="listitem">
<div class="csl-left-margin">[15] </div><div class="csl-right-inline">FlagPictures.com. List of <span>Countries</span> <span>Capital</span> <span>Cities</span>. FlagPictures. 2023 [accessed 2023 Apr 25]. <a href="https://www.flagpictures.com/world-capital-cities/">https://www.flagpictures.com/world-capital-cities/</a></div>
</div>
<div id="ref-national_geographic_esrinatgeoworldmap_2023" class="csl-entry" role="listitem">
<div class="csl-left-margin">[16] </div><div class="csl-right-inline">National Geographic, Esri, Garmin, HERE, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, et al. Esri.<span>NatGeoWorldMap</span>. Leaflet-providers preview. 2023 [accessed 2023 Apr 25]. <a href="http://leaflet-extras.github.io/leaflet-providers/preview/#filter=Esri.NatGeoWorldMap">http://leaflet-extras.github.io/leaflet-providers/preview/#filter=Esri.NatGeoWorldMap</a></div>
</div>
<div id="ref-opendatasoft_world_2022" class="csl-entry" role="listitem">
<div class="csl-left-margin">[17] </div><div class="csl-right-inline">Opendatasoft. World <span>Administrative</span> <span>Boundaries</span> - <span>Countries</span> and <span>Territories</span>. Opendatasoft. 2022 [accessed 2023 Apr 26]. <a href="https://public.opendatasoft.com/explore/dataset/world-administrative-boundaries/information/">https://public.opendatasoft.com/explore/dataset/world-administrative-boundaries/information/</a></div>
</div>
<div id="ref-skyscanner_api_2023" class="csl-entry" role="listitem">
<div class="csl-left-margin">[18] </div><div class="csl-right-inline">Skyscanner. <span>API</span> <span>Developer</span> <span>Documentation</span>. Skyscanner Travel APIs. 2023 [accessed 2023 Apr 26]. <a href="https://developers.skyscanner.net/docs/intro">https://developers.skyscanner.net/docs/intro</a></div>
</div>
<div id="ref-alves_da_costa_delivery_2019" class="csl-entry" role="listitem">
<div class="csl-left-margin">[19] </div><div class="csl-right-inline">Alves Da Costa T. Delivery optimization with <span>Reinforcement</span> <span>Learning</span>. GitHub. 2019 [accessed 2023 Apr 25]. <a href="https://github.com/TheoLvs/reinforcement-learning">https://github.com/TheoLvs/reinforcement-learning</a></div>
</div>
</div></section><section id="footnotes" class="footnotes footnotes-end-of-document" role="doc-endnotes"><h2 class="anchored quarto-appendix-heading">Footnotes</h2>
<ol>
<li id="fn1"><p>For clarification, we cannot take the difference between the first segment departure and last segment arrival to calculate the total itinerary time because flight data is provided in local time, which would not account for time zone changes.<a href="#fnref1" class="footnote-back" role="doc-backlink">↩︎</a></p></li>
</ol>
</section></div></main>
<!-- /main column -->
<script id="quarto-html-after-body" type="application/javascript">
window.document.addEventListener("DOMContentLoaded", function (event) {
const icon = "";
const anchorJS = new window.AnchorJS();
anchorJS.options = {
placement: 'right',
icon: icon
};
anchorJS.add('.anchored');
const isCodeAnnotation = (el) => {
for (const clz of el.classList) {
if (clz.startsWith('code-annotation-')) {
return true;
}
}
return false;
}
const onCopySuccess = function(e) {
// button target
const button = e.trigger;
// don't keep focus
button.blur();
// flash "checked"
button.classList.add('code-copy-button-checked');
var currentTitle = button.getAttribute("title");
button.setAttribute("title", "Copied!");
let tooltip;
if (window.bootstrap) {
button.setAttribute("data-bs-toggle", "tooltip");
button.setAttribute("data-bs-placement", "left");
button.setAttribute("data-bs-title", "Copied!");
tooltip = new bootstrap.Tooltip(button,
{ trigger: "manual",
customClass: "code-copy-button-tooltip",
offset: [0, -8]});
tooltip.show();
}
setTimeout(function() {
if (tooltip) {
tooltip.hide();
button.removeAttribute("data-bs-title");
button.removeAttribute("data-bs-toggle");
button.removeAttribute("data-bs-placement");
}
button.setAttribute("title", currentTitle);
button.classList.remove('code-copy-button-checked');
}, 1000);
// clear code selection
e.clearSelection();
}
const getTextToCopy = function(trigger) {
const codeEl = trigger.previousElementSibling.cloneNode(true);
for (const childEl of codeEl.children) {
if (isCodeAnnotation(childEl)) {
childEl.remove();
}
}
return codeEl.innerText;
}
const clipboard = new window.ClipboardJS('.code-copy-button:not([data-in-quarto-modal])', {
text: getTextToCopy
});
clipboard.on('success', onCopySuccess);
if (window.document.getElementById('quarto-embedded-source-code-modal')) {
const clipboardModal = new window.ClipboardJS('.code-copy-button[data-in-quarto-modal]', {
text: getTextToCopy,
container: window.document.getElementById('quarto-embedded-source-code-modal')
});
clipboardModal.on('success', onCopySuccess);
}
var localhostRegex = new RegExp(/^(?:http|https):\/\/localhost\:?[0-9]*\//);
var mailtoRegex = new RegExp(/^mailto:/);
var filterRegex = new RegExp('/' + window.location.host + '/');
var isInternal = (href) => {
return filterRegex.test(href) || localhostRegex.test(href) || mailtoRegex.test(href);
}
// Inspect non-navigation links and adorn them if external
var links = window.document.querySelectorAll('a[href]:not(.nav-link):not(.navbar-brand):not(.toc-action):not(.sidebar-link):not(.sidebar-item-toggle):not(.pagination-link):not(.no-external):not([aria-hidden]):not(.dropdown-item):not(.quarto-navigation-tool):not(.about-link)');
for (var i=0; i<links.length; i++) {
const link = links[i];
if (!isInternal(link.href)) {
// undo the damage that might have been done by quarto-nav.js in the case of
// links that we want to consider external
if (link.dataset.originalHref !== undefined) {
link.href = link.dataset.originalHref;
}
}
}
function tippyHover(el, contentFn, onTriggerFn, onUntriggerFn) {
const config = {
allowHTML: true,
maxWidth: 500,
delay: 100,
arrow: false,
appendTo: function(el) {
return el.parentElement;
},
interactive: true,
interactiveBorder: 10,
theme: 'quarto',
placement: 'bottom-start',
};
if (contentFn) {
config.content = contentFn;
}
if (onTriggerFn) {
config.onTrigger = onTriggerFn;
}
if (onUntriggerFn) {
config.onUntrigger = onUntriggerFn;
}
window.tippy(el, config);
}
const noterefs = window.document.querySelectorAll('a[role="doc-noteref"]');
for (var i=0; i<noterefs.length; i++) {
const ref = noterefs[i];
tippyHover(ref, function() {
// use id or data attribute instead here
let href = ref.getAttribute('data-footnote-href') || ref.getAttribute('href');
try { href = new URL(href).hash; } catch {}
const id = href.replace(/^#\/?/, "");
const note = window.document.getElementById(id);
if (note) {
return note.innerHTML;
} else {
return "";
}
});
}
const xrefs = window.document.querySelectorAll('a.quarto-xref');
const processXRef = (id, note) => {
// Strip column container classes
const stripColumnClz = (el) => {
el.classList.remove("page-full", "page-columns");
if (el.children) {
for (const child of el.children) {
stripColumnClz(child);
}
}
}
stripColumnClz(note)
if (id === null || id.startsWith('sec-')) {
// Special case sections, only their first couple elements
const container = document.createElement("div");
if (note.children && note.children.length > 2) {
container.appendChild(note.children[0].cloneNode(true));
for (let i = 1; i < note.children.length; i++) {
const child = note.children[i];
if (child.tagName === "P" && child.innerText === "") {
continue;
} else {
container.appendChild(child.cloneNode(true));
break;
}
}
if (window.Quarto?.typesetMath) {
window.Quarto.typesetMath(container);
}
return container.innerHTML
} else {
if (window.Quarto?.typesetMath) {
window.Quarto.typesetMath(note);
}
return note.innerHTML;
}
} else {
// Remove any anchor links if they are present
const anchorLink = note.querySelector('a.anchorjs-link');
if (anchorLink) {
anchorLink.remove();
}
if (window.Quarto?.typesetMath) {
window.Quarto.typesetMath(note);
}
if (note.classList.contains("callout")) {
return note.outerHTML;
} else {
return note.innerHTML;
}
}
}
for (var i=0; i<xrefs.length; i++) {
const xref = xrefs[i];
tippyHover(xref, undefined, function(instance) {
instance.disable();
let url = xref.getAttribute('href');
let hash = undefined;
if (url.startsWith('#')) {
hash = url;
} else {
try { hash = new URL(url).hash; } catch {}
}
if (hash) {
const id = hash.replace(/^#\/?/, "");
const note = window.document.getElementById(id);
if (note !== null) {
try {
const html = processXRef(id, note.cloneNode(true));
instance.setContent(html);
} finally {
instance.enable();
instance.show();
}
} else {
// See if we can fetch this
fetch(url.split('#')[0])
.then(res => res.text())
.then(html => {
const parser = new DOMParser();
const htmlDoc = parser.parseFromString(html, "text/html");
const note = htmlDoc.getElementById(id);
if (note !== null) {
const html = processXRef(id, note);
instance.setContent(html);
}
}).finally(() => {
instance.enable();
instance.show();
});
}
} else {
// See if we can fetch a full url (with no hash to target)
// This is a special case and we should probably do some content thinning / targeting
fetch(url)
.then(res => res.text())
.then(html => {
const parser = new DOMParser();
const htmlDoc = parser.parseFromString(html, "text/html");
const note = htmlDoc.querySelector('main.content');
if (note !== null) {
// This should only happen for chapter cross references
// (since there is no id in the URL)
// remove the first header
if (note.children.length > 0 && note.children[0].tagName === "HEADER") {
note.children[0].remove();
}
const html = processXRef(null, note);
instance.setContent(html);
}
}).finally(() => {
instance.enable();
instance.show();
});
}
}, function(instance) {
});
}
let selectedAnnoteEl;
const selectorForAnnotation = ( cell, annotation) => {
let cellAttr = 'data-code-cell="' + cell + '"';
let lineAttr = 'data-code-annotation="' + annotation + '"';
const selector = 'span[' + cellAttr + '][' + lineAttr + ']';
return selector;
}
const selectCodeLines = (annoteEl) => {
const doc = window.document;
const targetCell = annoteEl.getAttribute("data-target-cell");
const targetAnnotation = annoteEl.getAttribute("data-target-annotation");
const annoteSpan = window.document.querySelector(selectorForAnnotation(targetCell, targetAnnotation));
const lines = annoteSpan.getAttribute("data-code-lines").split(",");
const lineIds = lines.map((line) => {
return targetCell + "-" + line;
})
let top = null;
let height = null;
let parent = null;
if (lineIds.length > 0) {
//compute the position of the single el (top and bottom and make a div)
const el = window.document.getElementById(lineIds[0]);
top = el.offsetTop;
height = el.offsetHeight;
parent = el.parentElement.parentElement;
if (lineIds.length > 1) {
const lastEl = window.document.getElementById(lineIds[lineIds.length - 1]);
const bottom = lastEl.offsetTop + lastEl.offsetHeight;
height = bottom - top;
}
if (top !== null && height !== null && parent !== null) {
// cook up a div (if necessary) and position it
let div = window.document.getElementById("code-annotation-line-highlight");
if (div === null) {
div = window.document.createElement("div");
div.setAttribute("id", "code-annotation-line-highlight");
div.style.position = 'absolute';
parent.appendChild(div);
}
div.style.top = top - 2 + "px";
div.style.height = height + 4 + "px";
div.style.left = 0;
let gutterDiv = window.document.getElementById("code-annotation-line-highlight-gutter");
if (gutterDiv === null) {
gutterDiv = window.document.createElement("div");
gutterDiv.setAttribute("id", "code-annotation-line-highlight-gutter");
gutterDiv.style.position = 'absolute';
const codeCell = window.document.getElementById(targetCell);
const gutter = codeCell.querySelector('.code-annotation-gutter');
gutter.appendChild(gutterDiv);
}
gutterDiv.style.top = top - 2 + "px";
gutterDiv.style.height = height + 4 + "px";
}
selectedAnnoteEl = annoteEl;
}
};
const unselectCodeLines = () => {
const elementsIds = ["code-annotation-line-highlight", "code-annotation-line-highlight-gutter"];
elementsIds.forEach((elId) => {
const div = window.document.getElementById(elId);
if (div) {
div.remove();
}
});
selectedAnnoteEl = undefined;
};
// Handle positioning of the toggle
window.addEventListener(
"resize",
throttle(() => {
elRect = undefined;
if (selectedAnnoteEl) {
selectCodeLines(selectedAnnoteEl);
}
}, 10)
);
function throttle(fn, ms) {
let throttle = false;
let timer;
return (...args) => {
if(!throttle) { // first call gets through
fn.apply(this, args);
throttle = true;
} else { // all the others get throttled
if(timer) clearTimeout(timer); // cancel #2
timer = setTimeout(() => {
fn.apply(this, args);
timer = throttle = false;
}, ms);
}
};
}
// Attach click handler to the DT
const annoteDls = window.document.querySelectorAll('dt[data-target-cell]');
for (const annoteDlNode of annoteDls) {
annoteDlNode.addEventListener('click', (event) => {
const clickedEl = event.target;
if (clickedEl !== selectedAnnoteEl) {
unselectCodeLines();
const activeEl = window.document.querySelector('dt[data-target-cell].code-annotation-active');
if (activeEl) {
activeEl.classList.remove('code-annotation-active');
}
selectCodeLines(clickedEl);
clickedEl.classList.add('code-annotation-active');
} else {
// Unselect the line
unselectCodeLines();
clickedEl.classList.remove('code-annotation-active');
}
});
}
const findCites = (el) => {
const parentEl = el.parentElement;
if (parentEl) {
const cites = parentEl.dataset.cites;
if (cites) {
return {
el,
cites: cites.split(' ')
};
} else {
return findCites(el.parentElement)
}
} else {
return undefined;
}
};
var bibliorefs = window.document.querySelectorAll('a[role="doc-biblioref"]');
for (var i=0; i<bibliorefs.length; i++) {
const ref = bibliorefs[i];
const citeInfo = findCites(ref);
if (citeInfo) {
tippyHover(citeInfo.el, function() {
var popup = window.document.createElement('div');
citeInfo.cites.forEach(function(cite) {
var citeDiv = window.document.createElement('div');
citeDiv.classList.add('hanging-indent');
citeDiv.classList.add('csl-entry');
var biblioDiv = window.document.getElementById('ref-' + cite);
if (biblioDiv) {
citeDiv.innerHTML = biblioDiv.innerHTML;
}
popup.appendChild(citeDiv);
});
return popup.innerHTML;
});
}
}
});
</script>
</div> <!-- /content -->
</body></html>