
Commit 13741ea: fix doc and fmts

Parent: 4b75e4d

6 files changed: +83 -92 lines

lib/bap_demangle/bap_demangle.mli

Lines changed: 3 additions & 3 deletions
@@ -88,9 +88,9 @@ module Std : sig
     [@@deprecated "since 2022-07 use [create] and/or [install]"]
   (** [register demangler] DEPRECATED.

-      @before 2.5.0 registers new demangler.
-      @after 2.5.0 no longer needed and does nothing, all demanglers
-      are automatically registered on creation. *)
+      @before 2.5.0
+        registers new demangler. After 2.5.0 it is no longer needed and does
+        nothing; all demanglers are automatically registered on creation. *)

   val lookup : ?package:string -> string -> demangler option
   (** [lookup ?package name] looks up in the registry the demangler with

lib/bap_future/bap_future.mli

Lines changed: 19 additions & 11 deletions
@@ -574,25 +574,33 @@ module Std : sig

   val foldw :
     ?stride:int -> 'a t -> int -> init:'b -> f:('b -> 'a -> 'b) -> 'b t
-  (** [foldw ss n ~init ~f] performs a windowed fold of the stream.
-      A function [f] is folded over [n] consecutive elements of [ss],
-      then the result is produced into the output stream, the window
-      is shifted by [stride] (defaults to one) and function [f]
-      applied to the next [n] elements. For example, if stream [ss]
-      produced the following sequence of elements:
+  (** [foldw ss n ~init ~f] performs a windowed fold of the stream. A function
+      [f] is folded over [n] consecutive elements of [ss], then the result is
+      produced into the output stream, the window is shifted by [stride]
+      (defaults to one), and [f] is applied to the next [n] elements. For
+      example, if the stream [ss] produced the following sequence of elements:

-      {[1,2,3,4,5,6,7,8]}
+      {[
+        1, 2, 3, 4, 5, 6, 7, 8
+      ]}

-      and windows length [n] is equal to [3], then the function [f]
-      will be applied to a sequences:
-      {[[1,2,3], [2,3,4], [3,4,5], [4,5,6], [5,6,7], [6,7,8]]}.
+      and the window length [n] is equal to [3], then the function [f] will
+      be applied to the sequences:
+      {[
+        [ (1, 2, 3) ],
+        [ (2, 3, 4) ],
+        [ (3, 4, 5) ],
+        [ (4, 5, 6) ],
+        [ (5, 6, 7) ],
+        [ (6, 7, 8) ]
+      ]}

       Example: a moving average filter implemented with [foldw]:

       {[
         let moving_average ss n =
           Float.(foldw ss n ~init:zero ~f:(+) >>| fun s -> s / of_int n)
-
       ]} *)

   val frame : clk:unit t -> 'a t -> init:'b -> f:('b -> 'a -> 'b) -> 'b t

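The windowing semantics can be pinned down with a short list-based model. The
sketch below is illustrative only, assuming [stride >= 1]; [take], [drop], and
[foldw_list] are hypothetical helpers, not part of Bap_future:

{[
  (* Fold [f] over each window of [n] consecutive elements, shifting
     the window by [stride] elements between applications. *)
  let rec take n = function
    | x :: xs when n > 0 -> x :: take (n - 1) xs
    | _ -> []

  let rec drop n = function
    | _ :: xs when n > 0 -> drop (n - 1) xs
    | xs -> xs

  let rec foldw_list ?(stride = 1) xs n ~init ~f =
    if List.length xs < n then []
    else
      List.fold_left f init (take n xs)
      :: foldw_list ~stride (drop stride xs) n ~init ~f

  (* foldw_list [1; 2; 3; 4; 5; 6; 7; 8] 3 ~init:0 ~f:( + )
     evaluates to [6; 9; 12; 15; 18; 21], i.e., one fold result per
     window, as in the doc comment above. *)
]}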
lib/bap_image/bap_table.ml

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ let pp_elt f fmt = function
   | None -> Format.fprintf fmt "None"
   | Some (x, _) -> Format.fprintf fmt "Some %a" Addr.pp (f x)

-(** @pre [x <= y] *)
+(** pre [x <= y] *)
 let intersects x y = Addr.(Mem.max_addr x >= Mem.min_addr y)

 let prev_key map key = Map.closest_key map `Less_than key

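The precondition is what makes the one-sided comparison sound: the caller
guarantees that [x] starts no later than [y], so the only way the two regions
can fail to intersect is for [x] to end before [y] begins. A minimal sketch of
the same invariant with plain integer intervals ([intersects'] is a
hypothetical stand-in, not part of bap_table):

{[
  (* Intervals are (lo, hi) pairs with lo <= hi; the precondition is
     that the first interval starts no later than the second. *)
  let intersects' (_, hi_x) (lo_y, _) = hi_x >= lo_y

  let () =
    assert (intersects' (0, 5) (3, 8));        (* overlapping *)
    assert (not (intersects' (0, 2) (3, 8)))   (* disjoint *)
]}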
lib/bap_sema/bap_sema_taint.ml

Lines changed: 1 addition & 1 deletion
@@ -89,7 +89,7 @@ class context =
       {<tas = tas'>}

     method reg_taints r = get_taints tvs (Bil.Result.id r)
-    (** T(r) = { t : t |-> v} *)
+    (** T(r) = [ t : t |-> v ] *)

     method ptr_taints r = get_taints tas r
     method all_taints = Set.union (collect_taints tvs) (collect_taints tas)

plugins/cache/bap_cache_gc.ml

Lines changed: 58 additions & 74 deletions
@@ -1,77 +1,61 @@
-(**
-
-   The main goal is to delete files randomly and prioritizing larger files,
-   but still giving the probability for all files to be deleted.
-
-   Notation:
-   1. s(i) - the size of i-th file, where i = 0..m-1 with m being the
-      total number of files;
-   2. Sum(x(i)) = x(0) + ... x(m-1) - is the sigma operator;
-   3. T = Sum(s(i)) - the total size of the cache;
-   4. p(i) = s(i)/T - the discrete probability distrubution of the file
-      sizes in cache, likelihood that a randomly chosen file from the
-      cache will have size s(i).
-   5. F(i) = p(i) + p(i-1) + ... + p(0)
-      cumulative discrete distribution function (CDF).
-      F(i) we can generate a random number u in range 0..1,
-      using a uniform random number generator, and then find such k that
-      F(k-1) < u <= F(k).
-   6. |s| = Sum(p(i) * s(i)) = (1/T) * Sum(s(i)^2) - the expected value
-      of the size of a cache entry
-   7. |n| = t/|s| - the expected number of deletions that we need to
-      make to delete t bytes, e.g. if we want to delete half:
-      |n| = T^2 / (2*Sum(s(i)^2)
-
-   Example:
-   sizes = {4, 6, 3, 1, 6}
-   the total size of the cache is Sum(sizes(i)) = 20
-   the PDF is p(i) = {4/20; 6/20; 3/20; 1/20; 6/20}
-   and CDF is F(i) = {4/20; 10/20; 13/20; 14/20; 20/20}
-
-   We don't want to use floating points, there will be too many big and
-   small numbers and overflows and we finally want to get an
-   index. We will use rational numbers, since formulas 4. and 5. have the
-   same denominator (namely T) we can use only numenators.
-
-   On the high-level, we need to generate a random value between 0 and
-   T, and find such k that F(k-1) < S <= F(k), the k-th file will be
-   our candidate for removal. We can repeat sampling until we get |n|
-   files (of course deleting the same file twice won't free twice of
-   its size, so we had to keep in mind which files we already selected
-   and repeat until we get |n| distinct files)
-   Of course, we don't want to have a linear search for intervals, but
-   we can see, that F(i) partitions the set of sizes (0...T) into m-1
-   subsets, so we can represent F as a finite mapping, e.g., with our
-   example,
-
-   [0,3] -> 0
-   [4,9] -> 1
-   [10,12] -> 2
-   [13,13] -> 3
-   [14,19] -> 4
-
-   Since intervals are not intersecting, we don't need to use
-   Interval_map here, we just need to use the common Map from core
-   with the closest_key (`Less_or_equal_to`` function. So once we
-   generated a random size u we call for the closest_key for u and
-   pick the associated value as the index of the file that we will
-   delete. E.g., let's choose randomly a value from the range of
-   0...19, if it in range from 0..3 we will pick the first file, or if
-   it is in range from 4,9, e.g., 5, then closest_key will return 4,1,
-   so we will remove the second file. So we managed to get away from
-   ugly floats and got the desired distribution with no rounding
-   errors.
-
-   Now, after we have selected |n| distinct files we can shuffle them and
-   delete without worrying that some other process already deleted one
-   of those files. All the processes are using the same sequence of
-   pseudorandom files, so they will select approximately equal files
-   for deletion.
-
-   And finally, we don't want to make our recursive selection depend
-   from |n|, so instead of selecting |n| files for removal we will
-   select as many files as we need to remove requested size.
-*)
+(** The main goal is to delete files randomly, prioritizing larger files but
+    still giving every file a chance to be deleted.
+
+    Notation:
+    1. s(i) - the size of the i-th file, where i = 0..m-1 with m being the
+       total number of files;
+    2. Sum(x(i)) = x(0) + ... + x(m-1) - the sigma operator;
+    3. T = Sum(s(i)) - the total size of the cache;
+    4. p(i) = s(i)/T - the discrete probability distribution of the file
+       sizes in the cache, i.e., the likelihood that a randomly chosen file
+       from the cache has size s(i);
+    5. F(i) = p(i) + p(i-1) + ... + p(0) - the cumulative distribution
+       function (CDF); with F we can generate a random number u in the range
+       0..1, using a uniform random number generator, and then find such k
+       that F(k-1) < u <= F(k);
+    6. |s| = Sum(p(i) * s(i)) = (1/T) * Sum(s(i)^2) - the expected size of a
+       cache entry;
+    7. |n| = t/|s| - the expected number of deletions that we need to make to
+       delete t bytes, e.g., if we want to delete half of the cache, then
+       |n| = T^2 / (2*Sum(s(i)^2)).
+
+    Example:
+    {v
+      sizes = {4, 6, 3, 1, 6}
+      the total size of the cache is Sum(sizes(i)) = 20
+      the PDF is p(i) = {4/20; 6/20; 3/20; 1/20; 6/20}
+      and CDF is F(i) = {4/20; 10/20; 13/20; 14/20; 20/20}
+    v}
+
+    We don't want to use floating points: there would be too many big and
+    small numbers and overflows, and in the end we want to get an index. We
+    will use rational numbers instead; since formulas 4 and 5 have the same
+    denominator (namely T), we can work with the numerators only.
+
+    On the high level, we need to generate a random value u between 0 and T
+    and find such k that F(k-1) < u <= F(k); the k-th file will be our
+    candidate for removal. We can repeat the sampling until we get |n| files
+    (of course, deleting the same file twice won't free twice its size, so we
+    have to keep track of which files we have already selected and repeat
+    until we get |n| distinct files). We don't want a linear search for the
+    intervals, but we can see that F(i) partitions the range 0..T-1 into m
+    subsets, so we can represent F as a finite mapping, e.g., with our
+    example,
+
+    {v
+      [0,3]   -> 0
+      [4,9]   -> 1
+      [10,12] -> 2
+      [13,13] -> 3
+      [14,19] -> 4
+    v}
+
+    Since the intervals do not intersect, we don't need an Interval_map here;
+    the common Map from Core with the [closest_key `Less_or_equal_to]
+    function is enough. Once we have generated a random size u, we call
+    closest_key for u and pick the associated value as the index of the file
+    to delete. E.g., let's randomly choose a value from the range 0..19: if
+    it is in the range 0..3 we pick the first file; if it is in the range
+    4..9, e.g., 5, then closest_key returns (4, 1), so we remove the second
+    file. Thus we get away from ugly floats and obtain the desired
+    distribution with no rounding errors.
+
+    Now, after we have selected |n| distinct files, we can shuffle them and
+    delete them without worrying that some other process has already deleted
+    one of them. All the processes use the same sequence of pseudorandom
+    files, so they will select approximately the same files for deletion.
+
+    And finally, we don't want to make our recursive selection depend on
+    |n|, so instead of selecting |n| files for removal we select as many
+    files as we need to remove the requested size. *)

 open Core
 open Bap.Std

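Plugging the example numbers into formula 7: Sum(s(i)^2) = 16 + 36 + 9 + 1 +
36 = 98, so deleting half of the 20-byte cache takes |n| = 400/196, about 2
samples on average. The CDF-as-map construction and the `Less_or_equal_to
sampling step can also be made concrete in a few lines of Core. This is a
minimal sketch under the doc's assumptions (non-empty cache); [build_cdf] and
[sample] are hypothetical helpers, not the actual bap_cache_gc implementation:

{[
  open Core

  (* Build the CDF as a map from cumulative-size lower bounds to file
     indices. For sizes [4; 6; 3; 1; 6] this returns the total 20 and
     the mapping 0 -> 0, 4 -> 1, 10 -> 2, 13 -> 3, 14 -> 4, matching
     the intervals in the doc comment above. *)
  let build_cdf sizes =
    List.foldi sizes ~init:(0, Int.Map.empty) ~f:(fun i (lo, cdf) s ->
        lo + s, Map.set cdf ~key:lo ~data:i)

  (* Sample one file index: draw u uniformly from 0..total-1 and take
     the closest key less than or equal to u. *)
  let sample cdf ~total =
    let u = Random.int total in
    match Map.closest_key cdf `Less_or_equal_to u with
    | Some (_, i) -> i
    | None -> assert false (* unreachable: 0 is always a key *)

  (* let total, cdf = build_cdf [4; 6; 3; 1; 6] in
     sample cdf ~total  (* e.g., u = 5 falls in [4,9], so file 1 *) *)
]}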
plugins/x86/x86_legacy_bil_pp.ml

Lines changed: 1 addition & 2 deletions
@@ -1,8 +1,7 @@
 (* Copyright (C) 2017 ForAllSecure, Inc. - All Rights Reserved.*)
 (** Pretty printing

-    @todo Write .mli
-    *)
+    TODO: Write .mli *)

 module Bil = X86_legacy_bil
 open Big_int_Z
