use Rng;
use distributions::{Distribution, Bernoulli, Cauchy};
use distributions::utils::log_gamma;
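
// The doc comment below is an added sketch: the example assumes this module is
// re-exported as `rand::distributions` and that `rand::thread_rng()` is
// available, as in the rest of the crate.
/// The binomial distribution `Binomial(n, p)`.
///
/// This distribution has density function:
/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
///
/// # Example
///
/// ```
/// use rand::distributions::{Binomial, Distribution};
///
/// let bin = Binomial::new(20, 0.3);
/// let v = bin.sample(&mut rand::thread_rng());
/// println!("{} is from a binomial distribution", v);
/// ```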
#[derive(Clone, Copy, Debug)]
pub struct Binomial {
    /// Number of trials.
    n: u64,
    /// Probability of success.
    p: f64,
}

impl Binomial {
    /// Construct a new `Binomial` with the given shape parameters `n`
    /// (number of trials) and `p` (probability of success).
    ///
    /// Panics if `p < 0` or `p > 1`.
    pub fn new(n: u64, p: f64) -> Binomial {
        assert!(p >= 0.0, "Binomial::new called with p < 0");
        assert!(p <= 1.0, "Binomial::new called with p > 1");
        Binomial { n, p }
    }
}

impl Distribution<u64> for Binomial {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
        // Handle the degenerate cases directly.
        if self.p == 0.0 {
            return 0;
        } else if self.p == 1.0 {
            return self.n;
        }

        // For small n it is faster to sum n Bernoulli draws directly;
        // the cost is independent of p.
        if self.n < 300 {
            let mut result = 0;
            let d = Bernoulli::new(self.p);
            for _ in 0 .. self.n {
                result += rng.sample(d) as u32;
            }
            return result as u64;
        }

        // The binomial distribution is symmetric under p -> 1 - p, k -> n - k,
        // so switch to p <= 0.5 (lower expected value) and invert the result
        // at the end if necessary.
        let p = if self.p <= 0.5 {
            self.p
        } else {
            1.0 - self.p
        };

        // Cache values that are reused inside the rejection loop.
        let float_n = self.n as f64;
        let ln_fact_n = log_gamma(float_n + 1.0);
        let pc = 1.0 - p;
        let log_p = p.ln();
        let log_pc = pc.ln();
        let expected = self.n as f64 * p;
        let sq = (expected * (2.0 * pc)).sqrt();

        let mut lresult;

        // Use the Cauchy distribution as the comparison distribution: it
        // matches the tails well, although the acceptance rate can be low,
        // especially for p close to 0.5.
        let cauchy = Cauchy::new(0.0, 1.0);
        loop {
            let mut comp_dev: f64;
            loop {
                // Draw from the Cauchy distribution and shift the peak of the
                // comparison distribution to the expected value.
                comp_dev = rng.sample(cauchy);
                lresult = expected + sq * comp_dev;
                // Repeat until the candidate lies in the range of possible values.
                if lresult >= 0.0 && lresult < float_n + 1.0 {
                    break;
                }
            }

            // The result must be discrete.
            lresult = lresult.floor();

            let log_binomial_dist = ln_fact_n - log_gamma(lresult + 1.0) -
                log_gamma(float_n - lresult + 1.0) + lresult * log_p + (float_n - lresult) * log_pc;
            // This is the binomial probability divided by the comparison
            // probability: draw a uniform value and accept the candidate only
            // if it does not exceed this ratio.
            let comparison_coeff = (log_binomial_dist.exp() * sq) * (1.2 * (1.0 + comp_dev * comp_dev));

            if comparison_coeff >= rng.gen() {
                break;
            }
        }

        // Undo the p -> 1 - p switch from above.
        if p != self.p {
            self.n - lresult as u64
        } else {
            lresult as u64
        }
    }
}

#[cfg(test)]
mod test {
    use Rng;
    use distributions::Distribution;
    use super::Binomial;

    fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
        let binomial = Binomial::new(n, p);

        let expected_mean = n as f64 * p;
        let expected_variance = n as f64 * p * (1.0 - p);

        let mut results = [0.0; 1000];
        for i in results.iter_mut() { *i = binomial.sample(rng) as f64; }

        let mean = results.iter().sum::<f64>() / results.len() as f64;
        assert!((mean - expected_mean).abs() < expected_mean / 50.0);

        let variance =
            results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>()
            / results.len() as f64;
        assert!((variance - expected_variance).abs() < expected_variance / 10.0);
    }

    #[test]
    fn test_binomial() {
        let mut rng = ::test::rng(351);
        test_binomial_mean_and_variance(150, 0.1, &mut rng);
        test_binomial_mean_and_variance(70, 0.6, &mut rng);
        test_binomial_mean_and_variance(40, 0.5, &mut rng);
        test_binomial_mean_and_variance(20, 0.7, &mut rng);
        test_binomial_mean_and_variance(20, 0.5, &mut rng);
    }

    #[test]
    fn test_binomial_end_points() {
        let mut rng = ::test::rng(352);
        assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
        assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
    }

    #[test]
    #[should_panic]
    fn test_binomial_invalid_lambda_neg() {
        Binomial::new(20, -10.0);
    }
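
    // Added sketch: a complementary check for p > 1.0, mirroring the
    // negative-p test above. It assumes `Binomial::new` panics when p > 1,
    // as its assert message suggests; the test name is our own.
    #[test]
    #[should_panic]
    fn test_binomial_invalid_p_gt_one() {
        Binomial::new(20, 1.1);
    }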
}