//! Implementation of an atomic u64 cell. On 64-bit platforms, this is a
//! wrapper around `AtomicUsize`. On 32-bit platforms, it is implemented
//! using a `Mutex`.
//!
//! This file can be removed if/when `AtomicU64` lands in `std`.

pub use self::imp::AtomicU64;

#[cfg(target_pointer_width = "64")]
mod imp {
    use std::sync::atomic::{AtomicUsize, Ordering};
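
    // On these targets `usize` is 64 bits wide (that is what
    // `target_pointer_width = "64"` means), so the `as` casts between
    // `u64` and `usize` below are lossless round-trips.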

    #[derive(Debug)]
    pub struct AtomicU64 {
        inner: AtomicUsize,
    }

    impl AtomicU64 {
        pub fn new(val: u64) -> AtomicU64 {
            AtomicU64 {
                inner: AtomicUsize::new(val as usize),
            }
        }

        pub fn load(&self, ordering: Ordering) -> u64 {
            self.inner.load(ordering) as u64
        }

        pub fn store(&self, val: u64, ordering: Ordering) {
            self.inner.store(val as usize, ordering)
        }

        pub fn fetch_or(&self, val: u64, ordering: Ordering) -> u64 {
            self.inner.fetch_or(val as usize, ordering) as u64
        }

        pub fn compare_and_swap(&self, old: u64, new: u64, ordering: Ordering) -> u64 {
            self.inner
                .compare_and_swap(old as usize, new as usize, ordering) as u64
        }
    }
}

#[cfg(not(target_pointer_width = "64"))]
mod imp {
    use std::sync::atomic::Ordering;
    use std::sync::Mutex;
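
    // The `Ordering` arguments are accepted only for API parity with the
    // atomic version; the mutex already serializes every operation.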

    #[derive(Debug)]
    pub struct AtomicU64 {
        inner: Mutex<u64>,
    }

    impl AtomicU64 {
        pub fn new(val: u64) -> AtomicU64 {
            AtomicU64 {
                inner: Mutex::new(val),
            }
        }

        pub fn load(&self, _: Ordering) -> u64 {
            *self.inner.lock().unwrap()
        }

        pub fn store(&self, val: u64, _: Ordering) {
            *self.inner.lock().unwrap() = val;
        }

        pub fn fetch_or(&self, val: u64, _: Ordering) -> u64 {
            let mut lock = self.inner.lock().unwrap();
            let prev = *lock;
            *lock = prev | val;
            prev
        }

        pub fn compare_and_swap(&self, old: u64, new: u64, _: Ordering) -> u64 {
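            // Mirror `compare_and_swap` semantics: store `new` only when the
            // current value equals `old`, and return the previous value
            // either way.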
            let mut lock = self.inner.lock().unwrap();
            let prev = *lock;

            if prev != old {
                return prev;
            }

            *lock = new;
            prev
        }
    }
}
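
// A minimal usage sketch (a hypothetical test, not part of the original
// file): both `imp` modules expose the same API, so callers see identical
// behavior on 32-bit and 64-bit targets.
#[cfg(test)]
mod tests {
    use super::AtomicU64;
    use std::sync::atomic::Ordering::SeqCst;

    #[test]
    fn basic_ops() {
        let v = AtomicU64::new(5);
        assert_eq!(v.load(SeqCst), 5);

        v.store(9, SeqCst);
        // fetch_or returns the previous value and ORs in the argument:
        // 9 (0b1001) | 2 (0b0010) == 11 (0b1011).
        assert_eq!(v.fetch_or(2, SeqCst), 9);
        assert_eq!(v.load(SeqCst), 11);

        // This compare_and_swap succeeds because the current value equals
        // `old`: it returns the previous value (11) and stores 42.
        assert_eq!(v.compare_and_swap(11, 42, SeqCst), 11);
        assert_eq!(v.load(SeqCst), 42);

        // A CAS with a stale `old` fails: the value is left untouched and
        // the actual current value is returned.
        assert_eq!(v.compare_and_swap(0, 7, SeqCst), 42);
        assert_eq!(v.load(SeqCst), 42);
    }
}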