Skip to content

Commit 96927dc

Browse files
committed
Put some more pieces into place
1 parent d3e053b commit 96927dc

31 files changed

+6121
-0
lines changed

build.rs

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
// Build script: runs the lalrpop parser generator before the crate itself
// is compiled, so generated parser modules are available to `src/`.
extern crate lalrpop;

fn main() {
    // Scans from the crate root for `.lalrpop` grammar files and emits the
    // corresponding Rust parsers into OUT_DIR. A failure here means the
    // grammar is broken, so aborting the build via unwrap is intentional.
    lalrpop::process_root().unwrap();
}

src/args.rs

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
/// Usage/help text printed when argument parsing fails.
///
/// Fix: the synopsis previously advertised `--reorder`, but the option list
/// and `Args::parse` accept `--scramble`; the synopsis also omitted `--fuzz`
/// and the `=<#>` value required by `--fail-io`.
const USAGE: &str = "Usage: puke [--max-machines=<#>] [--scramble] [--fail-io=<#>] [--fuzz=<corpus path>] [module file.pk]

Falls back to repl if no module is provided.

Options:
    --max-machines=<#>      Maximum concurrency bound [default: number of cores * 1.2].
    --scramble              Scramble in-flight messages and scheduling orders.
    --fail-io=<#>           Causes random IO failures on 1 out of every N IO operations.
    --fuzz=<corpus path>    Feeds the provided module fuzzed inputs derived from the provided corpus directory.
";
11+
12+
/// Args for the puke `Interpreter`.
#[derive(Debug, Clone)]
pub struct Args {
    /// Maximum concurrency bound; defaults to physical core count * 1.2.
    pub max_machines: usize,
    /// Scramble in-flight messages and scheduling orders (`--scramble`).
    pub scramble: bool,
    /// When `Some(n)`, cause a random IO failure on 1 out of every `n` IO
    /// operations (`--fail-io=<#>`).
    pub fail_io: Option<usize>,
    /// When `Some(path)`, feed the module fuzzed inputs derived from the
    /// corpus directory at `path` (`--fuzz=<corpus path>`).
    pub fuzz: Option<String>,
    /// Module file to interpret; `None` falls back to the repl.
    pub module: Option<String>,
}
21+
22+
impl Default for Args {
23+
fn default() -> Args {
24+
Args {
25+
max_machines: ((num_cpus::get_physical() as f64) * 1.2) as usize,
26+
scramble: false,
27+
fail_io: None,
28+
fuzz: None,
29+
module: None,
30+
}
31+
}
32+
}
33+
34+
fn parse<'a, I, T>(mut iter: I) -> T
35+
where
36+
I: Iterator<Item = &'a str>,
37+
T: std::str::FromStr,
38+
<T as std::str::FromStr>::Err: std::fmt::Debug,
39+
{
40+
iter.next().expect(USAGE).parse().expect(USAGE)
41+
}
42+
43+
impl Args {
44+
pub fn parse() -> Args {
45+
let mut args = Args::default();
46+
for raw_arg in std::env::args().skip(1) {
47+
let mut splits = raw_arg[2..].split('=');
48+
match splits.next().unwrap() {
49+
"max-machines" => args.max_machines = parse(&mut splits),
50+
"scramble" => args.scramble = true,
51+
"fail-io" => args.fail_io = Some(parse(&mut splits)),
52+
"fuzz" => args.fuzz = Some(parse(&mut splits)),
53+
other => panic!("unknown option: {}, {}", other, USAGE),
54+
}
55+
}
56+
args
57+
}
58+
}

src/ast.rs

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
/// A single puke module: one declared behavior plus its top-level items.
pub struct Module {
    behavior: Behavior,
    items: Vec<Item>,
}

/// A user-defined struct type: a name and its fields.
pub struct Struct {
    name: String,
    fields: Vec<Field>,
}

/// A user-defined enum type; each variant is modeled as a `Struct`.
// NOTE(review): `Enum` is declared but `Item` has no `Enum` variant, so
// enums cannot yet appear in a module — presumably a later commit adds it.
pub struct Enum {
    name: String,
    variants: Vec<Struct>,
}

/// A named, typed slot — used both for struct fields and for function
/// arguments (see `SubFunction::arguments`).
pub struct Field {
    pub name: String,
    pub ty: Type,
}

/// Placeholder for the type representation; carries no data yet.
pub struct Type;

/// A function, made up of one or more pattern-matching clauses
/// ("subfunctions").
pub struct Function {
    subfunctions: Vec<SubFunction>,
}

/// One clause of a function: name, return type, arguments, and body.
pub struct SubFunction {
    // implicit arity in arguments length
    name: String,
    ret: Type,
    arguments: Vec<Field>,
    statements: Vec<Statement>,
}

/// A statement in a subfunction body.
pub enum Statement {
    /// Binds the result of an expression to a name.
    Assign(String, Expression),
    /// Evaluates an expression for its effect/value.
    Expression(Expression),
}

/// An expression. Both variants are payload-less placeholders for now.
pub enum Expression {
    Call,
    Case,
}

/// A top-level item within a module.
pub enum Item {
    /// Imports another module by path.
    Import { path: String },
    /// Exports a function by name and arity.
    Export { name: String, arity: usize },
    Struct(Struct),
    Function(Function),
}

/// The behavior a module implements.
// NOTE(review): variant semantics (HTTP/gRPC handler vs. state machine)
// are not defined anywhere in this commit — confirm against later code.
pub enum Behavior {
    Http,
    Grpc,
    StateMachine,
}

src/cache_padded.rs

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
/// Vendored and simplified from crossbeam-utils.
2+
use core::fmt;
3+
use core::ops::{Deref, DerefMut};
4+
5+
// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128 byte cache line size
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
/// Pads and aligns a value to the length of a cache line, so that two
/// `CachePadded` values placed next to each other never share a cache line
/// (avoiding false sharing between frequently-updated atomics).
#[cfg_attr(
    any(target_arch = "x86_64", target_arch = "aarch64"),
    repr(align(128))
)]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
pub struct CachePadded<T> {
    value: T,
}
28+
29+
// SAFETY: `CachePadded<T>` holds exactly one `T` and nothing else — the
// alignment attribute only adds padding, never shared state — so it is safe
// to send across threads whenever `T` is.
#[allow(unsafe_code)]
unsafe impl<T: Send> Send for CachePadded<T> {}

// SAFETY: same reasoning as `Send`: shared references to the wrapper expose
// nothing beyond a shared reference to the inner `T`.
#[allow(unsafe_code)]
unsafe impl<T: Sync> Sync for CachePadded<T> {}
34+
35+
impl<T> CachePadded<T> {
36+
/// Pads and aligns a value to the length of a cache line.
37+
pub const fn new(t: T) -> CachePadded<T> {
38+
CachePadded::<T> { value: t }
39+
}
40+
}
41+
42+
impl<T> Deref for CachePadded<T> {
43+
type Target = T;
44+
45+
fn deref(&self) -> &T {
46+
&self.value
47+
}
48+
}
49+
50+
impl<T> DerefMut for CachePadded<T> {
51+
fn deref_mut(&mut self) -> &mut T {
52+
&mut self.value
53+
}
54+
}
55+
56+
impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
57+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
58+
f.debug_struct("CachePadded").field("value", &self.value).finish()
59+
}
60+
}
61+
62+
impl<T> From<T> for CachePadded<T> {
63+
fn from(t: T) -> Self {
64+
CachePadded::new(t)
65+
}
66+
}

src/debug_delay.rs

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
#![allow(clippy::float_arithmetic)]
2+
3+
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
4+
5+
use crate::Lazy;
6+
7+
/// This function is useful for inducing random jitter into our atomic
/// operations, shaking out more possible interleavings quickly. It gets
/// fully eliminated by the compiler in non-test code.
///
/// Configuration is read lazily from the environment:
/// * `SLED_LOCK_FREE_DELAY_INTENSITY` — max sleep in microseconds (default 100).
/// * `SLED_CRASH_CHANCE` — when nonzero, each call exits the whole process
///   with status 9 with probability 1/N (for crash-recovery testing).
///
/// NOTE(review): the env-var names carry a `SLED_` prefix — this looks
/// vendored from sled; confirm the names are intended for this project.
///
/// # Panics
/// Panics if either environment variable is set to a non-integer value.
pub fn debug_delay() {
    use std::thread;
    use std::time::Duration;

    // Process-wide count of debug_delay invocations across all threads.
    static GLOBAL_DELAYS: AtomicUsize = AtomicUsize::new(0);

    // Parsed once on first use; `Lazy` is the crate-local lazy cell.
    static INTENSITY: Lazy<u32, fn() -> u32> = Lazy::new(|| {
        std::env::var("SLED_LOCK_FREE_DELAY_INTENSITY")
            .unwrap_or_else(|_| "100".into())
            .parse()
            .expect(
                "SLED_LOCK_FREE_DELAY_INTENSITY must be set to a \
                 non-negative integer (ideally below 1,000,000)",
            )
    });

    static CRASH_CHANCE: Lazy<u32, fn() -> u32> = Lazy::new(|| {
        std::env::var("SLED_CRASH_CHANCE")
            .unwrap_or_else(|_| "0".into())
            .parse()
            .expect(
                "SLED_CRASH_CHANCE must be set to a \
                 non-negative integer (ideally below 50,000)",
            )
    });

    // Per-thread snapshot of the global counter, used below to detect
    // whether any *other* thread has called in since we last did.
    thread_local!(
        static LOCAL_DELAYS: std::cell::RefCell<usize> = std::cell::RefCell::new(0)
    );

    if cfg!(feature = "miri_optimizations") {
        // Each interaction with LOCAL_DELAYS adds more stacked borrows
        // tracking information, and Miri is single-threaded anyway.
        return;
    }

    let global_delays = GLOBAL_DELAYS.fetch_add(1, Relaxed);
    let local_delays = LOCAL_DELAYS.with(|ld| {
        let mut ld = ld.borrow_mut();
        let old = *ld;
        // Keep the local counter at least one ahead of both its old value
        // and the observed global count.
        *ld = std::cmp::max(global_delays + 1, *ld + 1);
        old
    });

    // Simulated hard crash for recovery testing: exit without unwinding.
    if *CRASH_CHANCE > 0 && random(*CRASH_CHANCE) == 0 {
        std::process::exit(9)
    }

    if global_delays == local_delays {
        // no other threads seem to be
        // calling this, so we may as
        // well skip it
        return;
    }

    // ~0.1% of contended calls sleep for a random number of microseconds.
    if random(1000) == 1 {
        let duration = random(*INTENSITY);

        #[allow(clippy::cast_possible_truncation)]
        #[allow(clippy::cast_sign_loss)]
        thread::sleep(Duration::from_micros(u64::from(duration)));
    }

    // Half of contended calls yield, encouraging different interleavings.
    if random(2) == 0 {
        thread::yield_now();
    }
}
77+
78+
/// Generates a random number in `0..n` using a per-thread xorshift state.
///
/// Returns 0 if the thread-local state is unavailable (e.g. during thread
/// teardown) or if `n` is 0.
fn random(n: u32) -> u32 {
    use std::cell::Cell;
    use std::num::Wrapping;

    thread_local! {
        static RNG: Cell<Wrapping<u32>> = Cell::new(Wrapping(1_406_868_647));
    }

    #[allow(clippy::cast_possible_truncation)]
    RNG.try_with(|rng| {
        // Advance the state with the 32-bit Xorshift step.
        // Source: https://en.wikipedia.org/wiki/Xorshift
        let Wrapping(mut state) = rng.get();
        state ^= state << 13;
        state ^= state >> 17;
        state ^= state << 5;
        rng.set(Wrapping(state));

        // Reduce into `0..n` via Lemire's multiply-shift, a fast
        // alternative to `state % n`.
        // Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
        (u64::from(state).wrapping_mul(u64::from(n)) >> 32) as u32
    })
    .unwrap_or(0)
}

0 commit comments

Comments
 (0)