file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
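Each row below is one fill-in-the-middle (FIM) sample, flattened as file_name | prefix | suffix | middle | fim_type: the middle is the span that was cut out of the original file between prefix and suffix, and fim_type records how the hole was chosen (random_line_split, identifier_body, identifier_name or conditional_block). A hypothetical minimal row would read: main.rs | fn add(a: i32, b: i32) -> i32 { | } | a + b | identifier_body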
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// look up all strings and translate them into indices.
// if any string is missing from the table, the containing conjunction cannot match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
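// Worked example for `matching` above (hypothetical data): with elements
// [{"a"}, {"a","b"}] and the query a&b | c, the conjunction {c} is dropped
// by `lookup` (unknown string), while {a,b} translates to {0,1}, which is a
// subset of element 1 only, so the result is [1].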
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening nested identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
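// Sketch of the bracket-minimal printing above: only `Or` children are
// parenthesized, so operator precedence survives printing, e.g.
// assert_eq!((l("a") & (l("b") | l("c"))).to_string(), "a&(b|c)");
// assert_eq!(((l("a") & l("b")) | l("c")).to_string(), "a&b|c");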
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) &...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = Vec::new();
for a in aa.iter() {
if a.is_subset(&b) {
// a matches at least everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// b matches at least everything a matches. E.g. x&y | x
// remove a, keep b. there can be several such a, so collect them all.
to_remove.push(a.clone());
}
}
for r in to_remove {
aa.remove(&r);
}
aa.insert(b);
}
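// Absorption in action (hypothetical values): inserting {"x","y"} into
// {{"x"}} returns early, since x | x&y == x; inserting {"x"} into
// {{"x","y"}, {"x","z"}} removes both supersets, leaving just {{"x"}}.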
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?; | }
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
} | Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect() | random_line_split |
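The tuple encoding chosen in `impl Serialize for Index` above yields a compact two-element array: the strings table first, then the per-element index sets. A minimal sketch of the resulting wire format (assuming the serde_json and maplit imports already present in the file):
let index = Index::from_elements(&[btreeset! {"a"}, btreeset! {"a", "b"}]);
let json = serde_json::to_string(&index).unwrap();
assert_eq!(json, r#"[["a","b"],[[0],[0,1]]]"#);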
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// look up all strings and translate them into indices.
// if any string is missing from the table, the containing conjunction cannot match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening nested identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self |
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) &...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = Vec::new();
for a in aa.iter() {
if a.is_subset(&b) {
// a matches at least everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// b matches at least everything a matches. E.g. x&y | x
// remove a, keep b. there can be several such a, so collect them all.
to_remove.push(a.clone());
}
}
for r in to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
} | identifier_body |
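In this identifier_body row the masked middle is the body of `Expression::or`. The flattening it performs means nested unions collapse into a single n-ary node; a small sketch using the `l` helper defined in the file:
let e = (l("a") | l("b")) | l("c");
assert_eq!(e.to_string(), "a|b|c");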
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// look up all strings and translate them into indices.
// if any string is missing from the table, the containing conjunction cannot match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening nested identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn | (e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) &...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = Vec::new();
for a in aa.iter() {
if a.is_subset(&b) {
// a matches at least everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// b matches at least everything a matches. E.g. x&y | x
// remove a, keep b. there can be several such a, so collect them all.
to_remove.push(a.clone());
}
}
for r in to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| or | identifier_name |
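Here the masked middle is just the identifier `or`. As the doc comment on `Expression::dnf` warns, conversion to DNF multiplies out unions; a sketch mirroring test_dnf_intersection_2 above:
let e = (l("a") | l("b")) & (l("c") | l("d"));
assert_eq!(e.dnf().expression().to_string(), "a&c|a&d|b&c|b&d");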
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() |
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// look up all strings and translate them into indices.
// if any string is missing from the table, the containing conjunction cannot match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening nested identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) &...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = Vec::new();
for a in aa.iter() {
if a.is_subset(&b) {
// a matches at least everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// b matches at least everything a matches. E.g. x&y | x
// remove a, keep b. there can be several such a, so collect them all.
to_remove.push(a.clone());
}
}
for r in to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| {
return Err(serde::de::Error::custom("invalid string index"));
} | conditional_block |
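This conditional_block row masks the error branch of the bounds check in `deserialize`. The helpers at the bottom of the file compose CBOR serialization with zstd compression; a sketch of a round trip using the file's own functions (keeping the `compresss_zstd_cbor` spelling as written):
let index = Index::from_elements(&[btreeset! {"a"}]);
let bytes = compresss_zstd_cbor(&index).unwrap();
let back: Index = decompress_zstd_cbor(&bytes).unwrap();
assert_eq!(index, back);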
xterm.rs | use with [`OwoColorize::color`](OwoColorize::color)
/// or [`OwoColorize::on_color`](OwoColorize::on_color)
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum XtermColors {
$(
#[allow(missing_docs)]
$name,
)*
}
impl crate::DynColor for XtermColors {
fn fmt_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[38;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[48;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("38;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("48;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
#[doc(hidden)]
fn get_dyncolors_fg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
#[doc(hidden)]
fn get_dyncolors_bg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
}
impl From<u8> for XtermColors {
fn from(x: u8) -> Self {
match x {
$(
$xterm_num => XtermColors::$name,
)*
}
}
}
| fn from(color: XtermColors) -> Self {
match color {
$(
XtermColors::$name => $xterm_num,
)*
}
}
}
}
$(
#[allow(missing_docs)]
pub struct $name;
impl crate::Color for $name {
const ANSI_FG: &'static str = concat!("\x1b[38;5;", stringify!($xterm_num), "m");
const ANSI_BG: &'static str = concat!("\x1b[48;5;", stringify!($xterm_num), "m");
const RAW_ANSI_BG: &'static str = concat!("48;5;", stringify!($xterm_num));
const RAW_ANSI_FG: &'static str = concat!("38;5;", stringify!($xterm_num));
#[doc(hidden)]
type DynEquivelant = dynamic::XtermColors;
#[doc(hidden)]
const DYN_EQUIVELANT: Self::DynEquivelant = dynamic::XtermColors::$name;
#[doc(hidden)]
fn into_dyncolors() -> crate::DynColors {
crate::DynColors::Xterm(dynamic::XtermColors::$name)
}
}
)*
};
}
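// Sketch: for the entry `21 Blue (0,0,255)` in the invocation below, the
// macro arm above expands to approximately:
//   pub struct Blue;
//   impl crate::Color for Blue {
//       const ANSI_FG: &'static str = "\x1b[38;5;21m";
//       const ANSI_BG: &'static str = "\x1b[48;5;21m";
//       ...
//   }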
xterm_colors! {
0 UserBlack (0,0,0)
1 UserRed (128,0,0)
2 UserGreen (0,128,0)
3 UserYellow (128,128,0)
4 UserBlue (0,0,128)
5 UserMagenta (128,0,128)
6 UserCyan (0,128,128)
7 UserWhite (192,192,192)
8 UserBrightBlack (128,128,128)
9 UserBrightRed (255,0,0)
10 UserBrightGreen (0,255,0)
11 UserBrightYellow (255,255,0)
12 UserBrightBlue (0,0,255)
13 UserBrightMagenta (255,0,255)
14 UserBrightCyan (0,255,255)
15 UserBrightWhite (255,255,255)
16 Black (0,0,0)
17 StratosBlue (0,0,95)
18 NavyBlue (0,0,135)
19 MidnightBlue (0,0,175)
20 DarkBlue (0,0,215)
21 Blue (0,0,255)
22 CamaroneGreen (0,95,0)
23 BlueStone (0,95,95)
24 OrientBlue (0,95,135)
25 EndeavourBlue (0,95,175)
26 ScienceBlue (0,95,215)
27 BlueRibbon (0,95,255)
28 JapaneseLaurel (0,135,0)
29 DeepSeaGreen (0,135,95)
30 Teal (0,135,135)
31 DeepCerulean (0,135,175)
32 LochmaraBlue (0,135,215)
33 AzureRadiance (0,135,255)
34 LightJapaneseLaurel (0,175,0)
35 Jade (0,175,95)
36 PersianGreen (0,175,135)
37 BondiBlue (0,175,175)
38 Cerulean (0,175,215)
39 LightAzureRadiance (0,175,255)
40 DarkGreen (0,215,0)
41 Malachite (0,215,95)
42 CaribbeanGreen (0,215,135)
43 LightCaribbeanGreen (0,215,175)
44 RobinEggBlue (0,215,215)
45 Aqua (0,215,255)
46 Green (0,255,0)
47 DarkSpringGreen (0,255,95)
48 SpringGreen (0,255,135)
49 LightSpringGreen (0,255,175)
50 BrightTurquoise (0,255,215)
51 Cyan (0,255,255)
52 Rosewood (95,0,0)
53 PompadourMagenta (95,0,95)
54 PigmentIndigo (95,0,135)
55 DarkPurple (95,0,175)
56 ElectricIndigo (95,0,215)
57 ElectricPurple (95,0,255)
58 VerdunGreen (95,95,0)
59 ScorpionOlive (95,95,95)
60 Lilac (95,95,135)
61 ScampiIndigo (95,95,175)
62 Indigo (95,95,215)
63 DarkCornflowerBlue (95,95,255)
64 DarkLimeade (95,135,0)
65 GladeGreen (95,135,95)
66 JuniperGreen (95,135,135)
67 HippieBlue (95,135,175)
68 HavelockBlue (95,135,215)
69 CornflowerBlue (95,135,255)
70 Limeade (95,175,0)
71 FernGreen (95,175,95)
72 SilverTree (95,175,135)
73 Tradewind (95,175,175)
74 ShakespeareBlue (95,175,215)
75 DarkMalibuBlue (95,175,255)
76 DarkBrightGreen (95,215,0)
77 DarkPastelGreen (95,215,95)
78 PastelGreen (95,215,135)
79 DownyTeal (95,215,175)
80 Viking (95,215,215)
81 MalibuBlue (95,215,255)
82 BrightGreen (95,255,0)
83 DarkScreaminGreen (95,255,95)
84 ScreaminGreen (95,255,135)
85 DarkAquamarine (95,255,175)
86 Aquamarine (95,255,215)
87 LightAquamarine (95,255,255)
88 Maroon (135,0,0)
89 DarkFreshEggplant (135,0,95)
90 LightFreshEggplant (135,0,135)
91 Purple (135,0,175)
92 ElectricViolet (135,0,215)
93 LightElectricViolet (135,0,255)
94 Brown (135,95,0)
95 CopperRose (135,95,95)
96 StrikemasterPurple (135,95,135)
97 DelugePurple (135,95,175)
98 DarkMediumPurple (135,95,215)
99 DarkHeliotropePurple (135,95,255)
100 Olive (135,135,0)
101 ClayCreekOlive (135,135,95)
102 DarkGray (135,135,135)
103 WildBlueYonder (135,135,175)
104 ChetwodeBlue (135,135,215)
105 SlateBlue (135,135,255)
106 LightLimeade (135,175,0)
107 ChelseaCucumber (135,175,95)
108 BayLeaf (135,175,135)
109 GulfStream (135,175,175)
110 PoloBlue (135,175,215)
111 LightMalibuBlue (135,175,255)
112 Pistachio (135,215,0)
113 LightPastelGreen (135,215,95)
114 DarkFeijoaGreen (135,215,135)
115 VistaBlue (135,215,175)
116 Bermuda (135,215,215)
117 DarkAnakiwaBlue (135,215,255)
118 ChartreuseGreen (135,255,0)
119 LightScreaminGreen (135,255,95)
120 DarkMintGreen (135,255,135)
121 MintGreen (135,255,175)
122 LighterAquamarine (135,255,215)
123 AnakiwaBlue (135,255,255)
124 BrightRed (175,0,0)
125 DarkFlirt (175,0,95)
126 Flirt (175,0,135)
127 LightFlirt (175,0,175)
128 DarkViolet (175,0,215)
129 BrightElectricViolet (175,0,255)
130 RoseofSharonOrange (175,95,0)
131 MatrixPink (175,95,95)
132 TapestryPink (175,95,135)
133 FuchsiaPink (175,95,175)
134 MediumPurple (175,95,215)
135 Heliotrope (175,95,255)
136 PirateGold (175,135,0)
137 MuesliOrange (175,135,95)
138 PharlapPink (175,135,135)
139 Bouquet (175,135,175)
140 Lavender (175,135,215)
141 LightHeliotrope (175,135,255)
142 BuddhaGold (175,175,0)
143 OliveGreen (175,175,95)
144 HillaryOlive (175,175,135)
145 SilverChalice (175,175,175)
146 WistfulLilac (175,175,215)
147 MelroseLilac (175,175,255)
148 RioGrandeGreen (175,215,0)
149 ConiferGreen (175,215,95)
150 Feijoa (175,215,135)
151 PixieGreen (175,215,175)
152 JungleMist (175,215,215)
153 LightAnakiwaBlue (175,215,255)
154 Lime (175,255,0)
155 GreenYellow (175,255,95)
156 LightMintGreen (175,255,135)
157 Celadon (175,255,175)
158 AeroBlue (175,255,215)
159 FrenchPassLightBlue (175,255,255)
160 GuardsmanRed (215,0,0)
161 RazzmatazzCerise (215,0,95)
162 MediumVioletRed (215,0,135)
163 HollywoodCerise (215,0,175)
164 DarkPurplePizzazz (215,0,215)
165 BrighterElectricViolet (215,0,255)
166 TennOrange (215,95,0)
167 RomanOrange (215,95,95)
168 CranberryPink (215,95,135)
169 HopbushPink (215,95,175)
170 Orchid (215,95,215)
171 LighterHeliotrope (215,95,255)
172 MangoTango (215,135,0)
173 Copperfield (215,135,95)
174 SeaPink (215,135,135)
175 CanCanPink (215,135,175)
176 LightOrchid (215,135,215)
177 BrightHeliotrope (215,135,255)
178 DarkCorn (215,175,0)
179 DarkTachaOrange (215,175,95)
180 TanBeige (215,175,135)
181 ClamShell (215,175,175)
182 ThistlePink (215,175,215)
183 Mauve (215,175,255)
184 Corn (215,215,0)
185 TachaOrange (215,215,95)
186 DecoOrange (215,215,135)
187 PaleGoldenrod (215,215,175)
188 AltoBeige (215,215,215)
189 FogPink (215,215,255)
190 ChartreuseYellow (215,255,0)
191 Canary (215,255,95)
192 Honeysuckle (215,255,135)
193 ReefPaleYellow (215,255,175)
194 SnowyMint (215,255,215)
195 OysterBay (215,255,255)
196 Red (255,0,0)
197 DarkRose (255,0,95)
198 Rose (255,0,135)
199 LightHollywoodCerise (255,0,175)
200 PurplePizzazz (255,0,215)
201 Fuchsia (255,0,255)
202 BlazeOrange (255,95,0)
203 BittersweetOrange (255,95,95)
204 WildWatermelon (255,95,135)
205 DarkHotPink (255,95,175)
206 HotPink (255,95,215)
207 PinkFlamingo (255,95,255)
208 FlushOrange (255,135,0)
209 Salmon (255,135,95)
210 VividTangerine (255,135,135)
211 PinkSalmon (255,135,175)
212 DarkLavenderRose (255,135,215)
213 BlushPink (255,135,255)
214 YellowSea (255,175,0)
215 TexasRose (255,175,95)
216 Tacao (255,175,135)
217 Sundown (255,175,175)
218 CottonCandy (255,175,215)
219 LavenderRose (255,175,255)
220 Gold (255,215,0)
221 Dandelion (255,215,95)
222 GrandisCaramel (255,215,135)
223 Caramel (255,215,175)
224 CosmosSalmon (255,215,215)
225 PinkLace (255,215,255)
226 Yellow (255,255,0)
227 LaserLemon (255,255,95)
228 DollyYellow (255,255,135)
229 PortafinoYellow (255,255,175)
230 Cumulus (255,255,215)
231 White (255,255,255)
232 DarkCodGray (8,8,8)
233 CodGray (18,18,18)
234 LightCodGray (28,28,28)
235 DarkMineShaft (38,38,38)
236 MineShaft (48,48,48)
237 LightMineShaft (58,58,58)
238 DarkTundora (68,68,68)
239 Tundora (78,78,78)
240 ScorpionGray (88,88,88)
241 DarkDoveGray (98,98,98)
242 DoveGray | impl From<XtermColors> for u8 { | random_line_split |
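The escape sequences generated above follow the xterm 256-color scheme: ESC[38;5;<n>m selects foreground color n and ESC[48;5;<n>m the background, with ESC[0m resetting. A sketch using two entries from the table:
println!("\x1b[38;5;196mred text\x1b[0m"); // 196 = Red in the table above
println!("\x1b[48;5;21mon blue\x1b[0m"); // 21 = Blue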
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new() | t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statement_id in hash_eq_info.dependents {
statement_memories.insert(statement_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by laid-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartesian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statement_id in info.dependents {
statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(_, ref intersect)| !intersect.is_empty())
.max_by_key(|&(_, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
} | }
}
pub struct KnowledgeBase<T: ReteIntrospection> { | random_line_split |
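This `base.rs` row splits at `LayoutIdGenerator::new()` and holds out the `pub struct KnowledgeBase` line. Below is a minimal sketch of how such a row could be reassembled or rendered as a training string, assuming the common prefix-suffix-middle (PSM) layout; the `<PRE>`/`<SUF>`/`<MID>` sentinels are placeholders, not tokens this dataset is known to use.

// Sketch only: splices a FIM row back together and formats a PSM prompt.
struct FimRow<'s> {
    prefix: &'s str,
    suffix: &'s str,
    middle: &'s str,
}

impl<'s> FimRow<'s> {
    /// Original document = prefix + middle + suffix.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    /// PSM-style training string with the middle as the completion target.
    fn psm_prompt(&self) -> String {
        format!("<PRE>{}<SUF>{}<MID>{}", self.prefix, self.suffix, self.middle)
    }
}

fn main() {
    let row = FimRow {
        prefix: "pub struct KnowledgeBase",
        suffix: " }",
        middle: "<T> { t: T",
    };
    assert!(row.reassemble().starts_with("pub struct"));
    println!("{}", row.psm_prompt());
}

Reassembly is plain concatenation in document order, while the PSM string moves the suffix ahead of the middle so the held-out span is completed last.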
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) | else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statement_id in hash_eq_info.dependents {
statement_memories.insert(statement_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by laid-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartesian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statement_id in info.dependents {
statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(_, ref intersect)| !intersect.is_empty())
.max_by_key(|&(_, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} | conditional_block |
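The held-out `conditional_block` above is the comparator body that chains two comparisons with `Ordering::then`: dependent-statement count first, test count as the tie-breaker. A standalone sketch of that pattern on toy data:

// Toy (dependent-count, test-count) pairs, analogous to the comparator's keys.
fn main() {
    let mut conditions = vec![(2usize, 5usize), (1, 9), (2, 3), (1, 1)];
    // Primary key first; `.then` only matters when the first compare ties.
    conditions.sort_by(|a, b| a.0.cmp(&b.0).then(a.1.cmp(&b.1)));
    assert_eq!(conditions, vec![(1, 1), (1, 9), (2, 3), (2, 5)]);
}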
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statement_id in hash_eq_info.dependents {
statement_memories.insert(statement_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by laid-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartesian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statement_id in info.dependents {
statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(_, ref intersect)| !intersect.is_empty())
.max_by_key(|&(_, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn | <I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| insert | identifier_name |
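In this row the held-out span is a bare identifier after `pub fn `; the other copies of `base.rs` show the method is `insert`, built on `HashMap`'s entry API. Here is a self-contained toy version of that entry pattern, with placeholder key and value types standing in for `MemoryId` and `Rc<T>`:

use std::collections::{HashMap, HashSet};
use std::rc::Rc;

struct AlphaMemoryToy {
    mem: HashMap<u32, HashSet<Rc<str>>>,
}

impl AlphaMemoryToy {
    fn insert(&mut self, id: u32, val: Rc<str>) {
        // Create the per-memory set on first use, then add the fact.
        self.mem.entry(id)
            .or_insert_with(HashSet::new)
            .insert(val);
    }
}

fn main() {
    let mut m = AlphaMemoryToy { mem: HashMap::new() };
    m.insert(7, Rc::from("fact"));
    m.insert(7, Rc::from("fact")); // deduplicated by the set
    assert_eq!(m.mem[&7].len(), 1);
}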
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statement_id in hash_eq_info.dependents {
statement_memories.insert(statement_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by laid-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartesian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statement_id in info.dependents {
statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(_, ref intersect)| !intersect.is_empty())
.max_by_key(|&(_, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding a beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> |
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
} | identifier_body |
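The `identifier_body` middle above is the interning trick in `FactStore::insert`: keep one shared `Rc` per distinct value and hand back the existing clone on a duplicate. A standalone sketch with `String` standing in for the generic fact type:

use std::collections::HashSet;
use std::rc::Rc;

struct Store {
    store: HashSet<Rc<String>>,
}

impl Store {
    fn insert(&mut self, val: String) -> Rc<String> {
        let rc = Rc::new(val);
        if !self.store.insert(rc.clone()) {
            // Already present: return the canonical copy.
            self.store.get(&rc).unwrap().clone()
        } else {
            rc
        }
    }
}

fn main() {
    let mut s = Store { store: HashSet::new() };
    let a = s.insert("x".to_string());
    let b = s.insert("x".to_string());
    assert!(Rc::ptr_eq(&a, &b)); // both handles share one allocation
}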
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockCache`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R: rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else |
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
}
| {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
} | conditional_block |
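The held-out branch above recurses through the crate's private `make_2x2` helper to build a 2^n-sided block from four 2^(n-1) quadrants. Below is a self-contained sketch of that recursion shape; `make_2x2`, the `Quad` type, and the tiny LCG are stand-ins, not the crate's actual definitions:

fn make_2x2<T, F: FnMut(usize, usize) -> T>(mut f: F) -> [[T; 2]; 2] {
    [[f(0, 0), f(0, 1)], [f(1, 0), f(1, 1)]]
}

enum Quad {
    Leaf(u16),                 // smallest block, cells packed in bits
    Node(Box<[[Quad; 2]; 2]>), // four half-size corners
}

struct Lcg(u32);
impl Lcg {
    fn next_u16(&mut self) -> u16 {
        self.0 = self.0.wrapping_mul(1_103_515_245).wrapping_add(12_345);
        (self.0 >> 16) as u16
    }
}

fn random_quad(rng: &mut Lcg, lg_size: usize) -> Quad {
    if lg_size == 1 {
        Quad::Leaf(rng.next_u16() & 0xF) // leaf case, as in the prefix above
    } else {
        // Held-out shape: recurse once per quadrant at half the side length.
        Quad::Node(Box::new(make_2x2(|_, _| random_quad(rng, lg_size - 1))))
    }
}

fn count_leaves(q: &Quad) -> usize {
    match q {
        Quad::Leaf(_) => 1,
        Quad::Node(n) => n.iter().flat_map(|row| row.iter()).map(count_leaves).sum(),
    }
}

fn main() {
    let mut rng = Lcg(0xDEAD_BEEF);
    let quad = random_quad(&mut rng, 3);
    assert_eq!(count_leaves(&quad), 16); // 4^(3-1) leaves at lg_size 3
}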
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockCache`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
| /// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
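// Illustrative trace of the cache growth above: starting from the seed
// entry `[RawBlock::Leaf(0)]`, a request at depth 3 pushes blanks for
// depths 1, 2 and 3, each a 2x2 node built from four copies of the
// previous entry, then returns the last one pushed.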
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
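// Usage sketch (hypothetical; assumes a `rand` version where `gen` and
// `thread_rng` exist as used above):
//
//     // let mut rng = rand::thread_rng();
//     // let block = hl.random_block(&mut rng, LG_LEAF_SIZE + 3);
//     // assert_eq!(block.lg_size(), LG_LEAF_SIZE + 3);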
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
} | /// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the | random_line_split |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For an explanation of
/// why this calling convention is used, see `CABlockCache::with_new`.
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if the sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates a leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to the underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
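// The depth argument passed to `evolve::evolve` is
// `lg_size - LG_LEAF_SIZE - 1`: the number of levels between this node
// and a node whose corners are leaf blocks, where the recursion can
// presumably bottom out in the small evolve cache.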
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
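// The size contract in code form (illustrative only):
//
//     // let node: Node = ...;           // lg_size == n + 1
//     // let center = hl.big_step(node); // center.lg_size() == n,
//     //                                 // evolved 2^(n-1) generations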
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
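// Usage sketch (hypothetical): pattern-match instead of unwrapping.
//
//     // match block.destruct() {
//     //     Ok(node) => { /* a node; recurse into node.corners() */ }
//     //     Err(leaf) => { /* a single leaf value */ }
//     // }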
pub fn unwrap_leaf(self) -> Leaf |
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
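// Equality compares only the `raw` field: raw blocks are shared through
// the block cache, so (assuming the cache hash-conses them) comparing
// two huge blocks is cheap and needs no tree traversal.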
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
}
| {
self.destruct().unwrap_err()
} | identifier_body |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For an explanation of
/// why this calling convention is used, see `CABlockCache::with_new`.
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if the sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates a leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to the underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return the sidelength-2^(n-1) block at the center of the node after it
/// has evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
}
| fmt | identifier_name |
easy.rs | use super::*;
use crate::utils::over;
pub fn init<B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> | .find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
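// Usage sketch (hypothetical backend crate and window; names assumed):
//
//     // let (instance, surface, format, adapter, device, queue_group, command_pool) =
//     //     init::<backend::Backend>(&window, "example-app", 1)?;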
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
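// Usage sketch (hypothetical resources): one descriptor set holding a
// single uniform buffer, one sampled image and one sampler.
//
//     // let (layout, pool, sets) = desc_sets::<B>(
//     //     &device,
//     //     vec![(vec![&uniform_buffer], vec![&image_view], vec![&sampler])],
//     // );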
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
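// Note the implied binding order: descriptors are written starting at
// binding 0 in the order buffers, then images, then samplers, matching
// the numbering that `desc_sets` above gives its layout bindings.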
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
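// The `intermediate` flag only changes the final image layout: a
// render-to-texture pass ends in `ShaderReadOnlyOptimal` so a later
// pass can sample its output, while a presenting pass ends in
// `Layout::Present`.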
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else { vec![] };
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass,
},
);
pipeline_desc.blender.targets.push(ColorBlendDesc {
mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
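// Usage sketch (hypothetical SPIR-V blobs; assumes a vertex layout of
// position vec3 + color vec4, no push constants and no depth buffer):
//
//     // let (pipeline, pipeline_layout) = pipeline::<B>(
//     //     &device, Some(&desc_layout), 0,
//     //     include_bytes!("shader.vert.spv"),
//     //     include_bytes!("shader.frag.spv"),
//     //     &render_pass, None, &[3, 4],
//     // );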
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
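// Typically called once at startup and again whenever the swapchain is
// lost or the window is resized; the extent actually chosen by the
// driver is written back through `surface_extent`.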
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
}
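// Usage sketch (hypothetical render-loop step; assumes
// `FramebufferAttachment` is `Clone`, as in gfx-hal 0.9):
//
//     // if let Ok((framebuffer, image, viewport)) = acquire_framebuffer::<B>(
//     //     &device, &mut surface, &surface_extent, &render_pass,
//     //     framebuffer_attachment.clone(),
//     // ) {
//     //     // record and submit commands, then present `image`
//     // }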
| {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter() | identifier_body |