surwiki/src/state.rs


use std;
use diesel;
use diesel::sqlite::SqliteConnection;
use diesel::prelude::*;
use futures_cpupool::{self, CpuFuture};
use r2d2::Pool;
use r2d2_diesel::ConnectionManager;
use models;
use schema::*;
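
/// Shared application state: an r2d2 pool of SQLite connections plus a
/// futures_cpupool::CpuPool on which all database work is executed.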
#[derive(Clone)]
pub struct State {
connection_pool: Pool<ConnectionManager<SqliteConnection>>,
cpu_pool: futures_cpupool::CpuPool,
}
pub type Error = Box<std::error::Error + Send + Sync>;
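
/// Outcome of resolving a slug: no article uses it, the latest revision of
/// an article uses it, or an older revision used it and the request should
/// redirect to that article's current slug.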
pub enum SlugLookup {
Miss,
Hit {
article_id: i32,
revision: i32,
},
Redirect(String),
}
#[derive(Insertable)]
#[table_name="article_revisions"]
struct NewRevision<'a> {
article_id: i32,
revision: i32,
slug: &'a str,
title: &'a str,
body: &'a str,
author: Option<&'a str>,
latest: bool,
}
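
/// Chooses the slug for a new revision. The front page keeps its empty slug,
/// and an unchanged or identically slugified title keeps the previous slug;
/// otherwise the slugified title is used, with a numeric suffix appended
/// until it no longer collides with the latest slug of any other article.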
fn decide_slug(conn: &SqliteConnection, article_id: i32, prev_title: &str, title: &str, prev_slug: Option<&str>) -> Result<String, Error> {
let base_slug = ::slug::slugify(title);
if let Some(prev_slug) = prev_slug {
if prev_slug == "" {
// Never give a non-empty slug to the front page
return Ok(String::new());
}
if title == prev_title {
return Ok(prev_slug.to_owned());
}
if base_slug == prev_slug {
return Ok(base_slug);
}
}
let base_slug = if base_slug.is_empty() { "article" } else { &base_slug };
use schema::article_revisions;
let mut slug = base_slug.to_owned();
let mut disambiguator = 1;
loop {
let slug_in_use = article_revisions::table
.filter(article_revisions::article_id.ne(article_id))
.filter(article_revisions::slug.eq(&slug))
.filter(article_revisions::latest.eq(true))
.count()
.first::<i64>(conn)? != 0;
if !slug_in_use {
break Ok(slug);
}
disambiguator += 1;
slug = format!("{}-{}", base_slug, disambiguator);
}
}
impl State {
pub fn new(connection_pool: Pool<ConnectionManager<SqliteConnection>>, cpu_pool: futures_cpupool::CpuPool) -> State {
State {
connection_pool,
cpu_pool,
}
}
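
/// Returns the current slug of the given article, or None if the article
/// does not exist.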
pub fn get_article_slug(&self, article_id: i32) -> CpuFuture<Option<String>, Error> {
let connection_pool = self.connection_pool.clone();
self.cpu_pool.spawn_fn(move || {
use schema::article_revisions;
Ok(article_revisions::table
.filter(article_revisions::article_id.eq(article_id))
.filter(article_revisions::latest.eq(true))
.select(article_revisions::slug)
.first::<String>(&*connection_pool.get()?)
.optional()?)
})
}
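
/// Returns the given revision of an article, or None if it does not exist.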
pub fn get_article_revision(&self, article_id: i32, revision: i32) -> CpuFuture<Option<models::ArticleRevision>, Error> {
let connection_pool = self.connection_pool.clone();
self.cpu_pool.spawn_fn(move || {
use schema::article_revisions;
Ok(article_revisions::table
.filter(article_revisions::article_id.eq(article_id))
.filter(article_revisions::revision.eq(revision))
.first::<models::ArticleRevision>(&*connection_pool.get()?)
.optional()?)
})
}
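
/// Applies a caller-supplied transformation to a boxed article_revisions
/// query and loads the matching rows as ArticleRevisionStubs, selecting all
/// columns except the article body.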
pub fn query_article_revision_stubs<F>(&self, f: F) -> CpuFuture<Vec<models::ArticleRevisionStub>, Error>
where
F: 'static + Send + Sync,
for <'a> F:
FnOnce(article_revisions::BoxedQuery<'a, diesel::sqlite::Sqlite>) ->
article_revisions::BoxedQuery<'a, diesel::sqlite::Sqlite>,
{
let connection_pool = self.connection_pool.clone();
self.cpu_pool.spawn_fn(move || {
use schema::article_revisions::dsl::*;
Ok(f(article_revisions.into_boxed())
.select((
sequence_number,
article_id,
revision,
created,
slug,
title,
latest,
author,
))
.load(&*connection_pool.get()?)?
)
})
}
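
/// Lists the latest revision of every article, ordered by title.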
pub fn get_latest_article_revision_stubs(&self) -> CpuFuture<Vec<models::ArticleRevisionStub>, Error> {
self.query_article_revision_stubs(|query| {
query
.filter(article_revisions::latest.eq(true))
.order(article_revisions::title.asc())
})
}
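
/// Resolves a slug. Returns a Hit when the newest revision using the slug is
/// still the latest revision of its article, a Redirect to that article's
/// current slug when it is not, and a Miss when no revision has used the slug.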
pub fn lookup_slug(&self, slug: String) -> CpuFuture<SlugLookup, Error> {
#[derive(Queryable)]
struct ArticleRevisionStub {
article_id: i32,
revision: i32,
latest: bool,
}
let connection_pool = self.connection_pool.clone();
self.cpu_pool.spawn_fn(move || {
let conn = connection_pool.get()?;
conn.transaction(|| {
use schema::article_revisions;
Ok(match article_revisions::table
.filter(article_revisions::slug.eq(slug))
.order(article_revisions::sequence_number.desc())
.select((
article_revisions::article_id,
article_revisions::revision,
article_revisions::latest,
))
.first::<ArticleRevisionStub>(&*conn)
.optional()?
{
None => SlugLookup::Miss,
Some(ref stub) if stub.latest => SlugLookup::Hit {
article_id: stub.article_id,
revision: stub.revision,
},
Some(stub) => SlugLookup::Redirect(
article_revisions::table
.filter(article_revisions::latest.eq(true))
.filter(article_revisions::article_id.eq(stub.article_id))
.select(article_revisions::slug)
.first::<String>(&*conn)?
)
})
})
})
}
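
/// Stores a new revision of an existing article on top of base_revision,
/// recalculating the slug and marking the previous revision as no longer
/// latest. Editing against an outdated base_revision is not handled yet.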
pub fn update_article(&self, article_id: i32, base_revision: i32, title: String, body: String, author: Option<String>)
-> CpuFuture<models::ArticleRevision, Error>
{
let connection_pool = self.connection_pool.clone();
self.cpu_pool.spawn_fn(move || {
let conn = connection_pool.get()?;
conn.transaction(|| {
use schema::article_revisions;
let (latest_revision, prev_title, prev_slug) = article_revisions::table
.filter(article_revisions::article_id.eq(article_id))
.order(article_revisions::revision.desc())
.select((
article_revisions::revision,
article_revisions::title,
article_revisions::slug,
))
.first::<(i32, String, String)>(&*conn)?;
if latest_revision != base_revision {
// TODO: If it is the same edit repeated, just respond OK
// TODO: If there is a conflict, transform the edit to work seamlessly
unimplemented!("TODO Missing handling of revision conflicts");
}
let new_revision = base_revision + 1;
let slug = decide_slug(&*conn, article_id, &prev_title, &title, Some(&prev_slug))?;
diesel::update(
article_revisions::table
.filter(article_revisions::article_id.eq(article_id))
.filter(article_revisions::revision.eq(base_revision))
)
.set(article_revisions::latest.eq(false))
.execute(&*conn)?;
diesel::insert(&NewRevision {
article_id,
revision: new_revision,
slug: &slug,
title: &title,
body: &body,
author: author.as_ref().map(|x| &**x),
latest: true,
})
.into(article_revisions::table)
.execute(&*conn)?;
Ok(article_revisions::table
.filter(article_revisions::article_id.eq(article_id))
.filter(article_revisions::revision.eq(new_revision))
.first::<models::ArticleRevision>(&*conn)?
)
})
})
}
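
/// Creates a new article with revision 1, choosing a slug based on the title
/// and the optional target_slug.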
pub fn create_article(&self, target_slug: Option<String>, title: String, body: String, author: Option<String>)
-> CpuFuture<models::ArticleRevision, Error>
{
let connection_pool = self.connection_pool.clone();
self.cpu_pool.spawn_fn(move || {
let conn = connection_pool.get()?;
conn.transaction(|| {
#[derive(Insertable)]
#[table_name="articles"]
struct NewArticle {
id: Option<i32>
}
let article_id = {
use diesel::expression::sql_literal::sql;
// Diesel and SQLite are a bit in disagreement for how this should look:
sql::<(diesel::types::Integer)>("INSERT INTO articles VALUES (null)")
.execute(&*conn)?;
sql::<(diesel::types::Integer)>("SELECT LAST_INSERT_ROWID()")
.load::<i32>(&*conn)?
.pop().expect("Statement must evaluate to an integer")
};
let slug = decide_slug(&*conn, article_id, "", &title, target_slug.as_ref().map(|x| &**x))?;
let new_revision = 1;
diesel::insert(&NewRevision {
article_id,
revision: new_revision,
slug: &slug,
title: &title,
body: &body,
author: author.as_ref().map(|x| &**x),
latest: true,
})
.into(article_revisions::table)
.execute(&*conn)?;
Ok(article_revisions::table
.filter(article_revisions::article_id.eq(article_id))
.filter(article_revisions::revision.eq(new_revision))
.first::<models::ArticleRevision>(&*conn)?
)
})
})
}
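
/// Full-text search over articles via the article_search FTS table. Multiple
/// words are combined into a NEAR() query, a single word becomes a prefix
/// match, and each result carries a snippet whose length is capped by
/// snippet_size.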
pub fn search_query(&self, query_string: String, limit: i32, offset: i32, snippet_size: i32) -> CpuFuture<Vec<models::SearchResult>, Error> {
let connection_pool = self.connection_pool.clone();
self.cpu_pool.spawn_fn(move || {
use diesel::expression::sql_literal::sql;
use diesel::types::{Integer, Text};
fn fts_quote(src: &str) -> String {
format!("\"{}\"", src.replace('\"', "\"\""))
}
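
// Quote each whitespace-separated term for FTS; multiple terms form a NEAR()
// query, a single term a prefix query, and an empty search degenerates to an
// empty phrase.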
let words = query_string
.split_whitespace()
.map(fts_quote)
.collect::<Vec<_>>();
let query = if words.len() > 1 {
format!("NEAR({})", words.join(" "))
} else if words.len() == 1 {
format!("{}*", words[0])
} else {
"\"\"".to_owned()
};
Ok(
sql::<(Text, Text, Text)>(
"SELECT title, snippet(article_search, 1, '', '', '\u{2026}', ?), slug \
FROM article_search \
WHERE article_search MATCH ? \
ORDER BY rank \
LIMIT ? OFFSET ?"
)
.bind::<Integer, _>(snippet_size)
.bind::<Text, _>(query)
.bind::<Integer, _>(limit)
.bind::<Integer, _>(offset)
.load(&*connection_pool.get()?)?)
})
}
}