Adding a rate limiter and a blank query catcher.

- Fixes #66
- Fixes #67
Dessalines 2019-11-07 15:02:09 -08:00
parent 946b641845
commit 772dd190c7
5 changed files with 1234 additions and 935 deletions


@@ -1,5 +1,3 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
 [[package]]
 name = "adler32"
 version = "1.0.3"


@@ -0,0 +1 @@
+tab_spaces = 2
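(This new one-line file is a rustfmt configuration setting the indent width to two spaces; the formatting-only churn in the main.rs hunks below is consistent with a rustfmt pass under this setting.)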

server/service/Cargo.lock (generated, 1940 lines changed)
File diff suppressed because it is too large.


@@ -4,12 +4,14 @@ version = "0.1.0"
 authors = ["Dessalines <happydooby@gmail.com>"]

 [dependencies]
-actix-web = "*"
+actix-web = "1.0"
+actix-files = "0.1.7"
 serde = "*"
 serde_json = "*"
 serde_derive = "*"
 time = "*"
+failure = "*"

 [dependencies.rusqlite]
 version = "0.15.0"
 features = ["bundled"]
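(A note on the pin: actix-web = "1.0" uses Cargo's default caret requirement, equivalent to ^1.0, i.e. >=1.0.0 and <2.0.0, so the dependency stays on the 1.x line rather than floating to whatever is newest as "*" does. The main.rs changes below are the matching migration from the old 0.x API to actix-web 1.0.)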


@@ -1,3 +1,4 @@
+extern crate actix_files;
 extern crate actix_web;
 extern crate serde;
 extern crate serde_json;
@@ -5,31 +6,55 @@ extern crate serde_json;
 extern crate serde_derive;
 extern crate rusqlite;
 extern crate time;
+#[macro_use]
+extern crate failure;

-use actix_web::{fs, fs::NamedFile, http, server, App, HttpRequest, HttpResponse, Query};
+use actix_files as fs;
+use actix_files::NamedFile;
+use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer};
+use failure::Error;
+use std::collections::HashMap;
 use std::env;
 use std::ops::Deref;
+use std::sync::Mutex;
+use std::time::SystemTime;

-use rusqlite::{Connection};
+use rusqlite::Connection;

+const RATE_LIMIT: i32 = 10;
+const RATE_LIMIT_PER_SECOND: i32 = 60;
+
+pub struct State {
+  rate_limits: Mutex<HashMap<String, RateLimitBucket>>,
+}
+
+#[derive(Debug)]
+pub struct RateLimitBucket {
+  last_checked: SystemTime,
+  allowance: f64,
+}
+
 fn main() {
   println!("Access me at {}", endpoint());
-  server::new(|| {
+
+  let shared_data = web::Data::new(State {
+    rate_limits: Mutex::new(HashMap::new()),
+  });
+
+  HttpServer::new(move || {
     App::new()
-      .route("/service/search", http::Method::GET, search)
-      .resource("/", |r| r.f(index))
-      .handler(
-        "/static",
-        fs::StaticFiles::new(front_end_dir())
-          .unwrap()
-      )
-      .finish()
-  }).bind(endpoint())
-    .unwrap()
-    .run();
+      .route("/", web::get().to(index))
+      .service(fs::Files::new("/static", front_end_dir()))
+      .register_data(shared_data.clone())
+      .route("/service/search", web::get().to(search))
+  })
+  .bind(endpoint())
+  .unwrap()
+  .run()
+  .unwrap()
 }

-fn index(_req: &HttpRequest) -> Result<NamedFile, actix_web::error::Error> {
+fn index() -> Result<NamedFile, actix_web::error::Error> {
   Ok(NamedFile::open(front_end_dir() + "/index.html")?)
 }
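(Worth noting: the State is constructed once, before HttpServer::new, and the worker-factory closure only clones the web::Data handle, which is reference-counted. Every worker thread therefore shares the same Mutex<HashMap> of buckets, making the rate limits global to the process; building the map inside the closure would instead give each worker its own independent counters.)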
@@ -44,28 +69,58 @@ fn torrents_db_file() -> String {
 fn endpoint() -> String {
   env::var("TORRENTS_CSV_ENDPOINT").unwrap_or("0.0.0.0:8080".to_string())
 }

 #[derive(Deserialize)]
 struct SearchQuery {
   q: String,
   page: Option<usize>,
   size: Option<usize>,
-  type_: Option<String>
+  type_: Option<String>,
 }

-fn search(query: Query<SearchQuery>) -> HttpResponse {
-  HttpResponse::Ok()
-    .header("Access-Control-Allow-Origin", "*")
-    .content_type("application/json")
-    .body(search_query(query))
+fn search(
+  req: HttpRequest,
+  data: web::Data<State>,
+  query: web::Query<SearchQuery>,
+) -> HttpResponse {
+  let ip = req
+    .connection_info()
+    .remote()
+    .unwrap_or("127.0.0.1:12345")
+    .split(":")
+    .next()
+    .unwrap_or("127.0.0.1")
+    .to_string();
+
+  if query.q.is_empty() {
+    return HttpResponse::BadRequest()
+      .header("Access-Control-Allow-Origin", "*")
+      .content_type("application/json")
+      .body(format!("{{\"error\": \"{}\"}}", "Empty query".to_string()));
+  }
+
+  match check_rate_limit_full(data, &ip, RATE_LIMIT, RATE_LIMIT_PER_SECOND) {
+    Ok(_) => HttpResponse::Ok()
+      .header("Access-Control-Allow-Origin", "*")
+      .content_type("application/json")
+      .body(search_query(query)),
+    Err(e) => HttpResponse::BadRequest()
+      .header("Access-Control-Allow-Origin", "*")
+      .content_type("application/json")
+      .body(format!("{{\"error\": \"{}\"}}", e.to_string())),
+  }
 }

-fn search_query(query: Query<SearchQuery>) -> String {
+fn search_query(query: web::Query<SearchQuery>) -> String {
   let page = query.page.unwrap_or(1);
   let size = query.size.unwrap_or(10);
   let type_ = query.type_.as_ref().map_or("torrent", String::deref);
   let offset = size * (page - 1);

-  println!("query = {}, type = {}, page = {}, size = {}", query.q, type_, page, size);
+  println!(
+    "query = {}, type = {}, page = {}, size = {}",
+    query.q, type_, page, size
+  );

   if type_ == "file" {
     let results = torrent_file_search(&query.q, size, offset);
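Two observations on the new handler. Splitting the remote address on the first ':' isolates the host for IPv4 "ip:port" strings, but it would truncate a bracketed IPv6 address. And the error body is assembled by hand with format!, so an error message containing a double quote would produce invalid JSON; since serde_json is already a dependency, a hypothetical helper along these lines could delegate the escaping (error_body is an illustrative name, not part of the commit):

#[macro_use]
extern crate serde_json;

// Hypothetical sketch: an {"error": ...} body equivalent to the handler's,
// with serde_json escaping any quotes or backslashes in the message.
fn error_body(msg: &str) -> String {
  json!({ "error": msg }).to_string()
}

fn main() {
  assert_eq!(error_body("Empty query"), "{\"error\":\"Empty query\"}");
  // Stays valid JSON even when the message itself contains quotes:
  println!("{}", error_body("a \"quoted\" message"));
}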
@@ -93,20 +148,24 @@ fn torrent_search(query: &str, size: usize, offset: usize) -> Vec<Torrent> {
   let conn = Connection::open(torrents_db_file()).unwrap();
   let mut stmt = conn.prepare(&stmt_str).unwrap();
   let torrent_iter = stmt
-    .query_map(&[
-      query.replace(" ", "%"),
-      size.to_string(),
-      offset.to_string(),
-    ], |row| Torrent {
-      infohash: row.get(0),
-      name: row.get(1),
-      size_bytes: row.get(2),
-      created_unix: row.get(3),
-      seeders: row.get(4),
-      leechers: row.get(5),
-      completed: row.get(6),
-      scraped_date: row.get(7),
-    }).unwrap();
+    .query_map(
+      &[
+        query.replace(" ", "%"),
+        size.to_string(),
+        offset.to_string(),
+      ],
+      |row| Torrent {
+        infohash: row.get(0),
+        name: row.get(1),
+        size_bytes: row.get(2),
+        created_unix: row.get(3),
+        seeders: row.get(4),
+        leechers: row.get(5),
+        completed: row.get(6),
+        scraped_date: row.get(7),
+      },
+    )
+    .unwrap();

   let mut torrents = Vec::new();
   for torrent in torrent_iter {
@@ -133,21 +192,25 @@ fn torrent_file_search(query: &str, size: usize, offset: usize) -> Vec<File> {
   let conn = Connection::open(torrents_db_file()).unwrap();
   let mut stmt = conn.prepare(&stmt_str).unwrap();
   let file_iter = stmt
-    .query_map(&[
-      query.replace(" ", "%"),
-      size.to_string(),
-      offset.to_string(),
-    ], |row| File {
-      infohash: row.get(0),
-      index_: row.get(1),
-      path: row.get(2),
-      size_bytes: row.get(3),
-      created_unix: row.get(4),
-      seeders: row.get(5),
-      leechers: row.get(6),
-      completed: row.get(7),
-      scraped_date: row.get(8),
-    }).unwrap();
+    .query_map(
+      &[
+        query.replace(" ", "%"),
+        size.to_string(),
+        offset.to_string(),
+      ],
+      |row| File {
+        infohash: row.get(0),
+        index_: row.get(1),
+        path: row.get(2),
+        size_bytes: row.get(3),
+        created_unix: row.get(4),
+        seeders: row.get(5),
+        leechers: row.get(6),
+        completed: row.get(7),
+        scraped_date: row.get(8),
+      },
+    )
+    .unwrap();

   let mut files = Vec::new();
   for file in file_iter {
@@ -156,6 +219,56 @@ fn torrent_file_search(query: &str, size: usize, offset: usize) -> Vec<File> {
   files
 }

+fn check_rate_limit_full(
+  state: web::Data<State>,
+  ip: &str,
+  rate: i32,
+  per: i32,
+) -> Result<(), Error> {
+  let mut rate_limits = state.rate_limits.lock().unwrap();
+
+  if rate_limits.get_mut(ip).is_none() {
+    rate_limits.insert(
+      ip.to_string(),
+      RateLimitBucket {
+        last_checked: SystemTime::now(),
+        allowance: -2f64,
+      },
+    );
+  }
+
+  if let Some(rate_limit) = rate_limits.get_mut(ip) {
+    // The initial value
+    if rate_limit.allowance == -2f64 {
+      rate_limit.allowance = rate as f64;
+    };
+
+    let current = SystemTime::now();
+    let time_passed = current.duration_since(rate_limit.last_checked)?.as_secs() as f64;
+    rate_limit.last_checked = current;
+    rate_limit.allowance += time_passed * (rate as f64 / per as f64);
+
+    if rate_limit.allowance > rate as f64 {
+      rate_limit.allowance = rate as f64;
+    }
+
+    if rate_limit.allowance < 1.0 {
+      println!(
+        "Rate limited IP: {}, time_passed: {}, allowance: {}",
+        &ip, time_passed, rate_limit.allowance
+      );
+      Err(format_err!(
+        "Too many requests for IP: {}. {} per {} seconds",
+        &ip,
+        rate,
+        per
+      ))?
+    } else {
+      rate_limit.allowance -= 1.0;
+      Ok(())
+    }
+  } else {
+    Ok(())
+  }
+}
+
 #[cfg(test)]
 mod tests {
@@ -164,8 +277,7 @@ mod tests {
   #[test]
   fn test() {
     let start = PreciseTime::now();
-    let results =
-      super::torrent_search("sherlock", 10, 0);
+    let results = super::torrent_search("sherlock", 10, 0);
     assert!(results.len() > 2);
     let end = PreciseTime::now();
     println!("Query took {} seconds.", start.to(end));