diff --git a/.gitignore b/.gitignore
index 058002c..7608143 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,5 +12,9 @@ Cargo.lock
 # These are backup files generated by rustfmt
 **/*.rs.bk
 
+# Exclude IntelliJ files
+.idea
+
+test_db.json
 # End of https://www.gitignore.io/api/rust
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a7b3538..fa0babd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,17 +7,72 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
 ## [Unreleased]
 
-## [0.6.0] - 2018-08-23
+## [0.8.0] - 2020-09-26
+
+- The `find()` operation now takes a `FindQuery`
+
+## [0.7.18] - 2020-09-25
+
+- Views now use QueryParams instead of an untyped map.
+- Views can now return the optional "doc" item.
+- BREAKING CHANGE: `execute_view` has been removed. Use `query` instead.
+
+## [0.7.17] - 2020-09-14
+
+- Sort takes an array of key/value pairs, like: [{"first_name":"desc"}]
+
+## [0.7.16] - 2020-09-14
+
+- Make total_rows in ViewCollection optional.
+
+## [0.7.15] - 2020-09-14
+
+- Make id in ViewItem optional.
+
+## [0.7.14] - 2020-09-14
+- Return the value in ViewItem as a Value, not a String
+
+## [0.7.13] - 2020-09-11
+- Use reqwest's `error_for_status()` on responses where we are not actively checking the result.
+- Return an Error when one occurs during batch reading.
+- Removed the `'static` lifetime on some of the `str` parameters; contribution from kallisti5
+- Included `execute_update()` operation; contribution from horacimacias
+
+## [0.7.12] - 2020-09-10
+- Check response success for create_view()
+
+## [0.7.11] - 2020-09-09
+- Allow querying a view with a different design name
+
+## [0.7.10] - 2020-09-09
+- BREAKING CHANGE: get_all_params now takes a typed QueryParams as input.
+- get_all_params uses POST instead of GET, for greater flexibility.
+
+## [0.7.9] - 2020-09-09
+- `json_extr!` no longer panics when called on a non-existent field, e.g. when a find result
+  does not include an _id.
+
+## [0.7.8] - 2020-09-09
+- Implemented Display for FindQuery
+
+## [0.7.7] - 2020-09-09
+- Allow FindQuery to be converted to Value
+
+## [0.7.6] - 2020-09-09
+- Added `find_batched` to allow asynchronous customized searches
+
+## [0.7.0] - 2020-02-03
 
 ### Added
 
-- Added `failure` dependency
 - Added `Client::make_db`
 - Added `docker-compose.yml`
 - Added `.rustfmt.toml`
 
 ### Changed
 
+- Updated to the Rust 2018 edition standards
+- Compiles against the latest reqwest and serde libraries
 - Optimized memory consumption by moving `iter()` calls to `into_iter()` where needed
 - Changed `SofaError` to derive `failure`
 - Changed `Client::check_status` signature to remove potentially panicking `unwrap()` calls
@@ -45,3 +100,4 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
 ### Removed
 - Removed env files that were necessary for single-threaded test run. Added section in README to reflect that.
+- Removed the `failure` dependency
diff --git a/Cargo.toml b/Cargo.toml
index 82a20fa..3153cb3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "sofa"
-version = "0.6.0"
-authors = ["Mathieu Amiot "]
+version = "0.8.0"
+authors = ["Mathieu Amiot ", "mibes "]
 license = "MIT/Apache-2.0"
 description = "Sofa - CouchDB for Rust"
 readme = "README.md"
@@ -10,17 +10,22 @@ homepage = "https://github.com/YellowInnovation/sofa"
 repository = "https://github.com/YellowInnovation/sofa"
 keywords = ["couchdb", "orm", "database", "nosql"]
 categories = ["database"]
+edition = "2018"
 
 include = [
     "**/*.rs",
     "Cargo.toml"
 ]
 
 [dependencies]
-failure = "0.1"
-serde = "1.0"
-serde_derive = "1.0"
-serde_json = "1.0"
-reqwest = "0.8"
+serde = { version = "^1.0.114", features = ["derive"] }
+serde_json = "^1.0.56"
+url = "^2.1.1"
+tokio = { version = "^0.2.22", features = ["full"] }
+
+[dependencies.reqwest]
+version = "^0.10.7"
+features = ["json", "gzip", "cookies"]
 
 [dev-dependencies]
-pretty_assertions = "0.5"
+pretty_assertions = "^0.6.1"
+tokio = { version = "^0.2.22", features = ["full"] }
diff --git a/README.md b/README.md
index e438772..d2c6f8a 100644
--- a/README.md
+++ b/README.md
@@ -1,67 +1,95 @@
-# Sofa - CouchDB for Rust
-
-[![Crates.io](https://img.shields.io/crates/v/sofa.svg)](https://crates.io/crates/sofa) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa?ref=badge_shield)
-
-[![docs.rs](https://docs.rs/sofa/badge.svg)](https://docs.rs/sofa)
-
-![sofa-logo](https://raw.githubusercontent.com/YellowInnovation/sofa/master/docs/logo-sofa.png "Logo Sofa")
-
-## Documentation
-
-Here: [http://docs.rs/sofa](http://docs.rs/sofa)
-
-## Installation
-
-```toml
-[dependencies]
-sofa = "0.6"
-```
-
-## Description
-
-This crate is an interface to CouchDB HTTP REST API. Works with stable Rust.
-
-Does not support `#![no_std]`
-
-After trying most crates for CouchDB in Rust (`chill`, `couchdb` in particular), none of them fit our needs hence the need to create our own.
-
-No async I/O (yet), uses a mix of Reqwest and Serde under the hood, with a few nice abstractions out there.
-
-**NOT 1.0 YET, so expect changes**
-
-**Supports CouchDB 2.0 and up.**
-
-Be sure to check [CouchDB's Documentation](http://docs.couchdb.org/en/latest/index.html) in detail to see what's possible.
-
-## Running tests
-
-Make sure that you have an instance of CouchDB 2.0+ running, either via the supplied `docker-compose.yml` file or by yourself. It must be listening on the default port.
-
-And then
-`cargo test -- --test-threads=1`
-
-Single-threading the tests is very important because we need to make sure that the basic features are working before actually testing features on dbs/documents.
-
-## Why the name "Sofa"
-
-CouchDB has a nice name, and I wanted to reflect that.
-
-## License
-
-Licensed under either of these:
-
-* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
-  [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
-* MIT license ([LICENSE-MIT](LICENSE-MIT) or
-  [https://opensource.org/licenses/MIT](https://opensource.org/licenses/MIT))
-
+# Sofa - CouchDB for Rust
+
+[![Crates.io](https://img.shields.io/crates/v/sofa.svg)](https://crates.io/crates/sofa)
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa?ref=badge_shield)
+
+[![docs.rs](https://docs.rs/sofa/badge.svg)](https://docs.rs/sofa)
+
+![sofa-logo](https://raw.githubusercontent.com/mibes/sofa/master/docs/logo-sofa.png "Logo Sofa")
+
+## Migration Notice
+
+Please note that we are no longer maintaining this fork, but have spun off a new project:
+[couch-rs](https://github.com/mibes/couch-rs)
+
+## Documentation
+
+Here: [http://docs.rs/sofa](http://docs.rs/sofa)
+
+## Installation
+
+If you want to use this particular fork, include this dependency in the Cargo.toml file:
+```toml
+[dependencies]
+sofa = { git = "https://github.com/mibes/sofa.git", version = "0.8.0" }
+```
+
+If you want to continue to use the "old" 0.6 version, use this dependency instead:
+```toml
+[dependencies]
+sofa = "0.6"
+```
+
+## Description
+
+This crate is an interface to the CouchDB HTTP REST API. It works with stable Rust.
+
+After trying most crates for CouchDB in Rust (`chill`, `couchdb` in particular), none of them fit our needs, hence the need to create our own.
+
+Uses async I/O, with a mix of Reqwest and Serde under the hood, and a few nice abstractions on top.
+
+**NOT 1.0 YET, so expect changes**
+
+**Supports CouchDB 2.3.0 and up, including the newly released 3.0 version.**
+
+Be sure to check [CouchDB's Documentation](http://docs.couchdb.org/en/latest/index.html) in detail to see what's possible.
+
+The 0.7 version is based on the 0.6 release from https://github.com/YellowInnovation/sofa.
+It has been updated to the Rust 2018 edition standards, uses async I/O, and compiles against the latest serde and
+reqwest libraries.
+
+## Example code
+
+You can launch the included example with:
+```shell script
+cargo run --example basic_operations
+```
+
+## Running tests
+
+Make sure that you have an instance of CouchDB 2.0+ running, either via the supplied `docker-compose.yml` file or by yourself. It must be listening on the default port.
+Since CouchDB 3.0, the "Admin Party" mode is no longer supported. This means you need to provide a username and password during launch.
+The tests and examples assume an "admin" CouchDB user with a "password" CouchDB password. Docker run command:
+
+```shell script
+docker run --rm -p 5984:5984 -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password couchdb:3
+```
+
+And then
+`cargo test -- --test-threads=1`
+
+Single-threading the tests is very important because we need to make sure that the basic features are working before actually testing features on dbs/documents.
+
+## Why the name "Sofa"
+
+CouchDB has a nice name, and I wanted to reflect that.
+
+## License
+
+Licensed under either of these:
+
+* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+  [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+* MIT license ([LICENSE-MIT](LICENSE-MIT) or
+  [https://opensource.org/licenses/MIT](https://opensource.org/licenses/MIT))
+
 [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa?ref=badge_large)
 
-## Yellow Innovation
-
-Yellow Innovation is the innovation laboratory of the French postal service: La Poste.
-
-We create innovative user experiences and journeys through services with a focus on IoT lately.
-
+## Yellow Innovation
+
+Yellow Innovation is the innovation laboratory of the French postal service: La Poste.
+
+We create innovative user experiences and journeys through services, lately with a focus on IoT.
+
 [Yellow Innovation's website and works](http://yellowinnovation.fr/en/)
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index fe740b5..1a280d7 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -3,7 +3,7 @@ version: '3'
 
 services:
   couchdb:
-    image: couchdb:2.2
+    image: couchdb:3
     restart: always
     ports:
       - '5984:5984'
diff --git a/examples/async_batch_read/main.rs b/examples/async_batch_read/main.rs
new file mode 100644
index 0000000..59c2768
--- /dev/null
+++ b/examples/async_batch_read/main.rs
@@ -0,0 +1,54 @@
+extern crate sofa;
+
+use sofa::document::DocumentCollection;
+use std::fs::File;
+use std::io::prelude::*;
+use std::time::SystemTime;
+use tokio::sync::mpsc;
+use tokio::sync::mpsc::{Receiver, Sender};
+
+const DB_HOST: &str = "http://admin:password@localhost:5984";
+const TEST_DB: &str = "test_db";
+
+#[tokio::main]
+async fn main() {
+    println!("Connecting...");
+    let now = SystemTime::now();
+
+    // Create a sender and receiver channel pair
+    let (tx, mut rx): (Sender<DocumentCollection>, Receiver<DocumentCollection>) = mpsc::channel(100);
+
+    // Spawn a separate task to retrieve the batches from Couch
+    let t = tokio::spawn(async move {
+        let client = sofa::Client::new_with_timeout(DB_HOST, 120).unwrap();
+        let db = client.db(TEST_DB).await.unwrap();
+
+        if let Err(err) = db.get_all_batched(tx, 0, 0).await {
+            println!("error during batch read: {:?}", err);
+        }
+    });
+
+    // Open a file for writing
+    let mut file = File::create("test_db.json").unwrap();
+
+    // Loop until the receiving channel is closed.
+    while let Some(all_docs) = rx.recv().await {
+        println!("Received {} docs", all_docs.total_rows);
+
+        // serialize the documents and write them to a file.
+        // (there is probably a more efficient way of doing this...)
+        for row in all_docs.rows {
+            file.write_all(serde_json::to_string(&row.doc).unwrap().as_bytes())
+                .unwrap();
+        }
+    }
+
+    // Make sure the file is written before exiting
+    file.sync_all().unwrap();
+
+    let elapsed = now.elapsed().unwrap_or_default();
+    println!("{} ms", elapsed.as_millis());
+
+    // Wait for the spawned task to finish (should be done by now).
+    t.await.unwrap();
+}
diff --git a/examples/basic_operations/main.rs b/examples/basic_operations/main.rs
new file mode 100644
index 0000000..a36b7d8
--- /dev/null
+++ b/examples/basic_operations/main.rs
@@ -0,0 +1,90 @@
+/// This example demonstrates some basic Couch operations: connecting, listing databases and
+/// inserting some documents in bulk.
+///
+/// The easiest way to get this example to work is to connect it to a running CouchDB Docker
+/// container:
+///
+/// ```
+/// docker run --rm -p5984:5984 couchdb:2.3.1
+/// ```
+///
+/// Depending on the Docker framework you are using, it may listen on "localhost" or on some other
+/// automatically assigned IP address. Minikube for example generates a unique IP on start-up. You
+/// can obtain it with: `minikube ip`
+extern crate sofa;
+
+use serde_json::{json, Value};
+use sofa::types::find::FindQuery;
+use std::error::Error;
+
+/// Update DB_HOST to point to your running Couch instance
+const DB_HOST: &str = "http://admin:password@localhost:5984";
+const TEST_DB: &str = "test_db";
+
+/// test_docs generates a bunch of documents that can be used in the _bulk_docs operation.
+fn test_docs(amount: i32) -> Vec<Value> {
+    let mut result: Vec<Value> = vec![];
+
+    for _i in 0..amount {
+        result.push(json!({"name": "Marcel"}))
+    }
+
+    result
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn Error>> {
+    println!("Connecting...");
+
+    // Prepare the Sofa client
+    let client = sofa::Client::new(DB_HOST).unwrap();
+
+    // This command gets a reference to an existing database, or it creates a new one when it does
+    // not yet exist.
+    let db = client.db(TEST_DB).await.unwrap();
+    // List the existing databases. The db_initialized check is superfluous, since we just created
+    // the database in the previous step; it is for educational purposes only.
+    let dbs = client.list_dbs().await?;
+    let mut db_initialized: bool = false;
+    println!("Existing databases:");
+    for db in dbs {
+        println!("Couch DB {}", db);
+        if db == TEST_DB {
+            db_initialized = true;
+        }
+    }
+
+    if !db_initialized {
+        println!("{} not found", TEST_DB);
+        return Ok(());
+    }
+
+    println!("--- Creating ---");
+
+    // let's add some docs
+    match db.bulk_docs(test_docs(100)).await {
+        Ok(resp) => {
+            println!("Bulk docs completed");
+
+            for r in resp {
+                println!(
+                    "Id: {}, OK?: {}",
+                    r.id.unwrap_or_else(|| "--".to_string()),
+                    r.ok.unwrap_or(false)
+                )
+            }
+        }
+        Err(err) => println!("Oops: {:?}", err),
+    }
+
+    println!("--- Finding ---");
+
+    let find_all = FindQuery::find_all();
+    let docs = db.find(&find_all).await?;
+    if let Some(row) = docs.rows.iter().next() {
+        println!("First document: {}", row.doc.get_data().to_string())
+    }
+
+    println!("All operations are done");
+    Ok(())
+}
diff --git a/examples/typed_documents/main.rs b/examples/typed_documents/main.rs
new file mode 100644
index 0000000..3585178
--- /dev/null
+++ b/examples/typed_documents/main.rs
@@ -0,0 +1,70 @@
+extern crate sofa;
+
+use reqwest::StatusCode;
+use serde::{Deserialize, Serialize};
+use sofa::types::document::DocumentId;
+
+/// Update DB_HOST to point to your running Couch instance
+const DB_HOST: &str = "http://admin:password@localhost:5984";
+const TEST_DB: &str = "test_db";
+
+#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+pub struct TestDoc {
+    /// _ids are the only uniquely enforced value within CouchDB, so you might as well make use of this.
+    /// CouchDB stores its documents in a B+ tree. Each additional or updated document is stored as
+    /// a leaf node, and may require re-writing intermediary and parent nodes. You may be able to take
+    /// advantage of sequencing your own ids more effectively than the automatically generated ids if
+    /// you can arrange them to be sequential yourself.
+    /// (https://docs.couchdb.org/en/stable/best-practices/documents.html)
+    pub _id: DocumentId,
+
+    /// Document Revision, provided by CouchDB, helps negotiate conflicts
+    #[serde(skip_serializing)]
+    pub _rev: String,
+
+    pub first_name: String,
+    pub last_name: String,
+}
+
+#[tokio::main]
+async fn main() {
+    println!("Connecting...");
+
+    // Prepare the Sofa client
+    let client = sofa::Client::new(DB_HOST).unwrap();
+
+    // This command gets a reference to an existing database, or it creates a new one when it does
+    // not yet exist.
+    let db = client.db(TEST_DB).await.unwrap();
+
+    let td = TestDoc {
+        _id: "1234".to_string(),
+        _rev: "".to_string(),
+        first_name: "John".to_string(),
+        last_name: "Doe".to_string(),
+    };
+
+    // check if the document already exists
+    match db.get("1234".to_string()).await {
+        Ok(existing) => {
+            println!("Document has been previously created with Rev: {}", existing._rev);
+            let e: TestDoc = serde_json::from_value(existing.get_data()).unwrap();
+            println!("Name: {} {}", e.first_name, e.last_name);
+        }
+        Err(e) => {
+            match e.status {
+                StatusCode::NOT_FOUND => {
+                    // create the document
+                    match db.create(serde_json::to_value(td).unwrap()).await {
+                        Ok(r) => println!("Document was created with ID: {} and Rev: {}", r._id, r._rev),
+                        Err(err) => println!("Oops: {:?}", err),
+                    }
+                }
+                _ => {
+                    println!("Unexpected error: {:?}", e);
+                }
+            }
+        }
+    }
+
+    println!("All operations are done")
+}
diff --git a/src/client.rs b/src/client.rs
index 7c21680..cc50da2 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -1,13 +1,23 @@
 use std::collections::HashMap;
 use std::time::Duration;
 
-use failure::Error;
-use serde_json::from_reader;
+use reqwest::RequestBuilder;
+use reqwest::{self, Url, Method, StatusCode};
+use reqwest::header::{HeaderMap, HeaderValue, USER_AGENT, CONTENT_TYPE, REFERER};
+use crate::database::Database;
+use crate::error::CouchError;
+use crate::types::system::{CouchResponse, CouchStatus};
 
-use reqwest::{self, Url, Method, RequestBuilder, StatusCode};
+fn construct_json_headers(uri: Option<&str>) -> HeaderMap {
+    let mut headers = HeaderMap::new();
+    headers.insert(USER_AGENT, HeaderValue::from_static("reqwest"));
+    headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
 
-use ::database::*;
-use ::types::*;
-use ::error::SofaError;
+    if let Some(u) = uri {
+        headers.insert(REFERER, HeaderValue::from_str(u).unwrap());
+    }
+
+    headers
+}
 
 /// Client handles the URI manipulation logic and the HTTP calls to the CouchDB REST API.
 /// It is also responsible for the creation/access/destruction of databases.
@@ -16,32 +26,40 @@ pub struct Client {
     _client: reqwest::Client,
     dbs: Vec<&'static str>,
     _gzip: bool,
-    _timeout: u8,
+    _timeout: u64,
     pub uri: String,
-    pub db_prefix: String
+    pub db_prefix: String,
 }
 
 impl Client {
-    pub fn new(uri: String) -> Result<Client, Error> {
+    /// new creates a new Couch client with a default timeout of 10 seconds.
+    /// The URI has to be in this format: http://hostname:5984, for example: http://192.168.64.5:5984
+    pub fn new(uri: &str) -> Result<Client, CouchError> {
+        Client::new_with_timeout(uri, 10)
+    }
+
+    /// new_with_timeout creates a new Couch client. The URI has to be in this format: http://hostname:5984,
+    /// the timeout is in seconds.
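+    ///
+    /// A minimal usage sketch; the host URI and the 30-second timeout below are illustrative
+    /// placeholders:
+    /// ```no_run
+    /// let client = sofa::Client::new_with_timeout("http://admin:password@localhost:5984", 30).unwrap();
+    /// ```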
+    pub fn new_with_timeout(uri: &str, timeout: u64) -> Result<Client, CouchError> {
         let client = reqwest::Client::builder()
             .gzip(true)
-            .timeout(Duration::new(4, 0))
+            .timeout(Duration::new(timeout, 0))
             .build()?;
 
         Ok(Client {
             _client: client,
-            uri: uri,
+            uri: uri.to_string(),
             _gzip: true,
-            _timeout: 4,
+            _timeout: timeout,
             dbs: Vec::new(),
-            db_prefix: String::new()
+            db_prefix: String::new(),
         })
     }
 
-    fn create_client(&self) -> Result<reqwest::Client, Error> {
+    fn create_client(&self) -> Result<reqwest::Client, CouchError> {
         let client = reqwest::Client::builder()
             .gzip(self._gzip)
-            .timeout(Duration::new(self._timeout as u64, 0))
+            .timeout(Duration::new(self._timeout, 0))
             .build()?;
 
         Ok(client)
@@ -61,32 +79,32 @@ impl Client {
         self
     }
 
-    pub fn gzip(&mut self, enabled: bool) -> Result<&Self, Error> {
+    pub fn gzip(&mut self, enabled: bool) -> Result<&Self, CouchError> {
         self._gzip = enabled;
         self._client = self.create_client()?;
 
         Ok(self)
     }
 
-    pub fn timeout(&mut self, to: u8) -> Result<&Self, Error> {
+    pub fn timeout(&mut self, to: u64) -> Result<&Self, CouchError> {
         self._timeout = to;
         self._client = self.create_client()?;
 
         Ok(self)
     }
 
-    pub fn list_dbs(&self) -> Result<Vec<String>, Error> {
-        let mut response = self.get(String::from("/_all_dbs"), None)?.send()?;
-        let data = response.json::<Vec<String>>()?;
+    pub async fn list_dbs(&self) -> Result<Vec<String>, CouchError> {
+        let response = self.get(String::from("/_all_dbs"), None)?.send().await?;
+        let data = response.json().await?;
 
         Ok(data)
     }
 
-    fn build_dbname(&self, dbname: &'static str) -> String {
+    fn build_dbname(&self, dbname: &str) -> String {
         self.db_prefix.clone() + dbname
     }
 
-    pub fn db(&self, dbname: &'static str) -> Result<Database, Error> {
+    pub async fn db(&self, dbname: &str) -> Result<Database, CouchError> {
         let name = self.build_dbname(dbname);
 
         let db = Database::new(name.clone(), self.clone());
@@ -94,16 +112,16 @@ impl Client {
         let path = self.create_path(name, None)?;
 
         let head_response = self._client.head(&path)
-            .header(reqwest::header::ContentType::json())
-            .send()?;
+            .headers(construct_json_headers(None))
+            .send().await?;
 
         match head_response.status() {
-            StatusCode::Ok => Ok(db),
-            _ => self.make_db(dbname),
+            StatusCode::OK => Ok(db),
+            _ => self.make_db(dbname).await,
         }
     }
 
-    pub fn make_db(&self, dbname: &'static str) -> Result<Database, Error> {
+    pub async fn make_db(&self, dbname: &str) -> Result<Database, CouchError> {
         let name = self.build_dbname(dbname);
 
         let db = Database::new(name.clone(), self.clone());
@@ -111,37 +129,38 @@ impl Client {
         let path = self.create_path(name, None)?;
 
         let put_response = self._client.put(&path)
-            .header(reqwest::header::ContentType::json())
-            .send()?;
+            .headers(construct_json_headers(None))
+            .send().await?;
 
-        let s: CouchResponse = from_reader(put_response)?;
+        let status = put_response.status();
+        let s: CouchResponse = put_response.json().await?;
 
         match s.ok {
             Some(true) => Ok(db),
-            Some(false) | _ => {
-                let err = s.error.unwrap_or(s!("unspecified error"));
-                Err(SofaError(err).into())
+            _ => {
+                let err = s.error.unwrap_or_else(|| s!("unspecified error"));
+                Err(CouchError::new(err, status))
            },
         }
     }
 
-    pub fn destroy_db(&self, dbname: &'static str) -> Result<bool, Error> {
+    pub async fn destroy_db(&self, dbname: &str) -> Result<bool, CouchError> {
         let path = self.create_path(self.build_dbname(dbname), None)?;
         let response = self._client.delete(&path)
-            .header(reqwest::header::ContentType::json())
-            .send()?;
+            .headers(construct_json_headers(None))
+            .send().await?;
 
-        let s: CouchResponse = from_reader(response)?;
+        let s: CouchResponse = response.json().await?;
 
         Ok(s.ok.unwrap_or(false))
     }
 
-    pub fn check_status(&self) -> Result<CouchStatus, Error> {
+    pub async fn check_status(&self) -> Result<CouchStatus, CouchError> {
         let response = self._client.get(&self.uri)
-            .header(reqwest::header::ContentType::json())
-            .send()?;
+            .headers(construct_json_headers(None))
+            .send().await?;
 
-        let status = from_reader(response)?;
+        let status = response.json().await?;
 
         Ok(status)
     }
 
@@ -149,7 +168,7 @@ impl Client {
     fn create_path(&self,
         path: String,
        args: Option<HashMap<String, String>>
-    ) -> Result<String, Error> {
+    ) -> Result<String, CouchError> {
         let mut uri = Url::parse(&self.uri)?.join(&path)?;
 
         if let Some(ref map) = args {
@@ -166,36 +185,35 @@ impl Client {
         method: Method,
         path: String,
         opts: Option<HashMap<String, String>>
-    ) -> Result<RequestBuilder, Error> {
+    ) -> Result<RequestBuilder, CouchError> {
         let uri = self.create_path(path, opts)?;
-        let mut req = self._client.request(method, &uri);
-        req.header(reqwest::header::Referer::new(uri.clone()));
-        req.header(reqwest::header::ContentType::json());
+        let req = self._client.request(method, &uri)
+            .headers(construct_json_headers(Some(&uri)));
+
+        // req.header(reqwest::header::Referer::new(uri.clone()));
 
         Ok(req)
     }
 
-    pub fn get(&self, path: String, args: Option<HashMap<String, String>>) -> Result<RequestBuilder, Error> {
-        Ok(self.req(Method::Get, path, args)?)
+    pub fn get(&self, path: String, args: Option<HashMap<String, String>>) -> Result<RequestBuilder, CouchError> {
+        Ok(self.req(Method::GET, path, args)?)
     }
 
-    pub fn post(&self, path: String, body: String) -> Result<RequestBuilder, Error> {
-        let mut req = self.req(Method::Post, path, None)?;
-        req.body(body);
+    pub fn post(&self, path: String, body: String) -> Result<RequestBuilder, CouchError> {
+        let req = self.req(Method::POST, path, None)?.body(body);
         Ok(req)
     }
 
-    pub fn put(&self, path: String, body: String) -> Result<RequestBuilder, Error> {
-        let mut req = self.req(Method::Put, path, None)?;
-        req.body(body);
+    pub fn put(&self, path: String, body: String) -> Result<RequestBuilder, CouchError> {
+        let req = self.req(Method::PUT, path, None)?.body(body);
         Ok(req)
     }
 
-    pub fn head(&self, path: String, args: Option<HashMap<String, String>>) -> Result<RequestBuilder, Error> {
-        Ok(self.req(Method::Head, path, args)?)
+    pub fn head(&self, path: String, args: Option<HashMap<String, String>>) -> Result<RequestBuilder, CouchError> {
+        Ok(self.req(Method::HEAD, path, args)?)
     }
 
-    pub fn delete(&self, path: String, args: Option<HashMap<String, String>>) -> Result<RequestBuilder, Error> {
-        Ok(self.req(Method::Delete, path, args)?)
+    pub fn delete(&self, path: String, args: Option<HashMap<String, String>>) -> Result<RequestBuilder, CouchError> {
+        Ok(self.req(Method::DELETE, path, args)?)
     }
 }
diff --git a/src/database.rs b/src/database.rs
index bb482b0..b04c8f5 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -1,15 +1,16 @@
+use crate::client::Client;
+use crate::document::{Document, DocumentCollection};
+use crate::error::CouchError;
+use crate::types::design::DesignCreated;
+use crate::types::document::{DocumentCreatedResult, DocumentId};
+use crate::types::find::{FindQuery, FindResult};
+use crate::types::index::{DatabaseIndexList, IndexFields};
+use crate::types::query::QueryParams;
+use crate::types::view::ViewCollection;
+use reqwest::{RequestBuilder, StatusCode};
+use serde_json::{json, to_string, Value};
 use std::collections::HashMap;
-
-use reqwest::StatusCode;
-
-use failure::Error;
-use serde_json;
-use serde_json::{from_reader, to_string, Value};
-
-use client::*;
-use document::*;
-use error::SofaError;
-use types::*;
+use tokio::sync::mpsc::Sender;
 
 /// Database holds the logic of making operations on a CouchDB Database
 /// (sometimes called Collection in other NoSQL flavors such as MongoDB).
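+///
+/// A minimal usage sketch (hypothetical; the host string and database name are placeholders):
+/// ```no_run
+/// # #[tokio::main]
+/// # async fn main() {
+/// let client = sofa::Client::new("http://admin:password@localhost:5984").unwrap();
+/// let db = client.db("test_db").await.unwrap();
+/// # }
+/// ```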
@@ -21,10 +22,12 @@ pub struct Database {
 
 impl Database {
     pub fn new(name: String, client: Client) -> Database {
-        Database {
-            _client: client,
-            name: name,
-        }
+        Database { _client: client, name }
+    }
+
+    // Convenience function to retrieve the name of the database
+    pub fn name(&self) -> &str {
+        &self.name
     }
 
     fn create_document_path(&self, id: DocumentId) -> String {
@@ -34,7 +37,6 @@ impl Database {
         result
     }
 
-    #[allow(dead_code)]
     fn create_design_path(&self, id: DocumentId) -> String {
         let mut result: String = self.name.clone();
         result.push_str("/_design/");
@@ -42,94 +44,138 @@ impl Database {
         result
     }
 
-    fn create_compact_path(&self, design_name: &'static str) -> String {
+    fn create_query_view_path(&self, design_id: DocumentId, view_id: DocumentId) -> String {
+        let mut result: String = self.name.clone();
+        result.push_str("/_design/");
+        result.push_str(&design_id);
+        result.push_str("/_view/");
+        result.push_str(&view_id);
+        result
+    }
+
+    fn create_execute_update_path(
+        &self,
+        design_id: DocumentId,
+        update_id: DocumentId,
+        document_id: DocumentId,
+    ) -> String {
+        let mut result: String = self.name.clone();
+        result.push_str("/_design/");
+        result.push_str(&design_id);
+        result.push_str("/_update/");
+        result.push_str(&update_id);
+        result.push_str("/");
+        result.push_str(&document_id);
+        result
+    }
+
+    fn create_compact_path(&self, design_name: &str) -> String {
         let mut result: String = self.name.clone();
         result.push_str("/_compact/");
         result.push_str(design_name);
         result
     }
 
+    async fn is_accepted(&self, request: Result<RequestBuilder, CouchError>) -> bool {
+        if let Ok(req) = request {
+            if let Ok(res) = req.send().await {
+                return res.status() == StatusCode::ACCEPTED;
+            }
+        }
+
+        false
+    }
+
+    async fn is_ok(&self, request: Result<RequestBuilder, CouchError>) -> bool {
+        if let Ok(req) = request {
+            if let Ok(res) = req.send().await {
+                return match res.status() {
+                    StatusCode::OK | StatusCode::NOT_MODIFIED => true,
+                    _ => false,
+                };
+            }
+        }
+
+        false
+    }
+
     /// Launches the compact process
-    pub fn compact(&self) -> bool {
+    pub async fn compact(&self) -> bool {
         let mut path: String = self.name.clone();
         path.push_str("/_compact");
 
         let request = self._client.post(path, "".into());
-
-        request
-            .and_then(|mut req| {
-                Ok(req.send()
-                    .and_then(|res| Ok(res.status() == StatusCode::Accepted))
-                    .unwrap_or(false))
-            })
-            .unwrap_or(false)
+        self.is_accepted(request).await
     }
 
     /// Starts the compaction of all views
-    pub fn compact_views(&self) -> bool {
+    pub async fn compact_views(&self) -> bool {
         let mut path: String = self.name.clone();
         path.push_str("/_view_cleanup");
 
         let request = self._client.post(path, "".into());
-
-        request
-            .and_then(|mut req| {
-                Ok(req.send()
-                    .and_then(|res| Ok(res.status() == StatusCode::Accepted))
-                    .unwrap_or(false))
-            })
-            .unwrap_or(false)
+        self.is_accepted(request).await
     }
 
     /// Starts the compaction of a given index
-    pub fn compact_index(&self, index: &'static str) -> bool {
+    pub async fn compact_index(&self, index: &str) -> bool {
         let request = self._client.post(self.create_compact_path(index), "".into());
-
-        request
-            .and_then(|mut req| {
-                Ok(req.send()
-                    .and_then(|res| Ok(res.status() == StatusCode::Accepted))
-                    .unwrap_or(false))
-            })
-            .unwrap_or(false)
+        self.is_accepted(request).await
     }
 
     /// Checks if a document ID exists
-    pub fn exists(&self, id: DocumentId) -> bool {
+    pub async fn exists(&self, id: DocumentId) -> bool {
         let request = self._client.head(self.create_document_path(id), None);
-
-        request
-            .and_then(|mut req| {
-                Ok(req.send()
-                    .and_then(|res| {
-                        Ok(match res.status() {
-                            StatusCode::Ok | StatusCode::NotModified => true,
-                            _ => false,
-                        })
-                    })
-                    .unwrap_or(false))
-            })
-            .unwrap_or(false)
+        self.is_ok(request).await
     }
 
     /// Gets one document
-    pub fn get(&self, id: DocumentId) -> Result<Document, Error> {
-        let response = self._client.get(self.create_document_path(id), None)?.send()?;
-
-        Ok(Document::new(from_reader(response)?))
+    pub async fn get(&self, id: DocumentId) -> Result<Document, CouchError> {
+        let response = self
+            ._client
+            .get(self.create_document_path(id), None)?
+            .send()
+            .await?
+            .error_for_status()?;
+        Ok(Document::new(response.json().await?))
     }
 
     /// Gets documents in bulk with provided IDs list
-    pub fn get_bulk(&self, ids: Vec<DocumentId>) -> Result<DocumentCollection, Error> {
-        self.get_bulk_params(ids, None)
+    pub async fn get_bulk(&self, ids: Vec<DocumentId>) -> Result<DocumentCollection, CouchError> {
+        self.get_bulk_params(ids, None).await
+    }
+
+    /// Each time a document is stored or updated in CouchDB, the internal B-tree is updated.
+    /// Bulk insertion provides efficiency gains in both storage space and time,
+    /// by consolidating many of the updates to intermediate B-tree nodes.
+    ///
+    /// See the documentation on how to use bulk_docs here: https://docs.couchdb.org/en/stable/api/database/bulk-api.html#db-bulk-docs
+    ///
+    /// raw_docs is a vector of documents with or without an ID
+    ///
+    /// This endpoint can also be used to delete a set of documents by including "_deleted": true, in the document to be deleted.
+    /// When deleting or updating, both _id and _rev are mandatory.
+    pub async fn bulk_docs(&self, raw_docs: Vec<Value>) -> Result<Vec<DocumentCreatedResult>, CouchError> {
+        let mut body = HashMap::new();
+        body.insert(s!("docs"), raw_docs);
+
+        let response = self
+            ._client
+            .post(self.create_document_path("_bulk_docs".into()), to_string(&body)?)?
+            .send()
+            .await?;
+
+        let data: Vec<DocumentCreatedResult> = response.json().await?;
+
+        Ok(data)
     }
 
     /// Gets documents in bulk with provided IDs list, with added params. Parameters description can be found here: http://docs.couchdb.org/en/latest/api/ddoc/views.html#api-ddoc-view
-    pub fn get_bulk_params(
+    pub async fn get_bulk_params(
         &self,
         ids: Vec<DocumentId>,
         params: Option<HashMap<String, String>>,
-    ) -> Result<DocumentCollection, Error> {
+    ) -> Result<DocumentCollection, CouchError> {
         let mut options;
         if let Some(opts) = params {
             options = opts;
@@ -142,43 +188,122 @@ impl Database {
         let mut body = HashMap::new();
         body.insert(s!("keys"), ids);
 
-        let response = self._client
-            .get(self.create_document_path("_all_docs".into()), Some(options))?
-            .body(to_string(&body)?)
-            .send()?;
+        let response = self
+            ._client
+            .post(self.create_document_path("_all_docs".into()), to_string(&body)?)?
+            .query(&options)
+            .send()
+            .await?
+            .error_for_status()?;
 
-        Ok(DocumentCollection::new(from_reader(response)?))
+        Ok(DocumentCollection::new(response.json().await?))
     }
 
     /// Gets all the documents in database
-    pub fn get_all(&self) -> Result<DocumentCollection, Error> {
-        self.get_all_params(None)
+    pub async fn get_all(&self) -> Result<DocumentCollection, CouchError> {
+        self.get_all_params(None).await
+    }
+
+    /// Gets all documents in the database, using bookmarks to iterate through all the documents.
+    /// Results are returned through an mpsc channel for async processing. Use this for very large
+    /// databases only. Batch size can be requested. A value of 0 means the default batch_size of
+    /// 1000 is used. max_results of 0 means all documents will be returned. A given max_results is
+    /// always rounded *up* to the nearest multiple of batch_size.
+    /// This operation is identical to find_batched(FindQuery::find_all(), tx, batch_size, max_results)
+    pub async fn get_all_batched(
+        &self,
+        tx: Sender<DocumentCollection>,
+        batch_size: u64,
+        max_results: u64,
+    ) -> Result<u64, CouchError> {
+        let query = FindQuery::find_all();
+        self.find_batched(query, tx, batch_size, max_results).await
+    }
+
+    /// Finds documents in the database, using bookmarks to iterate through all the documents.
+    /// Results are returned through an mpsc channel for async processing. Use this for very large
+    /// databases only. Batch size can be requested. A value of 0 means the default batch_size of
+    /// 1000 is used. max_results of 0 means all documents will be returned. A given max_results is
+    /// always rounded *up* to the nearest multiple of batch_size.
+    pub async fn find_batched(
+        &self,
+        mut query: FindQuery,
+        mut tx: Sender<DocumentCollection>,
+        batch_size: u64,
+        max_results: u64,
+    ) -> Result<u64, CouchError> {
+        let mut bookmark = Option::None;
+        let limit = if batch_size > 0 { batch_size } else { 1000 };
+
+        let mut results: u64 = 0;
+        query.limit = Option::Some(limit);
+
+        let maybe_err = loop {
+            let mut segment_query = query.clone();
+            segment_query.bookmark = bookmark.clone();
+            let all_docs = match self.find(&segment_query).await {
+                Ok(docs) => docs,
+                Err(err) => break Some(err),
+            };
+
+            if all_docs.total_rows == 0 {
+                // no more rows
+                break None;
+            }
+
+            if all_docs.bookmark.is_some() && all_docs.bookmark != bookmark {
+                bookmark.replace(all_docs.bookmark.clone().unwrap_or_default());
+            } else {
+                // no bookmark, break the query loop
+                break None;
+            }
+
+            results += all_docs.total_rows as u64;
+
+            tx.send(all_docs).await.unwrap();
+
+            if max_results > 0 && results >= max_results {
+                break None;
+            }
+        };
+
+        if let Some(err) = maybe_err {
+            Err(err)
+        } else {
+            Ok(results)
+        }
     }
 
     /// Gets all the documents in database, with applied parameters. Parameters description can be found here: http://docs.couchdb.org/en/latest/api/ddoc/views.html#api-ddoc-view
-    pub fn get_all_params(&self, params: Option<HashMap<String, String>>) -> Result<DocumentCollection, Error> {
+    pub async fn get_all_params(&self, params: Option<QueryParams>) -> Result<DocumentCollection, CouchError> {
         let mut options;
         if let Some(opts) = params {
             options = opts;
         } else {
-            options = HashMap::new();
+            options = QueryParams::default();
         }
 
-        options.insert(s!("include_docs"), s!("true"));
+        options.include_docs = Some(true);
 
-        let response = self._client
-            .get(self.create_document_path("_all_docs".into()), Some(options))?
-            .send()?;
+        // we use POST here, because this allows for a larger set of keys to be provided, compared
+        // to a GET call. It provides the same functionality
+        let response = self
+            ._client
+            .post(self.create_document_path("_all_docs".into()), js!(&options))?
+            .send()
+            .await?
+            .error_for_status()?;
 
-        Ok(DocumentCollection::new(from_reader(response)?))
+        Ok(DocumentCollection::new(response.json().await?))
     }
 
-    /// Finds a document in the database through a Mango query. Parameters here http://docs.couchdb.org/en/latest/api/database/find.html
-    pub fn find(&self, params: Value) -> Result<DocumentCollection, Error> {
+    /// Finds documents in the database through a Mango query.
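+    ///
+    /// A hypothetical sketch, mirroring the bundled `basic_operations` example (the host and
+    /// database names are placeholders):
+    /// ```no_run
+    /// # use sofa::types::find::FindQuery;
+    /// # #[tokio::main]
+    /// # async fn main() -> Result<(), sofa::error::CouchError> {
+    /// # let client = sofa::Client::new("http://admin:password@localhost:5984")?;
+    /// # let db = client.db("test_db").await?;
+    /// let query = FindQuery::find_all();
+    /// let docs = db.find(&query).await?;
+    /// println!("found {} documents", docs.total_rows);
+    /// # Ok(())
+    /// # }
+    /// ```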
+    pub async fn find(&self, query: &FindQuery) -> Result<DocumentCollection, CouchError> {
         let path = self.create_document_path("_find".into());
-        let response = self._client.post(path, js!(&params))?.send()?;
+        let response = self._client.post(path, js!(query))?.send().await?;
+        let status = response.status();
+        let data: FindResult = response.json().await?;
 
-        let data: FindResult = from_reader(response)?;
         if let Some(doc_val) = data.docs {
             let documents: Vec<Document> = doc_val
                 .into_iter()
                 .filter(|d| {
                     // Remove _design documents
                     let id: String = json_extr!(d["_id"]);
                     !id.starts_with('_')
                 })
-                .map(|v| Document::new(v.clone()))
+                .map(Document::new)
                 .collect();
 
-            Ok(DocumentCollection::new_from_documents(documents))
+            let mut bookmark = Option::None;
+            let returned_bookmark = data.bookmark.unwrap_or_default();
+
+            if returned_bookmark != "nil" && !returned_bookmark.is_empty() {
+                // a valid bookmark has been returned
+                bookmark.replace(returned_bookmark);
+            }
+
+            Ok(DocumentCollection::new_from_documents(documents, bookmark))
         } else if let Some(err) = data.error {
-            Err(SofaError(err).into())
+            Err(CouchError::new(err, status))
         } else {
             Ok(DocumentCollection::default())
         }
     }
 
     /// Updates a document
-    pub fn save(&self, doc: Document) -> Result<Document, Error> {
+    pub async fn save(&self, doc: Document) -> Result<Document, CouchError> {
         let id = doc._id.to_owned();
         let raw = doc.get_data();
 
-        let response = self._client
+        let response = self
+            ._client
             .put(self.create_document_path(id), to_string(&raw)?)?
-            .send()?;
+            .send()
+            .await?;
 
-        let data: DocumentCreatedResult = from_reader(response)?;
+        let status = response.status();
+        let data: DocumentCreatedResult = response.json().await?;
 
         match data.ok {
             Some(true) => {
@@ -216,29 +352,34 @@ impl Database {
 
                 Ok(Document::new(val))
             }
-            Some(false) | _ => {
-                let err = data.error.unwrap_or(s!("unspecified error"));
-                return Err(SofaError(err).into());
+            _ => {
+                let err = data.error.unwrap_or_else(|| s!("unspecified error"));
+                Err(CouchError::new(err, status))
             }
         }
     }
 
     /// Creates a document from a raw JSON document Value.
-    pub fn create(&self, raw_doc: Value) -> Result<Document, Error> {
-        let response = self._client.post(self.name.clone(), to_string(&raw_doc)?)?.send()?;
+    pub async fn create(&self, raw_doc: Value) -> Result<Document, CouchError> {
+        let response = self
+            ._client
+            .post(self.name.clone(), to_string(&raw_doc)?)?
+            .send()
+            .await?;
 
-        let data: DocumentCreatedResult = from_reader(response)?;
+        let status = response.status();
+        let data: DocumentCreatedResult = response.json().await?;
 
         match data.ok {
             Some(true) => {
                 let data_id = match data.id {
                     Some(id) => id,
-                    _ => return Err(SofaError(s!("invalid id")).into()),
+                    _ => return Err(CouchError::new(s!("invalid id"), status)),
                 };
 
                 let data_rev = match data.rev {
                     Some(rev) => rev,
-                    _ => return Err(SofaError(s!("invalid rev")).into()),
+                    _ => return Err(CouchError::new(s!("invalid rev"), status)),
                 };
 
                 let mut val = raw_doc.clone();
@@ -247,15 +388,82 @@ impl Database {
 
                 Ok(Document::new(val))
             }
-            Some(false) | _ => {
-                let err = data.error.unwrap_or(s!("unspecified error"));
-                return Err(SofaError(err).into());
+            _ => {
+                let err = data.error.unwrap_or_else(|| s!("unspecified error"));
+                Err(CouchError::new(err, status))
             }
         }
     }
 
+    /// Creates a view document.
+    pub async fn create_view(&self, design_name: String, doc: Value) -> Result<DesignCreated, CouchError> {
+        let response = self
+            ._client
+            .put(self.create_design_path(design_name), to_string(&doc)?)?
+            .send()
+            .await?;
+
+        let response_status = response.status();
+        let result: DesignCreated = response.json().await?;
+
+        if response_status.is_success() {
+            Ok(result)
+        } else {
+            match result.error {
+                Some(e) => Err(CouchError {
+                    status: response_status,
+                    message: e,
+                }),
+                None => Err(CouchError {
+                    status: response_status,
+                    message: s!("unspecified error"),
+                }),
+            }
+        }
+    }
+
+    /// Executes a query against a view.
+    pub async fn query(
+        &self,
+        design_name: String,
+        view_name: String,
+        options: Option<QueryParams>,
+    ) -> Result<ViewCollection, CouchError> {
+        let response = self
+            ._client
+            .post(self.create_query_view_path(design_name, view_name), js!(&options))?
+            .send()
+            .await?
+            .error_for_status()?;
+
+        Ok(response.json().await?)
+    }
+
+    /// Convenience function to execute an update function defined in a design document, against a given document.
+    pub async fn execute_update(
+        &self,
+        design_id: String,
+        name: String,
+        document_id: String,
+        body: Option<Value>,
+    ) -> Result<String, CouchError> {
+        let body = match body {
+            Some(v) => to_string(&v)?,
+            None => "".to_string(),
+        };
+
+        let response = self
+            ._client
+            .put(self.create_execute_update_path(design_id, name, document_id), body)?
+            .send()
+            .await?
+            .error_for_status()?;
+
+        Ok(response.text().await?)
+    }
+
     /// Removes a document from the database. Returns success in a `bool`
-    pub fn remove(&self, doc: Document) -> bool {
+    pub async fn remove(&self, doc: Document) -> bool {
         let request = self._client.delete(
             self.create_document_path(doc._id.clone()),
             Some({
@@ -265,57 +473,51 @@ impl Database {
             }),
         );
 
-        request
-            .and_then(|mut req| {
-                Ok(req.send()
-                    .and_then(|res| {
-                        Ok(match res.status() {
-                            StatusCode::Ok | StatusCode::Accepted => true,
-                            _ => false,
-                        })
-                    })
-                    .unwrap_or(false))
-            })
-            .unwrap_or(false)
+        self.is_ok(request).await
     }
 
     /// Inserts an index in a naive way; if it already exists, it will throw an
     /// `Err`
-    pub fn insert_index(&self, name: String, spec: IndexFields) -> Result<IndexCreated, Error> {
-        let response = self._client
+    pub async fn insert_index(&self, name: String, spec: IndexFields) -> Result<DesignCreated, CouchError> {
+        let response = self
+            ._client
            .post(
                self.create_document_path("_index".into()),
                js!(json!({
-                "name": name,
-                "index": spec
-            })),
+                    "name": name,
+                    "index": spec
+                })),
            )?
-            .send()?;
+            .send()
+            .await?;
 
-        let data: IndexCreated = from_reader(response)?;
+        let status = response.status();
+        let data: DesignCreated = response.json().await?;
 
         if data.error.is_some() {
-            let err = data.error.unwrap_or(s!("unspecified error"));
-            Err(SofaError(err).into())
+            let err = data.error.unwrap_or_else(|| s!("unspecified error"));
+            Err(CouchError::new(err, status))
        } else {
            Ok(data)
        }
     }
 
     /// Reads the database's indexes and returns them
-    pub fn read_indexes(&self) -> Result<DatabaseIndexList, Error> {
-        let response = self._client
+    pub async fn read_indexes(&self) -> Result<DatabaseIndexList, CouchError> {
+        let response = self
+            ._client
             .get(self.create_document_path("_index".into()), None)?
-            .send()?;
+            .send()
+            .await?;
 
-        Ok(from_reader(response)?)
+        Ok(response.json().await?)
     }
 
     /// Method to ensure an index is created on the database with the following
     /// spec. Returns `true` when we created a new one, or `false` when the
     /// index already existed.
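+    ///
+    /// A hypothetical sketch, reusing the index spec from the test suite (the "thing" field and
+    /// the index name are placeholders):
+    /// ```no_run
+    /// # use sofa::types::index::IndexFields;
+    /// # use sofa::types::find::SortSpec;
+    /// # #[tokio::main]
+    /// # async fn main() -> Result<(), sofa::error::CouchError> {
+    /// # let client = sofa::Client::new("http://admin:password@localhost:5984")?;
+    /// # let db = client.db("test_db").await?;
+    /// let spec = IndexFields::new(vec![SortSpec::Simple("thing".to_string())]);
+    /// let created_new = db.ensure_index("thing-index".to_string(), spec).await?;
+    /// println!("created a new index: {}", created_new);
+    /// # Ok(())
+    /// # }
+    /// ```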
-    pub fn ensure_index(&self, name: String, spec: IndexFields) -> Result<bool, Error> {
-        let db_indexes = self.read_indexes()?;
+    pub async fn ensure_index(&self, name: String, spec: IndexFields) -> Result<bool, CouchError> {
+        let db_indexes = self.read_indexes().await?;
 
         // We look for our index
         for i in db_indexes.indexes.into_iter() {
@@ -326,9 +528,14 @@ impl Database {
         }
 
         // Let's create it then
-        let _ = self.insert_index(name, spec)?;
-
-        // Created and alright
-        Ok(true)
+        let result: DesignCreated = self.insert_index(name, spec).await?;
+        match result.error {
+            Some(e) => Err(CouchError {
+                status: reqwest::StatusCode::INTERNAL_SERVER_ERROR,
+                message: e,
+            }),
+            // Created and alright
+            None => Ok(true),
+        }
     }
 }
diff --git a/src/document.rs b/src/document.rs
index ca33f78..0e381d5 100644
--- a/src/document.rs
+++ b/src/document.rs
@@ -1,8 +1,8 @@
-use database::*;
-use serde_json;
 use serde_json::Value;
 use std::ops::{Index, IndexMut};
-use types::*;
+use serde::{Serialize, Deserialize};
+use crate::types::document::{DocumentId};
+use crate::database::Database;
 
 /// Document abstracts the handling of JSON values and provides direct access
 /// and casting to the fields of your documents. You can get access to the
@@ -25,7 +25,7 @@ impl Document {
         Document {
             _id: json_extr!(doc["_id"]),
             _rev: json_extr!(doc["_rev"]),
-            doc: doc,
+            doc,
         }
     }
 
@@ -68,19 +68,19 @@ impl Document {
     /// Recursively populates field (must be an array of IDs from another
     /// database) with provided database documents
-    pub fn populate(&mut self, field: &String, db: Database) -> &Self {
-        let ref val = self[field].clone();
+    pub async fn populate(&mut self, field: &str, db: Database) -> &Self {
+        let val = &self[field].clone();
         if *val == Value::Null {
             return self;
         }
 
         let ids = val.as_array()
             .unwrap_or(&Vec::new())
-            .into_iter()
+            .iter()
             .map(|v| s!(v.as_str().unwrap_or("")))
             .collect();
 
-        let data = db.get_bulk(ids).and_then(|docs| Ok(docs.get_data()));
+        let data = db.get_bulk(ids).await.map(|docs| docs.get_data());
 
         match data {
             Ok(data) => {
@@ -139,7 +139,7 @@ pub struct DocumentCollectionItem {
 impl DocumentCollectionItem {
     pub fn new(doc: Document) -> DocumentCollectionItem {
         let id = doc._id.clone();
-        DocumentCollectionItem { doc: doc, id: id }
+        DocumentCollectionItem { doc, id }
     }
 }
 
@@ -148,9 +148,10 @@ impl DocumentCollectionItem {
 /// implementation of `Index` and `IndexMut`
 #[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)]
 pub struct DocumentCollection {
-    pub offset: u32,
+    pub offset: Option<u32>,
     pub rows: Vec<DocumentCollectionItem>,
     pub total_rows: u32,
+    pub bookmark: Option<String>,
 }
 
 impl DocumentCollection {
     pub fn new(doc: Value) -> DocumentCollection {
         let rows: Vec<Value> = json_extr!(doc["rows"]);
         let items: Vec<DocumentCollectionItem> = rows.into_iter()
             .filter(|d| {
-                // Remove _design documents
-                let id: String = json_extr!(d["doc"]["_id"]);
-                !id.starts_with('_')
+                let maybe_err: Option<String> = json_extr!(d["error"]);
+                if maybe_err.is_some() {
+                    // remove errors
+                    false
+                } else {
+                    // Remove _design documents
+                    let id: String = json_extr!(d["doc"]["_id"]);
+                    !id.starts_with('_')
+                }
             })
             .map(|d| {
                 let document: Value = json_extr!(d["doc"]);
@@ -172,16 +179,18 @@ impl DocumentCollection {
             offset: json_extr!(doc["offset"]),
             total_rows: items.len() as u32,
             rows: items,
+            bookmark: Option::None,
         }
     }
 
-    pub fn new_from_documents(docs: Vec<Document>) -> DocumentCollection {
+    pub fn new_from_documents(docs: Vec<Document>, bookmark: Option<String>) -> DocumentCollection {
         let len = docs.len() as u32;
 
         DocumentCollection {
-            offset: 0,
+            offset: Some(0),
             total_rows: len,
-            rows: docs.into_iter().map(|d| DocumentCollectionItem::new(d)).collect(),
+            rows: docs.into_iter().map(DocumentCollectionItem::new).collect(),
+            bookmark,
         }
     }
diff --git a/src/error.rs b/src/error.rs
index bc57522..9a72c5a 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -1,3 +1,62 @@
-#[derive(Fail, Debug)]
-#[fail(display = "Custom error: {}", _0)]
-pub struct SofaError(pub String);
+use std::error;
+use std::fmt;
+
+// Define our error types. These may be customized for our error handling cases.
+// Now we will be able to write our own errors, defer to an underlying error
+// implementation, or do something in between.
+#[derive(Debug, Clone)]
+pub struct CouchError {
+    pub status: reqwest::StatusCode,
+    pub message: String,
+}
+
+impl CouchError {
+    pub fn new(message: String, status: reqwest::StatusCode) -> CouchError {
+        CouchError {
+            message,
+            status,
+        }
+    }
+}
+
+impl fmt::Display for CouchError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}: {}", self.status, self.message)
+    }
+}
+
+// This is important for other errors to wrap this one.
+impl error::Error for CouchError {
+    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+        // Generic error, underlying cause isn't tracked.
+        None
+    }
+}
+
+impl std::convert::From<reqwest::Error> for CouchError {
+    fn from(err: reqwest::Error) -> Self {
+        CouchError {
+            status: err.status().unwrap_or(reqwest::StatusCode::NOT_IMPLEMENTED),
+            message: err.to_string(),
+        }
+    }
+}
+
+impl std::convert::From<serde_json::Error> for CouchError {
+    fn from(err: serde_json::Error) -> Self {
+        CouchError {
+            status: reqwest::StatusCode::NOT_IMPLEMENTED,
+            message: err.to_string(),
+        }
+    }
+}
+
+impl std::convert::From<url::ParseError> for CouchError {
+    fn from(err: url::ParseError) -> Self {
+        CouchError {
+            status: reqwest::StatusCode::NOT_IMPLEMENTED,
+            message: err.to_string(),
+        }
+    }
+}
+
diff --git a/src/lib.rs b/src/lib.rs
index ad38104..4c89f28 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,9 +1,11 @@
 //! # Sofa - CouchDB for Rust
 //!
 //! [![Crates.io](https://img.shields.io/crates/v/sofa.svg)](https://crates.io/crates/sofa)
+//! [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa?ref=badge_shield)
+//!
 //! [![docs.rs](https://docs.rs/sofa/badge.svg)](https://docs.rs/sofa)
 //!
-//! ![sofa-logo](https://raw.githubusercontent.com/YellowInnovation/sofa/master/docs/logo-sofa.png "Logo Sofa")
+//! ![sofa-logo](https://raw.githubusercontent.com/mibes/sofa/master/docs/logo-sofa.png "Logo Sofa")
 //!
 //! ## Documentation
 //!
@@ -11,6 +13,13 @@
 //!
 //! ## Installation
 //!
+//! If you want to use this particular fork, include this dependency in the Cargo.toml file:
+//! ```toml
+//! [dependencies.sofa]
+//! git = "https://github.com/mibes/sofa.git"
+//! ```
+//!
+//! If you want to continue to use the "old" 0.6 version, use this dependency instead:
 //! ```toml
 //! [dependencies]
 //! sofa = "0.6"
@@ -20,32 +29,35 @@
 //!
 //! This crate is an interface to CouchDB HTTP REST API. Works with stable Rust.
 //!
-//! Does not support `#![no_std]`
-//!
-//! After trying most crates for CouchDB in Rust (`chill`, `couchdb` in particular), none of them fit our needs hence
-//! the need to create our own.
+//! After trying most crates for CouchDB in Rust (`chill`, `couchdb` in particular), none of them fit our needs, hence the need to create our own.
 //!
-//! No async I/O (yet), uses a mix of Reqwest and Serde under the hood, with a
-//! few nice abstractions out there.
+//! Uses async I/O, with a mix of Reqwest and Serde under the hood, and a few nice abstractions on top.
 //!
 //! **NOT 1.0 YET, so expect changes**
 //!
-//! **Supports CouchDB 2.0 and up.**
+//! **Supports CouchDB 2.3.0 and up.**
 //!
 //! Be sure to check [CouchDB's Documentation](http://docs.couchdb.org/en/latest/index.html) in detail to see what's possible.
 //!
+//! The 0.7 version is based on the 0.6 release from https://github.com/YellowInnovation/sofa.
+//! It has been updated to the Rust 2018 edition standards, uses async I/O, and compiles against the latest serde and
+//! reqwest libraries.
+//!
+//! ## Example code
+//!
+//! You can launch the included example with:
+//! ```shell script
+//! cargo run --example basic_operations
+//! ```
+//!
 //! ## Running tests
 //!
-//! Make sure that you have an instance of CouchDB 2.0+ running, either via the
-//! supplied `docker-compose.yml` file or by yourself. It must be listening on
-//! the default port.
+//! Make sure that you have an instance of CouchDB 2.0+ running, either via the supplied `docker-compose.yml` file or by yourself. It must be listening on the default port.
 //!
 //! And then
 //! `cargo test -- --test-threads=1`
 //!
-//! Single-threading the tests is very important because we need to make sure
-//! that the basic features are working before actually testing features on
-//! dbs/documents.
+//! Single-threading the tests is very important because we need to make sure that the basic features are working before actually testing features on dbs/documents.
 //!
 //! ## Why the name "Sofa"
 //!
@@ -60,29 +72,17 @@
 //! * MIT license ([LICENSE-MIT](LICENSE-MIT) or
 //!   [https://opensource.org/licenses/MIT](https://opensource.org/licenses/MIT))
 //!
+//!
+//! [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa?ref=badge_large)
+//!
 //! ## Yellow Innovation
 //!
-//! Yellow Innovation is the innovation laboratory of the French postal
-//! service: La Poste.
+//! Yellow Innovation is the innovation laboratory of the French postal service: La Poste.
 //!
-//! We create innovative user experiences and journeys through services with a
-//! focus on IoT lately.
+//! We create innovative user experiences and journeys through services, lately with a focus on IoT.
 //!
 //! [Yellow Innovation's website and works](http://yellowinnovation.fr/en/)
 
-#[macro_use]
-extern crate failure;
-extern crate reqwest;
-extern crate serde;
-#[macro_use]
-extern crate serde_json;
-#[macro_use]
-extern crate serde_derive;
-
-#[cfg(test)]
-#[macro_use]
-extern crate pretty_assertions;
-
 /// Macros that the crate exports to facilitate most of the
 /// doc-to-json-to-string-related tasks
 #[allow(unused_macros)]
 mod macros {
@@ -96,10 +96,11 @@ mod macros {
         };
     }
 
-    /// Extracts a JSON Value to a defined Struct
+    /// Extracts a JSON Value to a defined Struct; returns the default value when the field cannot be found
+    /// or converted
json_extr { ($e:expr) => { - serde_json::from_value($e.to_owned()).unwrap() + serde_json::from_value($e.to_owned()).unwrap_or_default() }; } @@ -141,45 +142,52 @@ mod macros { } } -mod_use!(client); -mod_use!(database); -mod_use!(document); -mod_use!(error); +mod client; +pub mod database; +pub mod document; +pub mod error; +pub mod model; pub mod types; -mod_use!(model); + +pub use client::Client; #[allow(unused_mut, unused_variables)] #[cfg(test)] mod sofa_tests { mod a_sys { - use *; + const DB_HOST: &str = "http://admin:password@localhost:5984"; + + use crate::client::Client; + use serde_json::json; - #[test] - fn a_should_check_couchdbs_status() { - let client = Client::new("http://localhost:5984".into()).unwrap(); - let status = client.check_status(); + #[tokio::test] + async fn a_should_check_couchdbs_status() { + let client = Client::new(DB_HOST).unwrap(); + let status = client.check_status().await; assert!(status.is_ok()); } - #[test] - fn b_should_create_sofa_test_db() { - let client = Client::new("http://localhost:5984".into()).unwrap(); - let dbw = client.db("b_should_create_sofa_test_db"); + #[tokio::test] + async fn b_should_create_sofa_test_db() { + let client = Client::new(DB_HOST).unwrap(); + let dbw = client.db("b_should_create_sofa_test_db").await; assert!(dbw.is_ok()); let _ = client.destroy_db("b_should_create_sofa_test_db"); } - #[test] - fn c_should_create_a_document() { - let client = Client::new("http://localhost:5984".into()).unwrap(); - let dbw = client.db("c_should_create_a_document"); + #[tokio::test] + async fn c_should_create_a_document() { + let client = Client::new(DB_HOST).unwrap(); + let dbw = client.db("c_should_create_a_document").await; assert!(dbw.is_ok()); let db = dbw.unwrap(); - let ndoc_result = db.create(json!({ - "thing": true - })); + let ndoc_result = db + .create(json!({ + "thing": true + })) + .await; assert!(ndoc_result.is_ok()); @@ -189,27 +197,37 @@ mod sofa_tests { let _ = client.destroy_db("c_should_create_a_document"); } - #[test] - fn d_should_destroy_the_db() { - let client = Client::new("http://localhost:5984".into()).unwrap(); - let _ = client.db("d_should_destroy_the_db"); + #[tokio::test] + async fn d_should_destroy_the_db() { + let client = Client::new(DB_HOST).unwrap(); + let _ = client.db("d_should_destroy_the_db").await; - assert!(client.destroy_db("d_should_destroy_the_db").unwrap()); + assert!(client.destroy_db("d_should_destroy_the_db").await.unwrap()); } } mod b_db { - use *; - - fn setup(dbname: &'static str) -> (Client, Database, Document) { - let client = Client::new("http://localhost:5984".into()).unwrap(); - let dbw = client.db(dbname); + use crate::client::Client; + use crate::database::Database; + use crate::document::Document; + use crate::types; + use crate::types::find::FindQuery; + use crate::types::query::QueryParams; + use serde_json::json; + + const DB_HOST: &str = "http://admin:password@localhost:5984"; + + async fn setup(dbname: &str) -> (Client, Database, Document) { + let client = Client::new(DB_HOST).unwrap(); + let dbw = client.db(dbname).await; assert!(dbw.is_ok()); let db = dbw.unwrap(); - let ndoc_result = db.create(json!({ - "thing": true - })); + let ndoc_result = db + .create(json!({ + "thing": true + })) + .await; assert!(ndoc_result.is_ok()); @@ -219,87 +237,84 @@ mod sofa_tests { (client, db, doc) } - fn teardown(client: Client, dbname: &'static str) { - assert!(client.destroy_db(dbname).unwrap()) + async fn teardown(client: Client, dbname: &str) { + 
+            assert!(client.destroy_db(dbname).await.unwrap())
         }
 
-        #[test]
-        fn a_should_update_a_document() {
-            let (client, db, mut doc) = setup("a_should_update_a_document");
+        #[tokio::test]
+        async fn a_should_update_a_document() {
+            let (client, db, mut doc) = setup("a_should_update_a_document").await;
 
             doc["thing"] = json!(false);
 
-            let save_result = db.save(doc);
+            let save_result = db.save(doc).await;
             assert!(save_result.is_ok());
             let new_doc = save_result.unwrap();
             assert_eq!(new_doc["thing"], json!(false));
 
-            teardown(client, "a_should_update_a_document");
+            teardown(client, "a_should_update_a_document").await;
         }
 
-        #[test]
-        fn b_should_remove_a_document() {
-            let (client, db, doc) = setup("b_should_remove_a_document");
-            assert!(db.remove(doc));
+        #[tokio::test]
+        async fn b_should_remove_a_document() {
+            let (client, db, doc) = setup("b_should_remove_a_document").await;
+            assert!(db.remove(doc).await);
 
-            teardown(client, "b_should_remove_a_document");
+            teardown(client, "b_should_remove_a_document").await;
         }
 
-        #[test]
-        fn c_should_get_a_single_document() {
-            let (client, ..) = setup("c_should_get_a_single_document");
-            assert!(true);
-            teardown(client, "c_should_get_a_single_document");
+        #[tokio::test]
+        async fn c_should_get_a_single_document() {
+            let (client, ..) = setup("c_should_get_a_single_document").await;
+            teardown(client, "c_should_get_a_single_document").await;
         }
 
-        fn setup_create_indexes(dbname: &'static str) -> (Client, Database, Document) {
-            let (client, db, doc) = setup(dbname);
+        async fn setup_create_indexes(dbname: &str) -> (Client, Database, Document) {
+            let (client, db, doc) = setup(dbname).await;
 
-            let spec = types::IndexFields::new(vec![types::SortSpec::Simple(s!("thing"))]);
+            let spec = types::index::IndexFields::new(vec![types::find::SortSpec::Simple(s!("thing"))]);
 
-            let res = db.insert_index("thing-index".into(), spec);
+            let res = db.insert_index("thing-index".into(), spec).await;
 
             assert!(res.is_ok());
 
             (client, db, doc)
         }
 
-        #[test]
-        fn d_should_create_index_in_db() {
-            let (client, db, _) = setup_create_indexes("d_should_create_index_in_db");
-            assert!(true);
-            teardown(client, "d_should_create_index_in_db");
+        #[tokio::test]
+        async fn d_should_create_index_in_db() {
+            let (client, db, _) = setup_create_indexes("d_should_create_index_in_db").await;
+            teardown(client, "d_should_create_index_in_db").await;
         }
 
-        #[test]
-        fn e_should_list_indexes_in_db() {
-            let (client, db, _) = setup_create_indexes("e_should_list_indexes_in_db");
+        #[tokio::test]
+        async fn e_should_list_indexes_in_db() {
+            let (client, db, _) = setup_create_indexes("e_should_list_indexes_in_db").await;
 
-            let index_list = db.read_indexes().unwrap();
+            let index_list = db.read_indexes().await.unwrap();
             assert!(index_list.indexes.len() > 1);
 
-            let ref findex = index_list.indexes[1];
+            let findex = &index_list.indexes[1];
             assert_eq!(findex.name.as_str(), "thing-index");
 
-            teardown(client, "e_should_list_indexes_in_db");
+            teardown(client, "e_should_list_indexes_in_db").await;
         }
 
-        #[test]
-        fn f_should_ensure_index_in_db() {
-            let (client, db, _) = setup("f_should_ensure_index_in_db");
+        #[tokio::test]
+        async fn f_should_ensure_index_in_db() {
+            let (client, db, _) = setup("f_should_ensure_index_in_db").await;
 
-            let spec = types::IndexFields::new(vec![types::SortSpec::Simple(s!("thing"))]);
+            let spec = types::index::IndexFields::new(vec![types::find::SortSpec::Simple(s!("thing"))]);
 
-            let res = db.ensure_index("thing-index".into(), spec);
+            let res = db.ensure_index("thing-index".into(), spec).await;
 
-        #[test]
-        fn g_should_find_documents_in_db() {
-            let (client, db, doc) = setup_create_indexes("g_should_find_documents_in_db");
-
-            let documents_res = db.find(json!({
+        #[tokio::test]
+        async fn g_should_find_documents_in_db() {
+            let (client, db, doc) = setup_create_indexes("g_should_find_documents_in_db").await;
+            let query = FindQuery::new_from_value(json!({
                 "selector": {
                     "thing": true
                 },
@@ -309,11 +324,52 @@ mod sofa_tests {
                 }]
             }));
 
+            let documents_res = db.find(&query).await;
+
             assert!(documents_res.is_ok());
             let documents = documents_res.unwrap();
             assert_eq!(documents.rows.len(), 1);
 
-            teardown(client, "g_should_find_documents_in_db");
+            teardown(client, "g_should_find_documents_in_db").await;
+        }
+
+        #[tokio::test]
+        async fn h_should_bulk_get_a_document() {
+            let (client, db, doc) = setup("h_should_bulk_get_a_document").await;
+            let id = doc._id.clone();
+
+            let collection = db.get_bulk(vec![id]).await.unwrap();
+            assert_eq!(collection.rows.len(), 1);
+            assert!(db.remove(doc).await);
+
+            teardown(client, "h_should_bulk_get_a_document").await;
+        }
+
+        #[tokio::test]
+        async fn i_should_bulk_get_invalid_documents() {
+            let (client, db, doc) = setup("i_should_bulk_get_invalid_documents").await;
+            let id = doc._id.clone();
+            let invalid_id = "does_not_exist".to_string();
+
+            let collection = db.get_bulk(vec![id, invalid_id]).await.unwrap();
+            assert_eq!(collection.rows.len(), 1);
+            assert!(db.remove(doc).await);
+
+            teardown(client, "i_should_bulk_get_invalid_documents").await;
+        }
+
+        #[tokio::test]
+        async fn j_should_get_all_documents_with_keys() {
+            let (client, db, doc) = setup("j_should_get_all_documents_with_keys").await;
+            let id = doc._id.clone();
+
+            let params = QueryParams::from_keys(vec![id]);
+
+            let collection = db.get_all_params(Some(params)).await.unwrap();
+            assert_eq!(collection.rows.len(), 1);
+            assert!(db.remove(doc).await);
+
+            teardown(client, "j_should_get_all_documents_with_keys").await;
         }
     }
 }
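The migrated tests above double as a usage guide for the new async API. Below is a minimal end-to-end sketch, assuming a local CouchDB reachable with the same `admin:password` credentials the tests use; error handling is collapsed to `expect`, and the `example_db` name is invented:

```rust
use serde_json::json;
use sofa::Client;

#[tokio::main]
async fn main() {
    // Same credentials as the DB_HOST constant in the test suite.
    let client = Client::new("http://admin:password@localhost:5984").expect("invalid DB host");

    // Every operation now returns a future and must be awaited.
    let db = client.db("example_db").await.expect("could not create/open db");
    let doc = db
        .create(json!({ "thing": true }))
        .await
        .expect("could not create document");
    println!("created document {}", doc._id);

    let _ = client.destroy_db("example_db").await;
}
```

Note that `Client::new` now takes a `&str` instead of a `String`, so callers no longer need the `.into()` conversions seen in the old tests.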
diff --git a/src/model.rs b/src/model.rs
index ddf9d5b..f1cd680 100644
--- a/src/model.rs
+++ b/src/model.rs
@@ -1,8 +1,8 @@
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;
 use serde_json::{from_value, to_value};
-use ::document::*;
 use std::marker::Sized;
+use crate::document::Document;
 
 /// Trait that provides methods that can be used to switch between abstract `Document` and concrete `Model` implementors (such as your custom data models)
 pub trait Model<T: DeserializeOwned + Serialize + Sized> {
diff --git a/src/types/design.rs b/src/types/design.rs
new file mode 100644
index 0000000..8826cee
--- /dev/null
+++ b/src/types/design.rs
@@ -0,0 +1,11 @@
+use serde::{Serialize, Deserialize};
+
+/// Design document created abstraction
+#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
+pub struct DesignCreated {
+    pub result: Option<String>,
+    pub id: Option<String>,
+    pub name: Option<String>,
+    pub error: Option<String>,
+    pub reason: Option<String>
+}
\ No newline at end of file
diff --git a/src/types/document.rs b/src/types/document.rs
index 706bbdc..6d210cd 100644
--- a/src/types/document.rs
+++ b/src/types/document.rs
@@ -1,3 +1,5 @@
+use serde::{Serialize, Deserialize};
+
 /// String that represents a Document ID in CouchDB
 pub type DocumentId = String;
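With `model` now a public module, downstream crates can implement the `Model` trait for their own types. The trait's method list is not part of this hunk, so the sketch below assumes the 0.6-era shape (`from_document`/`to_document`), plus a `Document::new(Value)` constructor and a `get_data()` accessor on `Document`; check these against the actual sources. `UserDetails` is an invented example type:

```rust
use serde::{Deserialize, Serialize};
use serde_json::{from_value, to_value};
use sofa::document::Document;
use sofa::model::Model;

// Invented example type; any Serialize + Deserialize struct works.
#[derive(Serialize, Deserialize, Default)]
struct UserDetails {
    first_name: String,
}

impl Model<UserDetails> for UserDetails {
    fn from_document(d: Document) -> UserDetails {
        // ASSUMPTION: `get_data()` exposes the document's inner JSON value.
        from_value(d.get_data()).unwrap_or_default()
    }

    fn to_document(&self) -> Document {
        // ASSUMPTION: `Document::new(Value)` is still the 0.6-era constructor.
        Document::new(to_value(self).unwrap_or_default())
    }
}
```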
find_all_selector { + () => { + FindQuery::find_all().as_value() + }; +} + +impl FindQuery { + pub fn new_from_value(query: Value) -> Self { + query.into() + } + + // Create a new FindQuery from a valid selector. The selector syntax is documented here: + // https://docs.couchdb.org/en/latest/api/database/find.html#find-selectors + pub fn new(selector: Value) -> Self { + FindQuery { + selector, + limit: None, + skip: None, + sort: vec![], + fields: None, + use_index: None, + r: None, + bookmark: None, + update: None, + stable: None, + stale: None, + execution_stats: None, + } + } + + pub fn find_all() -> Self { + Self::new(SelectAll::default().as_value()) + } + + pub fn as_value(&self) -> Value { + self.into() + } +} + +impl Into for FindQuery { + fn into(self) -> Value { + serde_json::to_value(&self).expect("can not convert into json") + } +} + +impl Into for &FindQuery { + fn into(self) -> Value { + serde_json::to_value(&self).expect("can not convert into json") + } +} + +impl From for FindQuery { + fn from(value: Value) -> Self { + serde_json::from_value(value).expect("json Value is not a valid FindQuery") + } +} + +impl Display for FindQuery { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let json: Value = self.into(); + f.write_str(&json.to_string()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_convert_to_value() { + let mut sort = HashMap::new(); + sort.insert("first_name".to_string(), SortDirection::Desc); + + let mut query = FindQuery::find_all(); + query.limit = Some(10); + query.skip = Some(20); + query.sort = vec![SortSpec::Complex(sort)]; + let json = query.to_string(); + assert_eq!( + r#"{"limit":10,"selector":{"_id":{"$ne":null}},"skip":20,"sort":[{"first_name":"desc"}]}"#, + json + ) + } + + #[test] + fn test_default_select_all() { + let selector = FindQuery::find_all().as_value().to_string(); + assert_eq!(selector, r#"{"selector":{"_id":{"$ne":null}}}"#) + } + + #[test] + fn test_from_json() { + let query = FindQuery::new_from_value(json!({ + "selector": { + "thing": true + }, + "limit": 1, + "sort": [{ + "thing": "desc" + }] + })); + + let selector = query.selector.to_string(); + assert_eq!(selector, r#"{"thing":true}"#); + assert_eq!(query.limit, Some(1)); + assert_eq!(query.sort.len(), 1); + let first_sort = query.sort.get(0).unwrap(); + if let SortSpec::Complex(spec) = first_sort { + assert!(spec.contains_key("thing")); + let direction = spec.get("thing").unwrap(); + assert_eq!(direction, &SortDirection::Desc); + } else { + panic!("unexpected sort spec"); + } + } } diff --git a/src/types/index.rs b/src/types/index.rs index dc9e85e..e67ba4a 100644 --- a/src/types/index.rs +++ b/src/types/index.rs @@ -1,4 +1,7 @@ use super::*; +use serde::{Serialize, Deserialize}; +use find::{SortSpec}; +use document::{DocumentId}; /// Index fields abstraction #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] @@ -9,7 +12,7 @@ pub struct IndexFields { impl IndexFields { pub fn new(fields: Vec) -> IndexFields { IndexFields { - fields: fields + fields } } } @@ -24,16 +27,6 @@ pub struct Index { pub def: IndexFields } -/// Index created abstraction -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -pub struct IndexCreated { - pub result: Option, - pub id: Option, - pub name: Option, - pub error: Option, - pub reason: Option -} - /// Database index list abstraction #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct DatabaseIndexList { diff --git a/src/types/mod.rs 
diff --git a/src/types/index.rs b/src/types/index.rs
index dc9e85e..e67ba4a 100644
--- a/src/types/index.rs
+++ b/src/types/index.rs
@@ -1,4 +1,7 @@
 use super::*;
+use serde::{Serialize, Deserialize};
+use find::{SortSpec};
+use document::{DocumentId};
 
 /// Index fields abstraction
 #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
@@ -9,7 +12,7 @@ pub struct IndexFields {
 impl IndexFields {
     pub fn new(fields: Vec<SortSpec>) -> IndexFields {
         IndexFields {
-            fields: fields
+            fields
         }
     }
 }
@@ -24,16 +27,6 @@ pub struct Index {
     pub def: IndexFields
 }
 
-/// Index created abstraction
-#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
-pub struct IndexCreated {
-    pub result: Option<String>,
-    pub id: Option<String>,
-    pub name: Option<String>,
-    pub error: Option<String>,
-    pub reason: Option<String>
-}
-
 /// Database index list abstraction
 #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
 pub struct DatabaseIndexList {
diff --git a/src/types/mod.rs b/src/types/mod.rs
index fd7be42..aaf852b 100644
--- a/src/types/mod.rs
+++ b/src/types/mod.rs
@@ -1,4 +1,7 @@
-mod_use!(system);
-mod_use!(document);
-mod_use!(find);
-mod_use!(index);
+pub mod view;
+pub mod design;
+pub mod system;
+pub mod document;
+pub mod find;
+pub mod index;
+pub mod query;
diff --git a/src/types/query.rs b/src/types/query.rs
new file mode 100644
index 0000000..ce119fe
--- /dev/null
+++ b/src/types/query.rs
@@ -0,0 +1,128 @@
+use serde::{Serialize, Deserialize};
+
+#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+pub struct QueryParams {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub conflicts: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub descending: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub end_key: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub end_key_doc_id: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub group: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub group_level: Option<u32>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub include_docs: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub attachments: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub att_encoding_info: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub inclusive_end: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub key: Option<String>,
+
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub keys: Vec<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub limit: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub reduce: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub skip: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sorted: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stable: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stale: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub start_key: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub start_key_doc_id: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub update: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub update_seq: Option<bool>,
+}
+
+impl Default for QueryParams {
+    fn default() -> Self {
+        QueryParams {
+            conflicts: None,
+            descending: None,
+            end_key: None,
+            end_key_doc_id: None,
+            group: None,
+            group_level: None,
+            include_docs: None,
+            attachments: None,
+            att_encoding_info: None,
+            inclusive_end: None,
+            key: None,
+            keys: vec![],
+            limit: None,
+            reduce: None,
+            skip: None,
+            sorted: None,
+            stable: None,
+            stale: None,
+            start_key: None,
+            start_key_doc_id: None,
+            update: None,
+            update_seq: None,
+        }
+    }
+}
+
+impl QueryParams {
+    pub fn from_keys(keys: Vec<String>) -> Self {
+        QueryParams {
+            conflicts: None,
+            descending: None,
+            end_key: None,
+            end_key_doc_id: None,
+            group: None,
+            group_level: None,
+            include_docs: None,
+            attachments: None,
+            att_encoding_info: None,
+            inclusive_end: None,
+            key: None,
+            keys,
+            limit: None,
+            reduce: None,
+            skip: None,
+            sorted: None,
+            stable: None,
+            stale: None,
+            start_key: None,
+            start_key_doc_id: None,
+            update: None,
+            update_seq: None,
+        }
+    }
+}
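A sketch of how `from_keys` composes with the optional flags; the `doc_1`/`doc_2` ids are placeholders, and the field types are as reconstructed above. Unset fields are dropped from the serialized body by the `skip_serializing_if` attributes:

```rust
use sofa::types::query::QueryParams;

fn main() {
    // Hypothetical ids; `from_keys` fills `keys` and leaves everything else unset.
    let mut params = QueryParams::from_keys(vec!["doc_1".to_string(), "doc_2".to_string()]);
    params.include_docs = Some(true);

    // Only the fields that were actually set appear in the request body.
    let body = serde_json::to_value(&params).expect("serializable");
    assert_eq!(body.to_string(), r#"{"include_docs":true,"keys":["doc_1","doc_2"]}"#);
}
```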
diff --git a/src/types/system.rs b/src/types/system.rs
index 69e1667..414d842 100644
--- a/src/types/system.rs
+++ b/src/types/system.rs
@@ -1,3 +1,5 @@
+use serde::{Serialize, Deserialize};
+
 /// Couch vendor abstraction
 #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
 pub struct CouchVendor {
diff --git a/src/types/view.rs b/src/types/view.rs
new file mode 100644
index 0000000..6a1c6e3
--- /dev/null
+++ b/src/types/view.rs
@@ -0,0 +1,18 @@
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+#[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)]
+pub struct ViewCollection {
+    pub offset: Option<u32>,
+    pub rows: Vec<ViewItem>,
+    pub total_rows: Option<u32>,
+}
+
+#[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)]
+pub struct ViewItem {
+    pub key: String,
+    pub value: Value,
+    pub id: Option<String>,
+    // doc field, populated if the query was run with 'include_docs'
+    pub doc: Option<Value>,
+}
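`ViewCollection` and `ViewItem` mirror the response body of CouchDB's view endpoints, so a payload can be deserialized directly. A self-contained sketch with a hand-written payload (the id and rev are made up):

```rust
use serde_json::json;
use sofa::types::view::ViewCollection;

fn main() {
    // Hand-written payload in the shape of GET /{db}/_design/{ddoc}/_view/{view}.
    let body = json!({
        "offset": 0,
        "total_rows": 1,
        "rows": [
            { "id": "doc_1", "key": "doc_1", "value": { "rev": "1-abc" } }
        ]
    });

    // `doc` stays None because the payload was not produced with include_docs=true.
    let view: ViewCollection = serde_json::from_value(body).expect("valid view payload");
    assert_eq!(view.rows.len(), 1);
    assert_eq!(view.rows[0].id.as_deref(), Some("doc_1"));
}
```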