1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
//! [`Store`] is a holder to create, read, write, and remove
//! [`Document`](crate::store::collection::document::DittoDocument)s from a Ditto peer.

use_prelude!();

use ffi_sdk::{COrderByParam, FsComponent, WriteStrategyRs};

use crate::{
    disk_usage::DiskUsage,
    error::{DittoError, ErrorKind},
};

pub mod batch;
pub mod collection;
pub mod collections;
pub mod ditto_attachment;
pub mod ditto_attachment_fetch_event;
pub mod ditto_attachment_fetcher;
pub mod ditto_attachment_token;
pub mod live_query;
#[cfg(feature = "timeseries")]
pub mod timeseries;
pub mod update;

use collections::pending_collections_operation::PendingCollectionsOperation;

#[derive(Clone)]
/// `Store` provides access to [`Collection`](crate::prelude::Collection)s and a
/// write transaction API.
pub struct Store {
    // Shared handle to the underlying FFI Ditto instance; `Clone`d `Store`s
    // share the same peer via this `Arc`.
    ditto: Arc<ffi_sdk::BoxedDitto>,
    // Disk-usage monitor scoped to the store's filesystem component
    // (constructed in `Store::new` with `FsComponent::Store`).
    disk_usage: Arc<DiskUsage>,
}

impl Store {
    // TODO(pub_check)
    /// Creates a `Store` backed by the given FFI Ditto handle.
    ///
    /// Retains an additional reference to the handle for the store's
    /// [`DiskUsage`] monitor.
    pub fn new(ditto: Arc<ffi_sdk::BoxedDitto>) -> Self {
        let disk_usage = Arc::new(DiskUsage::new(ditto.retain(), FsComponent::Store));
        Self { ditto, disk_usage }
    }

    // Note this method's logic will be moved into the core ditto library
    // in the future
    /// Rejects collection names that are empty or consist solely of
    /// whitespace.
    ///
    /// # Errors
    /// Returns an [`ErrorKind::InvalidInput`] error whose message names the
    /// violated rule.
    fn validate_collection_name(name: &str) -> Result<(), DittoError> {
        // Check emptiness first and return early. Previously both checks
        // matched the empty string (`split_whitespace().next()` is `None`
        // for "" as well), so the later whitespace-only error overwrote the
        // more precise "can not be empty" one.
        if name.is_empty() {
            return Err(DittoError::new(
                ErrorKind::InvalidInput,
                String::from("Collection name can not be empty"),
            ));
        }

        // `split_whitespace()` yields no tokens iff the string contains
        // nothing but whitespace characters.
        if name.split_whitespace().next().is_none() {
            return Err(DittoError::new(
                ErrorKind::InvalidInput,
                String::from("Collection name can not only contain whitespace"),
            ));
        }

        Ok(())
    }

    /// Returns a [`Collection`](crate::prelude::Collection) with the provided name.
    ///
    /// Local validation only rejects empty or whitespace-only names (see
    /// `validate_collection_name`). The additional rules below are presumably
    /// enforced by the core `ditto_collection` FFI call — TODO confirm:
    /// * its length is less than 100
    /// * it does not contain the char '\0'
    /// * it does not begin with "$TS_"
    pub fn collection(&self, collection_name: &'_ str) -> Result<Collection, DittoError> {
        Self::validate_collection_name(collection_name)?;
        let c_name = char_p::new(collection_name);
        // Non-zero status from the FFI layer signals a rejected name.
        let status = { ffi_sdk::ditto_collection(&*self.ditto, c_name.as_ref()) };
        if status != 0 {
            return Err(DittoError::from_ffi(ErrorKind::InvalidInput));
        }
        Ok(Collection {
            // Hold a weak reference so collections don't keep the peer alive.
            ditto: Arc::downgrade(&self.ditto),
            collection_name: c_name,
        })
    }

    /// Returns an object that lets you fetch or observe the collections in the
    /// store.
    pub fn collections(&self) -> PendingCollectionsOperation<'_> {
        PendingCollectionsOperation::<'_>::new(Arc::downgrade(&self.ditto))
    }

    /// Allows you to group multiple operations together that affect multiple
    /// documents, potentially across multiple collections, without
    /// auto-committing on each operation.
    ///
    /// At the end of the batch of operations, either
    /// [`batch.commit_changes`](crate::store::batch::ScopedStore::commit_changes)
    /// or
    /// [`batch.revert_changes`](crate::store::batch::ScopedStore::revert_changes)
    /// must be called.
    ///
    /// ## Example
    ///
    /// ```rust
    /// # macro_rules! ignore {($($__:tt)*) => ()} ignore! {
    /// ditto.store().with_batched_write(|batch| {
    ///     let mut foo_coll = batch.collection("foo");
    ///     foo_coll.find...().remove();
    ///     let mut bar_coll = batch.collection("bar");
    ///     // Expensive multi-mutation op:
    ///     for _ in 0 .. 10_000 {
    ///         let doc = ...;
    ///         bar_coll.insert(doc, None, false);
    ///     }
    ///     // At this point, we must say whether we commit or revert
    ///     // these changes:
    ///     batch.commit_changes()
    /// })
    /// # }
    /// ```
    pub fn with_batched_write<F>(
        &self,
        f: F,
    ) -> Result<Vec<batch::WriteTransactionResult>, DittoError>
    where
        for<'batch> F: FnOnce(batch::ScopedStore<'batch>) -> batch::Action<'batch>,
    {
        batch::with_batched_write(self, f)
    }

    /// Returns a list of the names of collections in the local store.
    pub fn collection_names(&self) -> Result<Vec<String>, DittoError> {
        let c_collections = { ffi_sdk::ditto_get_collection_names(&*self.ditto).ok()? };

        // Convert each FFI C string into an owned Rust `String`.
        Ok(c_collections
            .iter()
            .map(|x: &char_p::Box| -> String { x.clone().into_string() })
            .collect())
    }

    /// Returns a hash representing the current version of the given queries.
    /// When a document matching such queries gets mutated, the hash will change
    /// as well.
    ///
    /// Please note that the hash depends on how queries are constructed, so you
    /// should make sure to always compare hashes generated with the same set of
    /// queries.
    pub fn queries_hash(&self, live_queries: &[LiveQuery]) -> Result<u64, DittoError> {
        // Split each live query into parallel collection-name / query slices
        // as expected by the FFI call.
        let (coll_names, queries): (Vec<_>, Vec<_>) = live_queries
            .iter()
            .map(|lq| (lq.collection_name.as_ref(), lq.query.as_ref()))
            .unzip();

        {
            ffi_sdk::ditto_queries_hash(&self.ditto, coll_names[..].into(), queries[..].into()).ok()
        }
    }

    /// Returns a sequence of English words representing the current version of
    /// the given queries. When a document matching such queries gets mutated,
    /// the words will change as well.
    ///
    /// Please note that the resulting sequence of words depends on how queries
    /// are constructed, so you should make sure to always compare hashes
    /// generated with the same set of queries.
    pub fn queries_hash_mnemonic(&self, live_queries: &[LiveQuery]) -> Result<String, DittoError> {
        let (coll_names, queries): (Vec<_>, Vec<_>) = live_queries
            .iter()
            .map(|lq| (lq.collection_name.as_ref(), lq.query.as_ref()))
            .unzip();

        {
            ffi_sdk::ditto_queries_hash_mnemonic(
                &self.ditto,
                coll_names[..].into(),
                queries[..].into(),
            )
            .ok()
            .map(|c_str| c_str.into_string())
        }
    }

    /// Starts all live query webhooks.
    ///
    /// # Errors
    /// Returns an [`ErrorKind::Internal`] error when the FFI call reports a
    /// non-zero status.
    pub fn start_all_live_query_webhooks(&self) -> Result<(), DittoError> {
        {
            let ret = ffi_sdk::ditto_live_query_webhook_start_all(&self.ditto);
            if ret != 0 {
                return Err(DittoError::from_ffi(ErrorKind::Internal));
            }
        }
        Ok(())
    }

    /// Starts a live query webhook by its document id.
    ///
    /// # Errors
    /// Returns an [`ErrorKind::Internal`] error when the FFI call reports a
    /// non-zero status.
    pub fn start_live_query_webhook_by_id(&self, doc_id: DocumentId) -> Result<(), DittoError> {
        {
            let ret =
                ffi_sdk::ditto_live_query_webhook_start_by_id(&self.ditto, doc_id.bytes[..].into());
            if ret != 0 {
                return Err(DittoError::from_ffi(ErrorKind::Internal));
            }
        }
        Ok(())
    }

    /// Registers a new live query webhook and returns the id of the document
    /// that represents it.
    pub fn register_live_query_webhook(
        &self,
        collection_name: &str,
        query: &str,
        url: &str,
    ) -> Result<DocumentId, DittoError> {
        let c_collection_name = char_p::new(collection_name);
        let c_query = char_p::new(query);
        let c_url = char_p::new(url);
        // No ordering is applied; an empty slice is passed through to the FFI.
        let order_definitions: Vec<COrderByParam<'_>> = Vec::with_capacity(0);
        let doc_id = {
            // NOTE(review): the literal arguments -1 and 0 presumably mean
            // "no limit" and "no offset" — confirm against the FFI signature.
            ffi_sdk::ditto_live_query_webhook_register_str(
                &self.ditto,
                c_collection_name.as_ref(),
                c_query.as_ref(),
                order_definitions[..].into(),
                -1,
                0,
                c_url.as_ref(),
            )
            .ok()?
            .to::<Box<[u8]>>()
            .into()
        };

        Ok(doc_id)
    }

    /// Generates a new API secret for live query webhooks.
    ///
    /// # Errors
    /// Returns an [`ErrorKind::Internal`] error when the FFI call reports a
    /// non-zero status.
    pub fn live_query_webhook_generate_new_api_secret(&self) -> Result<(), DittoError> {
        {
            let ret = ffi_sdk::ditto_live_query_webhook_generate_new_api_secret(&self.ditto);
            if ret != 0 {
                return Err(DittoError::from_ffi(ErrorKind::Internal));
            }
        }
        Ok(())
    }

    /// Returns a [`TimeSeries`](crate::prelude::TimeSeries) with the provided name.
    ///
    /// The name undergoes the same local validation as collection names
    /// (non-empty, not whitespace-only).
    // NOTE(review): the `timeseries` module is gated behind the "timeseries"
    // feature but this method is not — confirm whether `TimeSeries` is
    // unconditionally available from the prelude.
    pub fn timeseries(&self, ts_name: &'_ str) -> Result<TimeSeries, DittoError> {
        Self::validate_collection_name(ts_name)?;
        let c_name = char_p::new(ts_name);
        Ok(TimeSeries {
            ditto: self.ditto.retain(),
            ts_name: c_name,
        })
    }

    /// Return a [`DiskUsage`](crate::prelude::DiskUsage) to monitor the disk usage of the
    /// [`Store`].
    pub fn disk_usage(&self) -> &DiskUsage {
        &self.disk_usage
    }
}

#[non_exhaustive]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
/// Specify the order of returned Documents in a query.
pub enum SortDirection {
    /// Sort from the lowest value to the highest.
    Ascending,
    /// Sort from the highest value to the lowest.
    Descending,
}

#[non_exhaustive]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
/// Specify the write strategy when inserting documents.
///
/// Each variant maps 1:1 onto the FFI-level `ffi_sdk::WriteStrategyRs`
/// (see `WriteStrategy::as_write_strategy_rs`).
pub enum WriteStrategy {
    /// An existing document will be merged with the document being inserted, if there is a
    /// pre-existing document.
    Merge,

    /// Insert the document only if there is not already a document with the same Id in the store.
    /// If there is already a document in the store with the same Id then this will be a no-op.
    InsertIfAbsent,

    /// Insert the document, with its contents treated as default data, only if there is not
    /// already a document with the same Id in the store. If there is already a document in the
    /// store with the same Id then this will be a no-op. Use this strategy if you want to
    /// insert default data for a given document Id, which you want to treat as common initial
    /// data amongst all peers and that you expect to be mutated or overwritten in due course.
    InsertDefaultIfAbsent,
}

impl WriteStrategy {
    fn as_write_strategy_rs(&self) -> WriteStrategyRs {
        match self {
            WriteStrategy::Merge => WriteStrategyRs::Merge,
            WriteStrategy::InsertIfAbsent => WriteStrategyRs::InsertIfAbsent,
            WriteStrategy::InsertDefaultIfAbsent => WriteStrategyRs::InsertDefaultIfAbsent,
        }
    }
}