diff --git a/deltachat-ffi/deltachat.h b/deltachat-ffi/deltachat.h
index 97fe035c3a..f710106621 100644
--- a/deltachat-ffi/deltachat.h
+++ b/deltachat-ffi/deltachat.h
@@ -3151,6 +3151,23 @@ void dc_accounts_maybe_network (dc_accounts_t* accounts);
 
 void dc_accounts_maybe_network_lost (dc_accounts_t* accounts);
 
+/**
+ * Perform a background fetch for all accounts in parallel with a timeout.
+ * Pauses the scheduler, fetches messages from IMAP and then resumes the scheduler.
+ *
+ * dc_accounts_background_fetch() was created for the iOS background fetch.
+ *
+ * The `DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE` event is emitted at the end,
+ * even in case of timeout, unless the function fails and returns 0.
+ * Process all events until you get this one; then you can safely return to the
+ * background without missing notifications caused by timing race conditions.
+ *
+ * @memberof dc_accounts_t
+ * @param timeout The timeout in seconds.
+ * @return 1 if DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE was emitted, 0 otherwise.
+ */
+int dc_accounts_background_fetch (dc_accounts_t* accounts, uint64_t timeout);
+
 /**
  * Create the event emitter that is used to receive events.
  *
@@ -6255,6 +6272,16 @@ void dc_event_unref(dc_event_t* event);
 
 #define DC_EVENT_WEBXDC_INSTANCE_DELETED 2121
 
+/**
+ * Tells that the background fetch was completed (or timed out).
+ *
+ * This event acts as a marker: when you receive it, you can be sure
+ * that all events emitted during the background fetch were processed.
+ *
+ * This event is only emitted by the account manager.
+ */
+
+#define DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE 2200
 
 /**
  * @}
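Note: the header above spells out the calling contract: trigger the fetch, then keep draining events until `DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE` arrives. A minimal sketch of that pattern, written against the core Rust `Accounts` API rather than the C FFI (the handler name and the 25-second budget are invented for illustration; import paths may differ slightly):

```rust
// Minimal sketch of the documented calling pattern, using the Rust
// `Accounts` API (the C FFI follows the same contract). Assumes a tokio
// runtime; `on_background_wakeup` and the 25 s budget are made up.
use std::time::Duration;

use deltachat::accounts::Accounts;
use deltachat::EventType;

async fn on_background_wakeup(accounts: &Accounts) {
    // Create the emitter first so no event emitted during the fetch is lost.
    let emitter = accounts.get_event_emitter();

    // Fetch with a timeout well below the platform's background-time budget.
    accounts.background_fetch(Duration::from_secs(25)).await;

    // Drain events until the marker arrives; after that it is safe to
    // suspend without missing a notification.
    while let Some(event) = emitter.recv().await {
        match event.typ {
            EventType::AccountsBackgroundFetchDone => break,
            EventType::IncomingMsg { chat_id, msg_id } => {
                // Schedule a local notification for chat_id / msg_id here.
                let _ = (chat_id, msg_id);
            }
            _ => {}
        }
    }
}
```

Creating the emitter before the call matters: events emitted while the fetch runs are buffered, so the drain loop sees everything up to and including the marker.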
diff --git a/deltachat-ffi/src/lib.rs b/deltachat-ffi/src/lib.rs
index 5d68a940d4..6f28a89b96 100644
--- a/deltachat-ffi/src/lib.rs
+++ b/deltachat-ffi/src/lib.rs
@@ -559,6 +559,7 @@ pub unsafe extern "C" fn dc_event_get_id(event: *mut dc_event_t) -> libc::c_int
         EventType::ConfigSynced { .. } => 2111,
         EventType::WebxdcStatusUpdate { .. } => 2120,
         EventType::WebxdcInstanceDeleted { .. } => 2121,
+        EventType::AccountsBackgroundFetchDone => 2200,
     }
 }
 
@@ -586,7 +587,8 @@ pub unsafe extern "C" fn dc_event_get_data1_int(event: *mut dc_event_t) -> libc:
         | EventType::SelfavatarChanged
         | EventType::ConfigSynced { .. }
         | EventType::IncomingMsgBunch { .. }
-        | EventType::ErrorSelfNotInGroup(_) => 0,
+        | EventType::ErrorSelfNotInGroup(_)
+        | EventType::AccountsBackgroundFetchDone => 0,
         EventType::MsgsChanged { chat_id, .. }
         | EventType::ReactionsChanged { chat_id, .. }
         | EventType::IncomingMsg { chat_id, .. }
@@ -646,6 +648,7 @@ pub unsafe extern "C" fn dc_event_get_data2_int(event: *mut dc_event_t) -> libc:
         | EventType::WebxdcInstanceDeleted { .. }
         | EventType::IncomingMsgBunch { .. }
         | EventType::SelfavatarChanged
+        | EventType::AccountsBackgroundFetchDone
         | EventType::ConfigSynced { .. } => 0,
         EventType::ChatModified(_) => 0,
         EventType::MsgsChanged { msg_id, .. }
@@ -708,6 +711,7 @@ pub unsafe extern "C" fn dc_event_get_data2_str(event: *mut dc_event_t) -> *mut
         | EventType::SelfavatarChanged
         | EventType::WebxdcStatusUpdate { .. }
         | EventType::WebxdcInstanceDeleted { .. }
+        | EventType::AccountsBackgroundFetchDone
         | EventType::ChatEphemeralTimerModified { .. } => ptr::null_mut(),
         EventType::ConfigureProgress { comment, .. } => {
             if let Some(comment) = comment {
@@ -4898,6 +4902,26 @@ pub unsafe extern "C" fn dc_accounts_maybe_network_lost(accounts: *mut dc_accoun
     block_on(async move { accounts.write().await.maybe_network_lost().await });
 }
 
+#[no_mangle]
+pub unsafe extern "C" fn dc_accounts_background_fetch(
+    accounts: *mut dc_accounts_t,
+    timeout_in_seconds: u64,
+) -> libc::c_int {
+    if accounts.is_null() || timeout_in_seconds <= 2 {
+        eprintln!("ignoring careless call to dc_accounts_background_fetch()");
+        return 0;
+    }
+
+    let accounts = &*accounts;
+    block_on(async move {
+        let accounts = accounts.read().await;
+        accounts
+            .background_fetch(Duration::from_secs(timeout_in_seconds))
+            .await;
+    });
+    1
+}
+
 #[no_mangle]
 pub unsafe extern "C" fn dc_accounts_get_event_emitter(
     accounts: *mut dc_accounts_t,
diff --git a/deltachat-jsonrpc/src/api.rs b/deltachat-jsonrpc/src/api.rs
index fb8a2166be..e2bf6c62f9 100644
--- a/deltachat-jsonrpc/src/api.rs
+++ b/deltachat-jsonrpc/src/api.rs
@@ -231,6 +231,20 @@
         Ok(())
     }
 
+    /// Performs a background fetch for all accounts in parallel with a timeout.
+    ///
+    /// The `AccountsBackgroundFetchDone` event is emitted at the end, even in case of timeout.
+    /// Process all events until you get this one; then you can safely return to the background
+    /// without missing notifications caused by timing race conditions.
+    async fn accounts_background_fetch(&self, timeout_in_seconds: f64) -> Result<()> {
+        self.accounts
+            .write()
+            .await
+            .background_fetch(std::time::Duration::from_secs_f64(timeout_in_seconds))
+            .await;
+        Ok(())
+    }
+
     // ---------------------------------------------
     // Methods that work on individual accounts
     // ---------------------------------------------
diff --git a/deltachat-jsonrpc/src/api/types/events.rs b/deltachat-jsonrpc/src/api/types/events.rs
index c1feee63c7..e6f441f840 100644
--- a/deltachat-jsonrpc/src/api/types/events.rs
+++ b/deltachat-jsonrpc/src/api/types/events.rs
@@ -245,6 +245,13 @@ pub enum EventType {
     /// Inform that a message containing a webxdc instance has been deleted
     #[serde(rename_all = "camelCase")]
     WebxdcInstanceDeleted { msg_id: u32 },
+
+    /// Tells that the background fetch was completed (or timed out).
+    /// This event acts as a marker: when you receive it, you can be sure
+    /// that all events emitted during the background fetch were processed.
+    ///
+    /// This event is only emitted by the account manager.
+    AccountsBackgroundFetchDone,
 }
 
 impl From<CoreEventType> for EventType {
@@ -353,6 +360,7 @@ impl From<CoreEventType> for EventType {
             CoreEventType::WebxdcInstanceDeleted { msg_id } => WebxdcInstanceDeleted {
                 msg_id: msg_id.to_u32(),
            },
+            CoreEventType::AccountsBackgroundFetchDone => AccountsBackgroundFetchDone,
         }
     }
 }
diff --git a/node/constants.js b/node/constants.js
index 7c50ca55db..59031ee1d8 100644
--- a/node/constants.js
+++ b/node/constants.js
@@ -29,6 +29,7 @@ module.exports = {
   DC_DOWNLOAD_FAILURE: 20,
   DC_DOWNLOAD_IN_PROGRESS: 1000,
   DC_DOWNLOAD_UNDECIPHERABLE: 30,
+  DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE: 2200,
   DC_EVENT_CHAT_EPHEMERAL_TIMER_MODIFIED: 2021,
   DC_EVENT_CHAT_MODIFIED: 2020,
   DC_EVENT_CONFIGURE_PROGRESS: 2041,
diff --git a/node/events.js b/node/events.js
index a618d4599d..a5c9281b98 100644
--- a/node/events.js
+++ b/node/events.js
@@ -36,5 +36,6 @@ module.exports = {
   2110: 'DC_EVENT_SELFAVATAR_CHANGED',
   2111: 'DC_EVENT_CONFIG_SYNCED',
   2120: 'DC_EVENT_WEBXDC_STATUS_UPDATE',
-  2121: 'DC_EVENT_WEBXDC_INSTANCE_DELETED'
+  2121: 'DC_EVENT_WEBXDC_INSTANCE_DELETED',
+  2200: 'DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE'
 }
diff --git a/node/lib/constants.ts b/node/lib/constants.ts
index 9eba1b4b60..2fe4f9af50 100644
--- a/node/lib/constants.ts
+++ b/node/lib/constants.ts
@@ -29,6 +29,7 @@ export enum C {
   DC_DOWNLOAD_FAILURE = 20,
   DC_DOWNLOAD_IN_PROGRESS = 1000,
   DC_DOWNLOAD_UNDECIPHERABLE = 30,
+  DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE = 2200,
   DC_EVENT_CHAT_EPHEMERAL_TIMER_MODIFIED = 2021,
   DC_EVENT_CHAT_MODIFIED = 2020,
   DC_EVENT_CONFIGURE_PROGRESS = 2041,
@@ -326,4 +327,5 @@ export const EventId2EventName: { [key: number]: string } = {
   2111: 'DC_EVENT_CONFIG_SYNCED',
   2120: 'DC_EVENT_WEBXDC_STATUS_UPDATE',
   2121: 'DC_EVENT_WEBXDC_INSTANCE_DELETED',
+  2200: 'DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE',
 }
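Note: across the bindings the same marker appears in two shapes: node consumers receive `DC_EVENT_ACCOUNTS_BACKGROUND_FETCH_DONE` (ID 2200), while JSON-RPC consumers receive an `AccountsBackgroundFetchDone` event kind. A rough sketch of the likely JSON wire shape, assuming the enum keeps the `kind` tagging used for the existing jsonrpc events (an assumption not shown in this diff; verify against the generated TypeScript bindings):

```rust
// Rough shape of the new event on the wire, assuming deltachat-jsonrpc's
// usual internally tagged representation with a "kind" field (an assumption,
// not taken from this diff). The variant carries no fields, so the payload
// is the tag alone.
use serde_json::json;

fn main() {
    let done = json!({ "kind": "AccountsBackgroundFetchDone" });

    // Client-side completion check: stop draining events once this arrives.
    if done["kind"] == "AccountsBackgroundFetchDone" {
        println!("background fetch finished; safe to suspend");
    }
}
```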
diff --git a/src/accounts.rs b/src/accounts.rs
index e0d39c9525..626d279eff 100644
--- a/src/accounts.rs
+++ b/src/accounts.rs
@@ -5,6 +5,7 @@ use std::future::Future;
 use std::path::{Path, PathBuf};
 
 use anyhow::{ensure, Context as _, Result};
+use futures::future::join_all;
 use serde::{Deserialize, Serialize};
 use tokio::fs;
 use tokio::io::AsyncWriteExt;
@@ -291,6 +292,42 @@
         }
     }
 
+    /// Performs a background fetch for all accounts in parallel.
+    ///
+    /// This is an auxiliary function and not part of the public API.
+    /// Use [Accounts::background_fetch] instead.
+    async fn background_fetch_without_timeout(&self) {
+        async fn background_fetch_and_log_error(account: Context) {
+            if let Err(error) = account.background_fetch().await {
+                warn!(account, "{error:#}");
+            }
+        }
+
+        join_all(
+            self.accounts
+                .values()
+                .cloned()
+                .map(background_fetch_and_log_error),
+        )
+        .await;
+    }
+
+    /// Performs a background fetch for all accounts in parallel with a timeout.
+    ///
+    /// The `AccountsBackgroundFetchDone` event is emitted at the end;
+    /// process all events until you get this one and you can safely return to the background
+    /// without missing notifications caused by timing race conditions.
+    pub async fn background_fetch(&self, timeout: std::time::Duration) {
+        if let Err(_err) =
+            tokio::time::timeout(timeout, self.background_fetch_without_timeout()).await
+        {
+            self.emit_event(EventType::Warning(
+                "Background fetch timed out.".to_string(),
+            ));
+        }
+        self.emit_event(EventType::AccountsBackgroundFetchDone);
+    }
+
     /// Emits a single event.
     pub fn emit_event(&self, event: EventType) {
         self.events.emit(Event { id: 0, typ: event })
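Note: two behaviors of `Accounts::background_fetch` are worth spelling out: a failing account only logs a warning instead of aborting the other fetches, and hitting the timeout drops (cancels) whatever is still in flight. A self-contained sketch of that `join_all`/`timeout` interplay, with invented account counts and sleep times standing in for real fetches:

```rust
// Standalone illustration of the concurrency pattern above: `join_all` runs
// all per-account fetches concurrently, and `tokio::time::timeout` drops
// (cancels) whatever is still pending when the deadline hits. Account count
// and durations are made up for the example.
use std::time::Duration;

use futures::future::join_all;

async fn fake_fetch(account: u32) {
    // Stand-in for Context::background_fetch().
    tokio::time::sleep(Duration::from_millis(100 * u64::from(account))).await;
    println!("account {account} fetched");
}

#[tokio::main]
async fn main() {
    let all_accounts = join_all((1..=10).map(fake_fetch));
    if tokio::time::timeout(Duration::from_millis(350), all_accounts)
        .await
        .is_err()
    {
        // Mirrors the "Background fetch timed out." warning: fast accounts
        // completed, slow ones were cancelled mid-flight.
        println!("timed out; remaining fetches cancelled");
    }
    // Accounts::background_fetch emits AccountsBackgroundFetchDone here,
    // in both the success and the timeout case.
}
```

Because cancellation is just dropping the joined future, no per-account cleanup hook runs; the next scheduled fetch simply resumes from the server state.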
diff --git a/src/constants.rs b/src/constants.rs
index ac3c281ed0..61bc4909cb 100644
--- a/src/constants.rs
+++ b/src/constants.rs
@@ -214,6 +214,9 @@ pub(crate) const DC_FOLDERS_CONFIGURED_VERSION: i32 = 4;
 // `max_smtp_rcpt_to` in the provider db.
 pub(crate) const DEFAULT_MAX_SMTP_RCPT_TO: usize = 50;
 
+/// How long the last quota check must lie in the past before the background fetch checks again (in seconds).
+pub(crate) const DC_BACKGROUND_FETCH_QUOTA_CHECK_RATELIMIT: i64 = 12 * 60 * 60; // 12 hours
+
 #[cfg(test)]
 mod tests {
     use num_traits::FromPrimitive;
diff --git a/src/context.rs b/src/context.rs
index cfe7a91182..38bf11e0d8 100644
--- a/src/context.rs
+++ b/src/context.rs
@@ -15,16 +15,16 @@ use tokio::sync::{Mutex, Notify, RwLock};
 
 use crate::chat::{get_chat_cnt, ChatId};
 use crate::config::Config;
-use crate::constants::DC_VERSION_STR;
+use crate::constants::{DC_BACKGROUND_FETCH_QUOTA_CHECK_RATELIMIT, DC_VERSION_STR};
 use crate::contact::Contact;
 use crate::debug_logging::DebugLogging;
 use crate::events::{Event, EventEmitter, EventType, Events};
-use crate::imap::ServerMetadata;
+use crate::imap::{FolderMeaning, Imap, ServerMetadata};
 use crate::key::{load_self_public_key, DcKey as _};
 use crate::login_param::LoginParam;
 use crate::message::{self, MessageState, MsgId};
 use crate::quota::QuotaInfo;
-use crate::scheduler::SchedulerState;
+use crate::scheduler::{convert_folder_meaning, SchedulerState};
 use crate::sql::Sql;
 use crate::stock_str::StockStrings;
 use crate::timesmearing::SmearedTimestamp;
@@ -441,6 +441,55 @@ impl Context {
         self.scheduler.maybe_network().await;
     }
 
+    /// Does a background fetch:
+    /// pauses the scheduler, does one IMAP fetch, then unpauses and returns.
+    pub async fn background_fetch(&self) -> Result<()> {
+        if !(self.is_configured().await?) {
+            return Ok(());
+        }
+
+        let address = self.get_primary_self_addr().await?;
+        let time_start = std::time::SystemTime::now();
+        info!(self, "background_fetch started fetching {address}");
+
+        let _pause_guard = self.scheduler.pause(self.clone()).await?;
+
+        // connection
+        let mut connection = Imap::new_configured(self, channel::bounded(1).1).await?;
+        connection.prepare(self).await?;
+
+        // fetch IMAP folders
+        for folder_meaning in [FolderMeaning::Inbox, FolderMeaning::Mvbox] {
+            let (_, watch_folder) = convert_folder_meaning(self, folder_meaning).await?;
+            connection
+                .fetch_move_delete(self, &watch_folder, folder_meaning)
+                .await?;
+        }
+
+        // update quota (to send a warning if full) - but only check it once in a while
+        let quota_needs_update = {
+            let quota = self.quota.read().await;
+            quota
+                .as_ref()
+                .filter(|quota| quota.modified + DC_BACKGROUND_FETCH_QUOTA_CHECK_RATELIMIT > time())
+                .is_none()
+        };
+
+        if quota_needs_update {
+            if let Err(err) = self.update_recent_quota(&mut connection).await {
+                warn!(self, "Failed to update quota: {err:#}.");
+            }
+        }
+
+        info!(
+            self,
+            "background_fetch done for {address} took {:?}",
+            time_start.elapsed().unwrap_or_default()
+        );
+
+        Ok(())
+    }
+
     pub(crate) async fn schedule_resync(&self) -> Result<()> {
         self.resync_request.store(true, Ordering::Relaxed);
         self.scheduler.interrupt_inbox().await;
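Note: the quota refresh in `Context::background_fetch` is gated by `DC_BACKGROUND_FETCH_QUOTA_CHECK_RATELIMIT`: it runs when no cached quota exists, or when the cached value is at least 12 hours old. The gate extracted as a pure function, with made-up timestamps:

```rust
// The rate-limit gate from Context::background_fetch as a pure function:
// update when there is no cached quota, or when the cached one is older
// than the 12-hour ratelimit. `modified` and `now` are unix timestamps.
const RATELIMIT: i64 = 12 * 60 * 60; // DC_BACKGROUND_FETCH_QUOTA_CHECK_RATELIMIT

fn quota_needs_update(modified: Option<i64>, now: i64) -> bool {
    match modified {
        None => true,                    // no quota cached yet
        Some(t) => t + RATELIMIT <= now, // last check is at least 12 h old
    }
}

fn main() {
    let now = 1_700_000_000;
    assert!(quota_needs_update(None, now));
    assert!(!quota_needs_update(Some(now - 600), now)); // checked 10 min ago
    assert!(quota_needs_update(Some(now - 13 * 3600), now)); // checked 13 h ago
}
```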
diff --git a/src/events/payload.rs b/src/events/payload.rs
index d2cf3f4768..771088ab11 100644
--- a/src/events/payload.rs
+++ b/src/events/payload.rs
@@ -287,4 +287,11 @@ pub enum EventType {
         /// ID of the deleted message.
         msg_id: MsgId,
     },
+
+    /// Tells that the background fetch was completed (or timed out).
+    /// This event acts as a marker: when you receive it, you can be sure
+    /// that all events emitted during the background fetch were processed.
+    ///
+    /// This event is only emitted by the account manager.
+    AccountsBackgroundFetchDone,
 }
diff --git a/src/scheduler.rs b/src/scheduler.rs
index 7ad7b3c960..9a8b651ea1 100644
--- a/src/scheduler.rs
+++ b/src/scheduler.rs
@@ -477,46 +477,59 @@ async fn inbox_loop(
         .await;
 }
 
-/// Implement a single iteration of IMAP loop.
-///
-/// This function performs all IMAP operations on a single folder, selecting it if necessary and
-/// handling all the errors. In case of an error, it is logged, but not propagated upwards. If
-/// critical operation fails such as fetching new messages fails, connection is reset via
-/// `trigger_reconnect`, so a fresh one can be opened.
-async fn fetch_idle(ctx: &Context, connection: &mut Imap, folder_meaning: FolderMeaning) {
+/// Converts a folder meaning to the folder configuration and the watched folder name.
+/// Used internally by [fetch_idle] and [Context::background_fetch].
+pub async fn convert_folder_meaning(
+    ctx: &Context,
+    folder_meaning: FolderMeaning,
+) -> Result<(Config, String)> {
     let folder_config = match folder_meaning.to_config() {
         Some(c) => c,
         None => {
-            error!(ctx, "Bad folder meaning: {}", folder_meaning);
-            connection
-                .fake_idle(ctx, None, FolderMeaning::Unknown)
-                .await;
-            return;
+            bail!("Bad folder meaning: {}", folder_meaning);
         }
     };
+
     let folder = match ctx.get_config(folder_config).await {
         Ok(folder) => folder,
         Err(err) => {
-            warn!(
-                ctx,
-                "Can not watch {} folder, failed to retrieve config: {:#}", folder_config, err
+            bail!(
+                "Can not watch {} folder, failed to retrieve config: {:#}",
+                folder_config,
+                err
             );
-            connection
-                .fake_idle(ctx, None, FolderMeaning::Unknown)
-                .await;
-            return;
         }
     };
 
     let watch_folder = if let Some(watch_folder) = folder {
         watch_folder
     } else {
-        connection.connectivity.set_not_configured(ctx).await;
-        info!(ctx, "Can not watch {} folder, not set", folder_config);
-        connection
-            .fake_idle(ctx, None, FolderMeaning::Unknown)
-            .await;
-        return;
+        bail!("Can not watch {} folder, not set", folder_config);
+    };
+
+    Ok((folder_config, watch_folder))
+}
+
+/// Implement a single iteration of the IMAP loop.
+///
+/// This function performs all IMAP operations on a single folder, selecting it if necessary and
+/// handling all the errors. In case of an error, it is logged, but not propagated upwards. If
+/// a critical operation such as fetching new messages fails, the connection is reset via
+/// `trigger_reconnect`, so a fresh one can be opened.
+async fn fetch_idle(ctx: &Context, connection: &mut Imap, folder_meaning: FolderMeaning) {
+    let (folder_config, watch_folder) = match convert_folder_meaning(ctx, folder_meaning).await {
+        Ok(meaning) => meaning,
+        Err(error) => {
+            // Warning instead of error because the folder may not be configured.
+            // For example, this happens if the server does not have a Sent folder
+            // but watching the Sent folder is enabled.
+            warn!(ctx, "Error converting IMAP folder name: {:?}", error);
+            connection.connectivity.set_not_configured(ctx).await;
+            connection
+                .fake_idle(ctx, None, FolderMeaning::Unknown)
+                .await;
+            return;
+        }
     };
 
     // connect and fake idle if unable to connect
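Note: because `convert_folder_meaning` now reports unconfigured folders via `bail!`, each caller chooses its own recovery: `fetch_idle` downgrades the error to a warning and fakes IDLE, while `Context::background_fetch` propagates it with `?`. A hypothetical crate-internal caller showing the helper's contract (the function itself, and fetching the Sent folder at all, are invented for illustration):

```rust
// Hypothetical crate-internal helper (the scheduler module is not public
// API): fetch the Sent folder once if it is configured, skipping quietly
// otherwise, the same resolve-then-fetch pattern Context::background_fetch
// uses for Inbox and Mvbox.
use anyhow::Result;

use crate::context::Context;
use crate::imap::{FolderMeaning, Imap};
use crate::scheduler::convert_folder_meaning;

async fn fetch_sent_once(ctx: &Context, imap: &mut Imap) -> Result<()> {
    match convert_folder_meaning(ctx, FolderMeaning::Sent).await {
        Ok((_folder_config, watch_folder)) => {
            imap.fetch_move_delete(ctx, &watch_folder, FolderMeaning::Sent)
                .await?;
        }
        Err(err) => {
            // An unset or unknown folder is not a hard error here.
            info!(ctx, "Skipping Sent folder: {err:#}.");
        }
    }
    Ok(())
}
```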