From ab1fff2642be961f052dde06b2c03c588d904d51 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sat, 23 Dec 2023 19:48:14 -0800 Subject: [PATCH] fix `cargo clippy` lints --- src/api/client_server/membership.rs | 10 +- src/api/client_server/message.rs | 2 +- src/api/client_server/relations.rs | 6 +- src/api/client_server/session.rs | 10 +- src/api/client_server/state.rs | 6 +- src/api/client_server/sync.rs | 175 +++++++++--------- src/api/server_server.rs | 10 +- src/config/proxy.rs | 7 +- src/database/abstraction/watchers.rs | 1 + .../key_value/rooms/state_accessor.rs | 6 +- src/database/key_value/rooms/state_cache.rs | 2 + src/database/key_value/rooms/threads.rs | 4 +- src/database/key_value/rooms/timeline.rs | 11 +- src/main.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 1 + src/service/rooms/event_handler/mod.rs | 4 +- src/service/rooms/lazy_loading/mod.rs | 1 + src/service/rooms/pdu_metadata/data.rs | 1 + src/service/rooms/pdu_metadata/mod.rs | 7 +- src/service/rooms/search/data.rs | 1 + src/service/rooms/spaces/mod.rs | 43 ++--- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_accessor/mod.rs | 16 +- src/service/rooms/state_cache/data.rs | 2 + src/service/rooms/state_compressor/mod.rs | 12 +- src/service/rooms/threads/data.rs | 1 + src/service/rooms/threads/mod.rs | 4 +- src/service/rooms/timeline/data.rs | 2 + src/service/rooms/timeline/mod.rs | 57 +++--- src/service/sending/data.rs | 1 + src/service/users/mod.rs | 1 + 31 files changed, 205 insertions(+), 205 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 346f2575..ed59691d 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -400,7 +400,7 @@ pub async fn get_member_events_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -435,7 +435,7 @@ pub async fn joined_members_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -712,7 +712,7 @@ async fn join_room_by_id_helper( } info!("Running send_join auth check"); - if !state_res::event_auth::auth_check( + let authenticated = state_res::event_auth::auth_check( &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), &parsed_join_pdu, None::, // TODO: third party invite @@ -735,7 +735,9 @@ async fn join_room_by_id_helper( .map_err(|e| { warn!("Auth check failed: {e}"); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") - })? 
{ + })?; + + if !authenticated { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 750e0303..0952092b 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -124,7 +124,7 @@ pub async fn get_message_events_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); services().rooms.lazy_loading.lazy_load_confirm_delivery( sender_user, diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs index a7cea786..124f1310 100644 --- a/src/api/client_server/relations.rs +++ b/src/api/client_server/relations.rs @@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body @@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body @@ -121,7 +121,7 @@ pub async fn get_relating_events_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 5ffd8134..c17bd99b 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -117,12 +117,10 @@ pub async fn login_route(body: Ruma) -> Result { warn!("Unsupported or unknown login type: {:?}", &body.login_info); diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index d6d39390..174282a1 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -85,7 +85,7 @@ pub async fn get_state_events_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index a275b066..57572284 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -554,6 +554,7 @@ async fn sync_helper( } } +#[allow(clippy::too_many_arguments)] async fn load_joined_room( sender_user: &UserId, sender_device: &DeviceId, @@ -590,7 +591,7 @@ async fn load_joined_room( || services() .rooms .user - .last_notification_read(&sender_user, &room_id)? + .last_notification_read(sender_user, room_id)? 
> since; let mut timeline_users = HashSet::new(); @@ -599,16 +600,16 @@ async fn load_joined_room( } services().rooms.lazy_loading.lazy_load_confirm_delivery( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, sincecount, )?; // Database queries: let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { s } else { error!("Room {} has no state", room_id); @@ -618,7 +619,7 @@ async fn load_joined_room( let since_shortstatehash = services() .rooms .user - .get_token_shortstatehash(&room_id, since)?; + .get_token_shortstatehash(room_id, since)?; let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { @@ -630,12 +631,12 @@ async fn load_joined_room( let joined_member_count = services() .rooms .state_cache - .room_joined_count(&room_id)? + .room_joined_count(room_id)? .unwrap_or(0); let invited_member_count = services() .rooms .state_cache - .room_invited_count(&room_id)? + .room_invited_count(room_id)? .unwrap_or(0); // Recalculate heroes (first 5 members) @@ -648,7 +649,7 @@ async fn load_joined_room( for hero in services() .rooms .timeline - .all_pdus(&sender_user, &room_id)? + .all_pdus(sender_user, room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) .map(|(_, pdu)| { @@ -669,11 +670,11 @@ async fn load_joined_room( ) && (services() .rooms .state_cache - .is_joined(&user_id, &room_id)? + .is_joined(&user_id, room_id)? || services() .rooms .state_cache - .is_invited(&user_id, &room_id)?) + .is_invited(&user_id, room_id)?) { Ok::<_, Error>(Some(state_key.clone())) } else { @@ -789,17 +790,17 @@ async fn load_joined_room( // Reset lazy loading because this is an initial sync services().rooms.lazy_loading.lazy_load_reset( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, )?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, lazy_loaded, next_batchcount, ); @@ -866,14 +867,14 @@ async fn load_joined_room( } if !services().rooms.lazy_loading.lazy_load_was_sent_before( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, &event.sender, )? || lazy_load_send_redundant { if let Some(member_event) = services().rooms.state_accessor.room_state_get( - &room_id, + room_id, &StateEventType::RoomMember, event.sender.as_str(), )? { @@ -884,9 +885,9 @@ async fn load_joined_room( } services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, lazy_loaded, next_batchcount, ); @@ -934,7 +935,7 @@ async fn load_joined_room( match new_membership { MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&sender_user, &user_id, &room_id)? { + if !share_encrypted_room(sender_user, &user_id, room_id)? 
{ device_list_updates.insert(user_id); } } @@ -954,15 +955,15 @@ async fn load_joined_room( services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender - &sender_user != user_id + sender_user != user_id }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) + !share_encrypted_room(sender_user, user_id, room_id) .unwrap_or(false) }), ); @@ -997,7 +998,7 @@ async fn load_joined_room( services() .rooms .user - .notification_count(&sender_user, &room_id)? + .notification_count(sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ) @@ -1010,7 +1011,7 @@ async fn load_joined_room( services() .rooms .user - .highlight_count(&sender_user, &room_id)? + .highlight_count(sender_user, room_id)? .try_into() .expect("highlight count can't go that high"), ) @@ -1039,15 +1040,15 @@ async fn load_joined_room( .rooms .edus .read_receipt - .readreceipts_since(&room_id, since) + .readreceipts_since(room_id, since) .filter_map(|r| r.ok()) // Filter out buggy events .map(|(_, _, v)| v) .collect(); - if services().rooms.edus.typing.last_typing_update(&room_id)? > since { + if services().rooms.edus.typing.last_typing_update(room_id)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) + &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -1056,7 +1057,7 @@ async fn load_joined_room( // Save the state after this sync so we can send the correct state diff next sync services().rooms.user.associate_token_shortstatehash( - &room_id, + room_id, next_batch, current_shortstatehash, )?; @@ -1065,7 +1066,7 @@ async fn load_joined_room( account_data: RoomAccountData { events: services() .account_data - .changes_since(Some(&room_id), &sender_user, since)? + .changes_since(Some(room_id), sender_user, since)? .into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -1110,13 +1111,13 @@ fn load_timeline( if services() .rooms .timeline - .last_timeline_count(&sender_user, &room_id)? + .last_timeline_count(sender_user, room_id)? > roomsincecount { let mut non_timeline_pdus = services() .rooms .timeline - .pdus_until(&sender_user, &room_id, PduCount::max())? + .pdus_until(sender_user, room_id, PduCount::max())? .filter_map(|r| { // Filter out buggy events if r.is_err() { @@ -1172,7 +1173,6 @@ fn share_encrypted_room( pub async fn sync_events_v4_route( body: Ruma, ) -> Result> { - dbg!(&body.body); let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let mut body = body.body; @@ -1232,7 +1232,7 @@ pub async fn sync_events_v4_route( for room_id in &all_joined_rooms { let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? 
{ s } else { error!("Room {} has no state", room_id); @@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route( let since_shortstatehash = services() .rooms .user - .get_token_shortstatehash(&room_id, globalsince)?; + .get_token_shortstatehash(room_id, globalsince)?; let since_sender_member: Option = since_shortstatehash .and_then(|shortstatehash| { @@ -1331,7 +1331,7 @@ pub async fn sync_events_v4_route( if !share_encrypted_room( &sender_user, &user_id, - &room_id, + room_id, )? { device_list_changes.insert(user_id); } @@ -1352,7 +1352,7 @@ pub async fn sync_events_v4_route( services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender @@ -1360,7 +1360,7 @@ pub async fn sync_events_v4_route( }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) + !share_encrypted_room(&sender_user, user_id, room_id) .unwrap_or(false) }), ); @@ -1451,7 +1451,7 @@ pub async fn sync_events_v4_route( } sync_events::v4::SyncOp { op: SlidingOp::Sync, - range: Some(r.clone()), + range: Some(r), index: None, room_ids, room_id: None, @@ -1523,7 +1523,7 @@ pub async fn sync_events_v4_route( let roomsincecount = PduCount::Normal(*roomsince); let (timeline_pdus, limited) = - load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?; + load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?; if roomsince != &0 && timeline_pdus.is_empty() { continue; @@ -1555,63 +1555,58 @@ pub async fn sync_events_v4_route( let required_state = required_state_request .iter() - .map(|state| { + .flat_map(|state| { services() .rooms .state_accessor - .room_state_get(&room_id, &state.0, &state.1) + .room_state_get(room_id, &state.0, &state.1) + .ok() + .flatten() + .map(|state| state.to_sync_state_event()) }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) - .map(|state| state.to_sync_state_event()) .collect(); // Heroes let heroes = services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .filter_map(|r| r.ok()) .filter(|member| member != &sender_user) - .map(|member| { - Ok::<_, Error>( - services() - .rooms - .state_accessor - .get_member(&room_id, &member)? - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) - }), - ) + .flat_map(|member| { + services() + .rooms + .state_accessor + .get_member(room_id, &member) + .ok() + .flatten() + .map(|memberevent| { + ( + memberevent + .displayname + .unwrap_or_else(|| member.to_string()), + memberevent.avatar_url, + ) + }) }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) .take(5) .collect::>(); - let name = if heroes.len() > 1 { - let last = heroes[0].0.clone(); - Some( - heroes[1..] + let name = match &heroes[..] { + [] => None, + [only] => Some(only.0.clone()), + [firsts @ .., last] => Some( + firsts .iter() .map(|h| h.0.clone()) .collect::>() .join(", ") + " and " - + &last, - ) - } else if heroes.len() == 1 { - Some(heroes[0].0.clone()) - } else { - None + + &last.0, + ), }; - let avatar = if heroes.len() == 1 { - heroes[0].1.clone() + let avatar = if let [only] = &heroes[..] { + only.1.clone() } else { None }; @@ -1619,15 +1614,11 @@ pub async fn sync_events_v4_route( rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services() - .rooms - .state_accessor - .get_name(&room_id)? 
- .or_else(|| name), + name: services().rooms.state_accessor.get_name(room_id)?.or(name), avatar: services() .rooms .state_accessor - .get_avatar(&room_id)? + .get_avatar(room_id)? .map_or(avatar, |a| a.url), initial: Some(roomsince == &0), is_dm: None, @@ -1637,7 +1628,7 @@ pub async fn sync_events_v4_route( services() .rooms .user - .highlight_count(&sender_user, &room_id)? + .highlight_count(&sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ), @@ -1645,7 +1636,7 @@ pub async fn sync_events_v4_route( services() .rooms .user - .notification_count(&sender_user, &room_id)? + .notification_count(&sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ), @@ -1658,7 +1649,7 @@ pub async fn sync_events_v4_route( (services() .rooms .state_cache - .room_joined_count(&room_id)? + .room_joined_count(room_id)? .unwrap_or(0) as u32) .into(), ), @@ -1666,7 +1657,7 @@ pub async fn sync_events_v4_route( (services() .rooms .state_cache - .room_invited_count(&room_id)? + .room_invited_count(room_id)? .unwrap_or(0) as u32) .into(), ), @@ -1689,7 +1680,7 @@ pub async fn sync_events_v4_route( let _ = tokio::time::timeout(duration, watcher).await; } - Ok(dbg!(sync_events::v4::Response { + Ok(sync_events::v4::Response { initial: globalsince == 0, txn_id: body.txn_id.clone(), pos: next_batch.to_string(), @@ -1744,5 +1735,5 @@ pub async fn sync_events_v4_route( }, }, delta_token: None, - })) + }) } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index a11857dc..79f921e3 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -666,7 +666,7 @@ pub fn parse_incoming_pdu( let room_version_id = services().rooms.state.get_room_version(&room_id)?; - let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) { + let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -724,7 +724,7 @@ pub async fn send_transaction_message_route( continue; } - let r = parse_incoming_pdu(&pdu); + let r = parse_incoming_pdu(pdu); let (event_id, value, room_id) = match r { Ok(t) => t, Err(e) => { @@ -992,7 +992,7 @@ pub async fn get_event_route( if !services().rooms.state_accessor.server_can_see_event( sender_servername, - &room_id, + room_id, &body.event_id, )? { return Err(Error::BadRequest( @@ -1058,7 +1058,7 @@ pub async fn get_backfill_route( let all_events = services() .rooms .timeline - .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? + .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? .take(limit.try_into().unwrap()); let events = all_events @@ -1075,7 +1075,7 @@ pub async fn get_backfill_route( }) .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id)) .filter_map(|r| r.ok().flatten()) - .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu)) + .map(PduEvent::convert_to_outgoing_federation_event) .collect(); Ok(get_backfill::v1::Response { diff --git a/src/config/proxy.rs b/src/config/proxy.rs index dcf304e9..c03463e7 100644 --- a/src/config/proxy.rs +++ b/src/config/proxy.rs @@ -29,7 +29,9 @@ use crate::Result; /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`. 
#[derive(Clone, Debug, Deserialize)] #[serde(rename_all = "snake_case")] +#[derive(Default)] pub enum ProxyConfig { + #[default] None, Global { #[serde(deserialize_with = "crate::utils::deserialize_from_str")] @@ -48,11 +50,6 @@ impl ProxyConfig { }) } } -impl Default for ProxyConfig { - fn default() -> Self { - ProxyConfig::None - } -} #[derive(Clone, Debug, Deserialize)] pub struct PartialProxyConfig { diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index 55cb60b3..01156abd 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -8,6 +8,7 @@ use tokio::sync::watch; #[derive(Default)] pub(super) struct Watchers { + #[allow(clippy::type_complexity)] watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, } diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ad08f46e..fe40b937 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let parsed = services() .rooms .state_compressor - .parse_compressed_state_event(&compressed)?; + .parse_compressed_state_event(compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let (_, eventid) = services() .rooms .state_compressor - .parse_compressed_state_event(&compressed)?; + .parse_compressed_state_event(compressed)?; if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { result.insert( ( @@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { services() .rooms .state_compressor - .parse_compressed_state_event(&compressed) + .parse_compressed_state_event(compressed) .ok() .map(|(_, id)| id) })) diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index d0ea0c2c..3dcaf4ae 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -471,6 +471,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } /// Returns an iterator over all rooms a user was invited to. + #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] fn rooms_invited<'a>( &'a self, @@ -549,6 +550,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } /// Returns an iterator over all rooms a user left. + #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] fn rooms_left<'a>( &'a self, diff --git a/src/database/key_value/rooms/threads.rs b/src/database/key_value/rooms/threads.rs index 35c4e6e2..5e3dc970 100644 --- a/src/database/key_value/rooms/threads.rs +++ b/src/database/key_value/rooms/threads.rs @@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase { .collect::>() .join(&[0xff][..]); - self.threadid_userids.insert(&root_id, &users)?; + self.threadid_userids.insert(root_id, &users)?; Ok(()) } fn get_participants(&self, root_id: &[u8]) -> Result>> { - if let Some(users) = self.threadid_userids.get(&root_id)? { + if let Some(users) = self.threadid_userids.get(root_id)? 
{ Ok(Some( users .split(|b| *b == 0xff) diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index ba1e85ef..f322d430 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the `count` of this pdu's id. fn get_pdu_count(&self, event_id: &EventId) -> Result> { - Ok(self - .eventid_pduid + self.eventid_pduid .get(event_id.as_bytes())? .map(|pdu_id| pdu_count(&pdu_id)) - .transpose()?) + .transpose() } /// Returns the json of a pdu. @@ -80,7 +79,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - Ok(self.eventid_pduid.get(event_id.as_bytes())?) + self.eventid_pduid.get(event_id.as_bytes()) } /// Returns the pdu. @@ -230,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, until: PduCount, ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, until, 1, true)?; + let (prefix, current) = count_to_id(room_id, until, 1, true)?; let user_id = user_id.to_owned(); @@ -257,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, from: PduCount, ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, from, 1, false)?; + let (prefix, current) = count_to_id(room_id, from, 1, false)?; let user_id = user_id.to_owned(); diff --git a/src/main.rs b/src/main.rs index c74d6ddb..683e0914 100644 --- a/src/main.rs +++ b/src/main.rs @@ -238,7 +238,7 @@ async fn spawn_task( .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) } -async fn unrecognized_method( +async fn unrecognized_method( req: axum::http::Request, next: axum::middleware::Next, ) -> std::result::Result { diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index a183d196..044dad82 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -11,6 +11,7 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + #[allow(clippy::type_complexity)] fn readreceipts_since<'a>( &'a self, room_id: &RoomId, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 29199781..e7db6f78 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -92,7 +92,7 @@ impl Service { )); } - services().rooms.event_handler.acl_check(origin, &room_id)?; + services().rooms.event_handler.acl_check(origin, room_id)?; // 1. Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { @@ -276,6 +276,7 @@ impl Service { r } + #[allow(clippy::type_complexity, clippy::too_many_arguments)] #[tracing::instrument(skip(self, create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( &'a self, @@ -1009,6 +1010,7 @@ impl Service { /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? 
+ #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( &'a self, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index e6e4f896..c51a57e9 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -14,6 +14,7 @@ use super::timeline::PduCount; pub struct Service { pub db: &'static dyn Data, + #[allow(clippy::type_complexity)] pub lazy_load_waiting: Mutex>>, } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 6c4cb3ce..a4df34cc 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId}; pub trait Data: Send + Sync { fn add_relation(&self, from: u64, to: u64) -> Result<()>; + #[allow(clippy::type_complexity)] fn relations_until<'a>( &'a self, user_id: &'a UserId, diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 9ce74f4d..411f4f54 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -40,6 +40,7 @@ impl Service { } } + #[allow(clippy::too_many_arguments)] pub fn paginate_relations_with_filter( &self, sender_user: &UserId, @@ -82,7 +83,7 @@ impl Service { services() .rooms .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .user_can_see_event(sender_user, room_id, &pdu.event_id) .unwrap_or(false) }) .take_while(|&(k, _)| Some(k) != to) // Stop at `to` @@ -106,7 +107,7 @@ impl Service { let events_before: Vec<_> = services() .rooms .pdu_metadata - .relations_until(sender_user, &room_id, target, from)? + .relations_until(sender_user, room_id, target, from)? .filter(|r| { r.as_ref().map_or(true, |(_, pdu)| { filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) @@ -129,7 +130,7 @@ impl Service { services() .rooms .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .user_can_see_event(sender_user, room_id, &pdu.event_id) .unwrap_or(false) }) .take_while(|&(k, _)| Some(k) != to) // Stop at `to` diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 6eef38fb..7ea7e3d1 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -4,6 +4,7 @@ use ruma::RoomId; pub trait Data: Send + Sync { fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; + #[allow(clippy::type_complexity)] fn search_pdus<'a>( &'a self, room_id: &RoomId, diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 53232f46..615e9ca0 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -197,7 +197,7 @@ impl Service { if let Ok(response) = services() .sending .send_federation_request( - &server, + server, federation::space::get_hierarchy::v1::Request { room_id: current_room.to_owned(), suggested_only, @@ -235,7 +235,7 @@ impl Service { .room .allowed_room_ids .into_iter() - .map(|room| AllowRule::room_membership(room)) + .map(AllowRule::room_membership) .collect(), }) } @@ -245,7 +245,7 @@ impl Service { .room .allowed_room_ids .into_iter() - .map(|room| AllowRule::room_membership(room)) + .map(AllowRule::room_membership) .collect(), }) } @@ -313,7 +313,7 @@ impl Service { canonical_alias: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? + .room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")? 
.map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomCanonicalAliasEventContent| c.alias) @@ -321,11 +321,11 @@ impl Service { Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: services().rooms.state_accessor.get_name(&room_id)?, + name: services().rooms.state_accessor.get_name(room_id)?, num_joined_members: services() .rooms .state_cache - .room_joined_count(&room_id)? + .room_joined_count(room_id)? .unwrap_or_else(|| { warn!("Room {} has no member count", room_id); 0 @@ -336,7 +336,7 @@ impl Service { topic: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomTopic, "")? + .room_state_get(room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) @@ -348,7 +348,7 @@ impl Service { world_readable: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| { @@ -363,7 +363,7 @@ impl Service { guest_can_join: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? + .room_state_get(room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomGuestAccessEventContent| { @@ -376,7 +376,7 @@ impl Service { avatar_url: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? + .room_state_get(room_id, &StateEventType::RoomAvatar, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomAvatarEventContent| c.url) @@ -389,7 +389,7 @@ impl Service { let join_rule = services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomJoinRulesEventContent| c.join_rule) @@ -415,7 +415,7 @@ impl Service { room_type: services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
.map(|s| { serde_json::from_str::(s.content.get()).map_err(|e| { error!("Invalid room create event in database: {}", e); @@ -455,7 +455,7 @@ impl Service { SpaceRoomJoinRule::Invite => services() .rooms .state_cache - .is_joined(sender_user, &room_id)?, + .is_joined(sender_user, room_id)?, _ => false, }; @@ -479,17 +479,14 @@ impl Service { match join_rule { JoinRule::Restricted(r) => { for rule in &r.allow { - match rule { - join_rules::AllowRule::RoomMembership(rm) => { - if let Ok(true) = services() - .rooms - .state_cache - .is_joined(sender_user, &rm.room_id) - { - return Ok(true); - } + if let join_rules::AllowRule::RoomMembership(rm) = rule { + if let Ok(true) = services() + .rooms + .state_cache + .is_joined(sender_user, &rm.room_id) + { + return Ok(true); } - _ => {} } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 48c60203..c209eb5a 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -41,7 +41,7 @@ impl Service { services() .rooms .state_compressor - .parse_compressed_state_event(&new) + .parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { @@ -412,7 +412,7 @@ impl Service { services() .rooms .state_compressor - .parse_compressed_state_event(&compressed) + .parse_compressed_state_event(compressed) .ok() }) .filter_map(|(shortstatekey, event_id)| { diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index a4a62fe4..b00dc58c 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -180,7 +180,7 @@ impl Service { return Ok(*visibility); } - let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; + let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; let history_visibility = self .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? @@ -197,11 +197,11 @@ impl Service { HistoryVisibility::Shared => currently_member, HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, &user_id) + self.user_was_invited(shortstatehash, user_id) } HistoryVisibility::Joined => { // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, &user_id) + self.user_was_joined(shortstatehash, user_id) } _ => { error!("Unknown history visibility {history_visibility}"); @@ -221,10 +221,10 @@ impl Service { /// the room's history_visibility at that event's state. #[tracing::instrument(skip(self, user_id, room_id))] pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; + let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; let history_visibility = self - .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(HistoryVisibility::Shared), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) @@ -276,7 +276,7 @@ impl Service { services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomName, "")? + .room_state_get(room_id, &StateEventType::RoomName, "")? 
.map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomNameEventContent| c.name) @@ -288,7 +288,7 @@ impl Service { services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? + .room_state_get(room_id, &StateEventType::RoomAvatar, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) @@ -303,7 +303,7 @@ impl Service { services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())? + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map_err(|_| Error::bad_database("Invalid room member event in database.")) diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index d8bb4a44..8921909f 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -78,6 +78,7 @@ pub trait Data: Send + Sync { ) -> Box> + 'a>; /// Returns an iterator over all rooms a user was invited to. + #[allow(clippy::type_complexity)] fn rooms_invited<'a>( &'a self, user_id: &UserId, @@ -96,6 +97,7 @@ pub trait Data: Send + Sync { ) -> Result>>>; /// Returns an iterator over all rooms a user left. + #[allow(clippy::type_complexity)] fn rooms_left<'a>( &'a self, user_id: &UserId, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index d29b020b..6118e06b 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -16,6 +16,7 @@ use self::data::StateDiff; pub struct Service { pub db: &'static dyn Data, + #[allow(clippy::type_complexity)] pub stateinfo_cache: Mutex< LruCache< u64, @@ -33,6 +34,7 @@ pub type CompressedStateEvent = [u8; 2 * size_of::()]; impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. + #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( &self, @@ -131,6 +133,7 @@ impl Service { /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer + #[allow(clippy::type_complexity)] #[tracing::instrument(skip( self, statediffnew, @@ -164,7 +167,7 @@ impl Service { for removed in statediffremoved.iter() { if !parent_new.remove(removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed.clone()); + parent_removed.insert(*removed); } // Else it was added in the parent and we removed it again. We can forget this change } @@ -172,7 +175,7 @@ impl Service { for new in statediffnew.iter() { if !parent_removed.remove(new) { // It was not touched in the parent and we added it - parent_new.insert(new.clone()); + parent_new.insert(*new); } // Else it was removed in the parent and we added it again. We can forget this change } @@ -217,7 +220,7 @@ impl Service { for removed in statediffremoved.iter() { if !parent_new.remove(removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed.clone()); + parent_removed.insert(*removed); } // Else it was added in the parent and we removed it again. 
We can forget this change } @@ -225,7 +228,7 @@ impl Service { for new in statediffnew.iter() { if !parent_removed.remove(new) { // It was not touched in the parent and we added it - parent_new.insert(new.clone()); + parent_new.insert(*new); } // Else it was removed in the parent and we added it again. We can forget this change } @@ -253,6 +256,7 @@ impl Service { } /// Returns the new shortstatehash, and the state diff from the previous room state + #[allow(clippy::type_complexity)] pub fn save_state( &self, room_id: &RoomId, diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs index 9221e8e8..e7159de0 100644 --- a/src/service/rooms/threads/data.rs +++ b/src/service/rooms/threads/data.rs @@ -2,6 +2,7 @@ use crate::{PduEvent, Result}; use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; pub trait Data: Send + Sync { + #[allow(clippy::type_complexity)] fn threads_until<'a>( &'a self, user_id: &'a UserId, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index fb703839..c6193bc8 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -26,7 +26,7 @@ impl Service { self.db.threads_until(user_id, room_id, until, include) } - pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { + pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { let root_id = &services() .rooms .timeline @@ -103,7 +103,7 @@ impl Service { } let mut users = Vec::new(); - if let Some(userids) = self.db.get_participants(&root_id)? { + if let Some(userids) = self.db.get_participants(root_id)? { users.extend_from_slice(&userids); users.push(pdu.sender.clone()); } else { diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index afa2cfbf..6290b8cc 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -66,6 +66,7 @@ pub trait Data: Send + Sync { /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. + #[allow(clippy::type_complexity)] fn pdus_until<'a>( &'a self, user_id: &UserId, @@ -75,6 +76,7 @@ pub trait Data: Send + Sync { /// Returns an iterator over all events in a room that happened after the event with id `from` /// in chronological order. + #[allow(clippy::type_complexity)] fn pdus_after<'a>( &'a self, user_id: &UserId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 25e1c54d..61b73378 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -58,8 +58,8 @@ impl PduCount { } pub fn try_from_string(token: &str) -> Result { - if token.starts_with('-') { - token[1..].parse().map(PduCount::Backfilled) + if let Some(stripped) = token.strip_prefix('-') { + stripped.parse().map(PduCount::Backfilled) } else { token.parse().map(PduCount::Normal) } @@ -112,7 +112,7 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? + self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? 
.next() .map(|o| o.map(|(_, p)| Arc::new(p))) .transpose() @@ -458,7 +458,7 @@ impl Service { let to_conduit = body.starts_with(&format!("{server_user}: ")) || body.starts_with(&format!("{server_user} ")) || body == format!("{server_user}:") - || body == format!("{server_user}"); + || body == server_user; // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit @@ -842,7 +842,7 @@ impl Service { let target = pdu .state_key() - .filter(|v| v.starts_with("@")) + .filter(|v| v.starts_with('@')) .unwrap_or(sender.as_str()); let server_name = services().globals.server_name(); let server_user = format!("@conduit:{}", server_name); @@ -850,7 +850,7 @@ impl Service { .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if content.membership == MembershipState::Leave { - if target == &server_user { + if target == server_user { warn!("Conduit user cannot leave from admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -876,7 +876,7 @@ impl Service { } if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - if target == &server_user { + if target == server_user { warn!("Conduit user cannot be banned in admins room"); return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -1048,7 +1048,7 @@ impl Service { #[tracing::instrument(skip(self, room_id))] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { let first_pdu = self - .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? + .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? .next() .expect("Room is not empty")?; @@ -1060,7 +1060,7 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = services() .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")? + .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? 
.map(|ev| { serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) @@ -1091,11 +1091,9 @@ impl Service { .await; match response { Ok(response) => { - let mut pub_key_map = RwLock::new(BTreeMap::new()); + let pub_key_map = RwLock::new(BTreeMap::new()); for pdu in response.pdus { - if let Err(e) = self - .backfill_pdu(backfill_server, pdu, &mut pub_key_map) - .await + if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await { warn!("Failed to add backfilled pdu: {e}"); } @@ -1142,7 +1140,7 @@ impl Service { services() .rooms .event_handler - .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map) + .handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map) .await?; let value = self.get_pdu_json(&event_id)?.expect("We just created it"); @@ -1175,24 +1173,21 @@ impl Service { drop(insert_lock); - match pdu.kind { - TimelineEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody { - body: Option, - } - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - services() - .rooms - .search - .index_pdu(shortroomid, &pdu_id, &body)?; - } + if pdu.kind == TimelineEventType::RoomMessage { + #[derive(Deserialize)] + struct ExtractBody { + body: Option, + } + + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if let Some(body) = content.body { + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, &body)?; } - _ => {} } drop(mutex_lock); diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 2e574e23..8b4d236f 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -5,6 +5,7 @@ use crate::Result; use super::{OutgoingKind, SendingEventType}; pub trait Data: Send + Sync { + #[allow(clippy::type_complexity)] fn active_requests<'a>( &'a self, ) -> Box, OutgoingKind, SendingEventType)>> + 'a>; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 6faa6d8e..dc34d533 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -34,6 +34,7 @@ pub struct SlidingSyncCache { pub struct Service { pub db: &'static dyn Data, + #[allow(clippy::type_complexity)] pub connections: Mutex>>>, }
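For readers following the patch hunk by hunk, below is a minimal, self-contained Rust sketch of three recurring rewrites. It is not part of the patch; `parse_count`, `room_name` and the stripped-down `ProxyConfig` are invented stand-ins for the real items in src/config/proxy.rs, src/service/rooms/timeline/mod.rs (`PduCount::try_from_string`) and src/api/client_server/sync.rs (the heroes `name` computation). The patterns shown are deriving `Default` with a `#[default]` variant instead of a hand-written impl (clippy::derivable_impls), `str::strip_prefix` instead of `starts_with` plus manual slicing (clippy::manual_strip), and a slice pattern instead of `len()`-based branching.

// Illustrative stand-ins only; the real ProxyConfig also derives Clone, Debug
// and Deserialize, and try_from_string returns PduCount variants.
#[allow(dead_code)]
#[derive(Default)]
enum ProxyConfig {
    #[default]
    None,
    Global,
}

// strip_prefix replaces `token.starts_with('-')` plus `token[1..]`.
fn parse_count(token: &str) -> Result<i64, std::num::ParseIntError> {
    if let Some(stripped) = token.strip_prefix('-') {
        stripped.parse::<i64>().map(|n| -n)
    } else {
        token.parse()
    }
}

// A slice pattern replaces branching on `heroes.len() > 1` / `== 1`.
fn room_name(heroes: &[(String, Option<String>)]) -> Option<String> {
    match heroes {
        [] => None,
        [only] => Some(only.0.clone()),
        [firsts @ .., last] => Some(
            firsts
                .iter()
                .map(|h| h.0.clone())
                .collect::<Vec<_>>()
                .join(", ")
                + " and "
                + &last.0,
        ),
    }
}

fn main() {
    assert!(matches!(ProxyConfig::default(), ProxyConfig::None));
    assert_eq!(parse_count("-3"), Ok(-3));
    let heroes = vec![("alice".to_owned(), None), ("bob".to_owned(), None)];
    assert_eq!(room_name(&heroes).as_deref(), Some("alice and bob"));
}

The real code differs in the obvious ways: `try_from_string` produces `PduCount::Backfilled`/`PduCount::Normal` rather than a signed integer, and the heroes tuples carry a display name plus an optional avatar URL.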
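A second standalone sketch, again with invented names (`Event`, `lookup` and `to_outgoing` stand in for Conduit's PDU lookup machinery), of the iterator-level rewrites that recur in src/api/client_server/sync.rs, src/api/server_server.rs and src/service/rooms/spaces/mod.rs: a single `flat_map` in place of `.map(..)` followed by `.filter_map(|r| r.ok())` and `.filter_map(|o| o)`, a bare function path (as with `PduEvent::convert_to_outgoing_federation_event` and `AllowRule::room_membership`) in place of a closure that only forwards its argument (clippy::redundant_closure), and comparisons written without an extra `&` on an operand that is already a reference.

#[derive(Debug)]
struct Event {
    sender: String,
}

// Pretend database lookup that can fail or find nothing, analogous in shape
// to room_state_get in the patch.
fn lookup(id: &str) -> Result<Option<Event>, ()> {
    match id {
        "a" => Ok(Some(Event { sender: "@alice:example.org".to_owned() })),
        "b" => Ok(None),
        _ => Err(()),
    }
}

fn to_outgoing(event: Event) -> String {
    event.sender
}

fn main() {
    let ids = ["a", "b", "c"];
    let server_user = String::from("@conduit:example.org");

    // Before: .map(|id| lookup(id)).filter_map(|r| r.ok()).filter_map(|o| o)
    //         .map(|e| to_outgoing(e))
    // After: one flat_map plus a function path.
    let outgoing: Vec<String> = ids
        .iter()
        .flat_map(|id| lookup(id).ok().flatten())
        .map(to_outgoing)
        // No extra `&` on the already-borrowed operand: `sender != &server_user`
        // rather than `&server_user != sender`.
        .filter(|sender| sender != &server_user)
        .collect();

    assert_eq!(outgoing, vec!["@alice:example.org".to_owned()]);
}

The behaviour of the chains in the patch is unchanged by these rewrites; errors and absent state are still silently skipped via `ok()` and flattening, only the shape of the chain and the closures differ.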