From ccf501a420d12d79b803ccf7334d0db978e4724e Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Mon, 18 Oct 2021 04:51:11 +0000
Subject: [PATCH 001/445] Initial implementation of /report, fixing #13
---
src/client_server/mod.rs | 2 +
src/client_server/report.rs | 75 +++++++++++++++++++++++++++++++++++++
src/main.rs | 1 +
3 files changed, 78 insertions(+)
create mode 100644 src/client_server/report.rs
diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs
index e0c340f1..115ddaf6 100644
--- a/src/client_server/mod.rs
+++ b/src/client_server/mod.rs
@@ -16,6 +16,7 @@ mod profile;
mod push;
mod read_marker;
mod redact;
+mod report;
mod room;
mod search;
mod session;
@@ -47,6 +48,7 @@ pub use profile::*;
pub use push::*;
pub use read_marker::*;
pub use redact::*;
+pub use report::*;
pub use room::*;
pub use search::*;
pub use session::*;
diff --git a/src/client_server/report.rs b/src/client_server/report.rs
new file mode 100644
index 00000000..e56cbc9f
--- /dev/null
+++ b/src/client_server/report.rs
@@ -0,0 +1,75 @@
+use std::sync::Arc;
+
+use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma};
+use ruma::{
+ api::client::{error::ErrorKind, r0::room::report_content},
+ events::room::message,
+ Int,
+};
+
+#[cfg(feature = "conduit_bin")]
+use rocket::post;
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
+///
+/// Reports an inappropriate event to homeserver admins
+///
+#[cfg_attr(
+ feature = "conduit_bin",
+ post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "
")
+)]
+#[tracing::instrument(skip(db, body))]
+pub async fn report_event_route(
+ db: DatabaseGuard,
+ body: Ruma<report_content::Request<'_>>,
+) -> ConduitResult<report_content::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let pdu = match db.rooms.get_pdu(&body.event_id) {
+ Ok(pdu) if !pdu.is_none() => pdu,
+ _ => {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid Event ID",
+ ))
+ }
+ }
+ .unwrap();
+
+ if body.score >= Int::from(0) && body.score <= Int::from(-100) {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid score, must be within 0 to -100",
+ ));
+ };
+
+ if body.reason.chars().count() > 160 {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Reason too long, should be 160 characters or fewer",
+ ));
+ };
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(body.room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ db.admin.send(AdminCommand::SendMessage(
+ message::RoomMessageEventContent::text_plain(format!(
+ "Report received from: {}\r\n\r\nEvent ID: {}\r\nRoom ID: {}\r\nSent By: {}\r\n\r\nReport Score: {}\r\nReport Reason: {}",
+ sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason,
+ )),
+ ));
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ Ok(report_content::Response {}.into())
+}
diff --git a/src/main.rs b/src/main.rs
index 84dfb1fc..56faa3e7 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -101,6 +101,7 @@ fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<
client_server::create_typing_event_route,
client_server::create_room_route,
client_server::redact_event_route,
+ client_server::report_event_route,
client_server::create_alias_route,
client_server::delete_alias_route,
client_server::get_alias_route,
From 1541b93f457de2d5fb8c37739d6791fa3f60312b Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Mon, 18 Oct 2021 05:38:41 +0000
Subject: [PATCH 002/445] Make reports look nicer and reduce spam potential,
increase max report length to 1000 characters
---
src/client_server/report.rs | 39 ++++++++++++++++++++++++++++++-------
1 file changed, 32 insertions(+), 7 deletions(-)
diff --git a/src/client_server/report.rs b/src/client_server/report.rs
index e56cbc9f..7f66fa13 100644
--- a/src/client_server/report.rs
+++ b/src/client_server/report.rs
@@ -8,7 +8,7 @@ use ruma::{
};
#[cfg(feature = "conduit_bin")]
-use rocket::post;
+use rocket::{http::RawStr, post};
/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
///
@@ -43,10 +43,10 @@ pub async fn report_event_route(
));
};
- if body.reason.chars().count() > 160 {
+ if body.reason.chars().count() > 1000 {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
- "Reason too long, should be 160 characters or fewer",
+ "Reason too long, should be 1000 characters or fewer",
));
};
@@ -61,10 +61,35 @@ pub async fn report_event_route(
let state_lock = mutex_state.lock().await;
db.admin.send(AdminCommand::SendMessage(
- message::RoomMessageEventContent::text_plain(format!(
- "Report received from: {}\r\n\r\nEvent ID: {}\r\nRoom ID: {}\r\nSent By: {}\r\n\r\nReport Score: {}\r\nReport Reason: {}",
- sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason,
- )),
+ message::RoomMessageEventContent::text_html(
+ format!(
+ concat!(
+ "Report received from: {}\r\n\r\n",
+ "Event ID: {}\r\n",
+ "Room ID: {}\r\n",
+ "Sent By: {}\r\n\r\n",
+ "Report Score: {}\r\n",
+ "Report Reason: {}"
+ ),
+ sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
+ )
+ .to_owned(),
+ format!(
+ concat!(
+ "Report received from: {}
",
+ "Event Info
Event ID: {}
Room ID: {}
Sent By: {}",
+ "
Report Info
Report Score: {}",
+ "Report Reason: {}
"
+ ),
+ sender_user,
+ pdu.event_id,
+ pdu.room_id,
+ pdu.sender,
+ body.score,
+ RawStr::new(&body.reason).html_escape()
+ )
+ .to_owned(),
+ ),
));
drop(state_lock);
From 50f931a2fda72d94a6190092dac18f2268c96af1 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Wed, 20 Oct 2021 11:12:06 +0000
Subject: [PATCH 003/445] Cleanup and fix validation in report.rs, lower max
report length, better html
---
src/client_server/report.rs | 53 +++++++++++++------------------------
1 file changed, 18 insertions(+), 35 deletions(-)
diff --git a/src/client_server/report.rs b/src/client_server/report.rs
index 7f66fa13..3dcb4d1c 100644
--- a/src/client_server/report.rs
+++ b/src/client_server/report.rs
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma};
use ruma::{
api::client::{error::ErrorKind, r0::room::report_content},
@@ -25,62 +23,49 @@ pub async fn report_event_route(
) -> ConduitResult<report_content::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
- let pdu = match db.rooms.get_pdu(&body.event_id) {
- Ok(pdu) if !pdu.is_none() => pdu,
+ let pdu = match db.rooms.get_pdu(&body.event_id)? {
+ Some(pdu) => pdu,
_ => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid Event ID",
))
}
- }
- .unwrap();
+ };
- if body.score >= Int::from(0) && body.score <= Int::from(-100) {
+ if body.score > Int::from(0) || body.score < Int::from(-100) {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid score, must be within 0 to -100",
));
};
- if body.reason.chars().count() > 1000 {
+ if body.reason.chars().count() > 250 {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
- "Reason too long, should be 1000 characters or fewer",
+ "Reason too long, should be 250 characters or fewer",
));
};
- let mutex_state = Arc::clone(
- db.globals
- .roomid_mutex_state
- .write()
- .unwrap()
- .entry(body.room_id.clone())
- .or_default(),
- );
- let state_lock = mutex_state.lock().await;
-
db.admin.send(AdminCommand::SendMessage(
message::RoomMessageEventContent::text_html(
format!(
- concat!(
- "Report received from: {}\r\n\r\n",
- "Event ID: {}\r\n",
- "Room ID: {}\r\n",
- "Sent By: {}\r\n\r\n",
- "Report Score: {}\r\n",
- "Report Reason: {}"
- ),
+ "Report received from: {}\n\n\
+ Event ID: {}\n\
+ Room ID: {}\n\
+ Sent By: {}\n\n\
+ Report Score: {}\n\
+ Report Reason: {}",
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
)
.to_owned(),
format!(
- concat!(
- "Report received from: {}
",
- "Event Info
Event ID: {}
Room ID: {}
Sent By: {}",
- "
Report Info
Report Score: {}",
- "Report Reason: {}
"
- ),
+ "Report received from: {0}\
+
- Event Info
- Event ID:
{1}
\
+ 🔗 - Room ID:
{2}
\
+ - Sent By: {3}
- \
+ Report Info
- Report Score: {4}
- Report Reason: {5}
\
+
",
sender_user,
pdu.event_id,
pdu.room_id,
@@ -92,8 +77,6 @@ pub async fn report_event_route(
),
));
- drop(state_lock);
-
db.flush()?;
Ok(report_content::Response {}.into())
From bbe16f84679061f1f4af5c1ab76f519279a234c0 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 24 Oct 2021 00:45:02 +0000
Subject: [PATCH 004/445] Update Ruma
---
Cargo.toml | 2 +-
src/client_server/room.rs | 13 +++++++++----
2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/Cargo.toml b/Cargo.toml
index dae68bf1..13a7af44 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request
# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "58cdcae1f9a8f4824bcbec1de1bb13e659c66804", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
diff --git a/src/client_server/room.rs b/src/client_server/room.rs
index 2d1fe237..ec09eec8 100644
--- a/src/client_server/room.rs
+++ b/src/client_server/room.rs
@@ -22,10 +22,10 @@ use ruma::{
},
EventType,
},
- serde::JsonObject,
+ serde::{JsonObject},
RoomAliasId, RoomId, RoomVersionId,
};
-use serde_json::value::to_raw_value;
+use serde_json::{value::to_raw_value};
use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc};
use tracing::{info, warn};
@@ -102,9 +102,14 @@ pub async fn create_room_route(
}
})?;
+ let creation_content = match body.creation_content.clone() {
+ Some(content) => content.deserialize().expect("Invalid creation content"),
+ None => create_room::CreationContent::new(),
+ };
+
let mut content = RoomCreateEventContent::new(sender_user.clone());
- content.federate = body.creation_content.federate;
- content.predecessor = body.creation_content.predecessor.clone();
+ content.federate = creation_content.federate;
+ content.predecessor = creation_content.predecessor.clone();
content.room_version = match body.room_version.clone() {
Some(room_version) => {
if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 {
From 8087a26a35fdcd495e28e8bff401fa3ba2afd9ef Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 24 Oct 2021 20:26:51 +0000
Subject: [PATCH 005/445] Make createRoom follow spec for m.room.create,
allowing creation of spaces
---
src/client_server/room.rs | 65 +++++++++++++++++++++++++++++++--------
1 file changed, 53 insertions(+), 12 deletions(-)
diff --git a/src/client_server/room.rs b/src/client_server/room.rs
index ec09eec8..5e59e81d 100644
--- a/src/client_server/room.rs
+++ b/src/client_server/room.rs
@@ -22,11 +22,16 @@ use ruma::{
},
EventType,
},
- serde::{JsonObject},
+ serde::{CanonicalJsonObject, JsonObject, Raw},
RoomAliasId, RoomId, RoomVersionId,
};
-use serde_json::{value::to_raw_value};
-use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc};
+use serde_json::{json, value::to_raw_value};
+use std::{
+ cmp::max,
+ collections::BTreeMap,
+ convert::{TryFrom, TryInto},
+ sync::Arc,
+};
use tracing::{info, warn};
#[cfg(feature = "conduit_bin")]
@@ -102,15 +107,7 @@ pub async fn create_room_route(
}
})?;
- let creation_content = match body.creation_content.clone() {
- Some(content) => content.deserialize().expect("Invalid creation content"),
- None => create_room::CreationContent::new(),
- };
-
- let mut content = RoomCreateEventContent::new(sender_user.clone());
- content.federate = creation_content.federate;
- content.predecessor = creation_content.predecessor.clone();
- content.room_version = match body.room_version.clone() {
+ let room_version = match body.room_version.clone() {
Some(room_version) => {
if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 {
room_version
@@ -124,6 +121,50 @@ pub async fn create_room_route(
None => RoomVersionId::Version6,
};
+ let content = match &body.creation_content {
+ Some(content) => {
+ let mut content = content
+ .deserialize_as::<CanonicalJsonObject>()
+ .expect("Invalid creation content");
+ content.insert(
+ "creator".into(),
+ json!(sender_user.clone()).try_into().unwrap(),
+ );
+ content.insert(
+ "room_version".into(),
+ json!(room_version.as_str()).try_into().unwrap(),
+ );
+ content
+ }
+ None => {
+ let mut content = Raw::<CanonicalJsonObject>::from_json(
+ to_raw_value(&RoomCreateEventContent::new(sender_user.clone())).unwrap(),
+ )
+ .deserialize_as::<CanonicalJsonObject>()
+ .unwrap();
+ content.insert(
+ "room_version".into(),
+ json!(room_version.as_str()).try_into().unwrap(),
+ );
+ content
+ }
+ };
+
+ // Validate creation content
+ match Raw::<RoomCreateEventContent>::from_json(
+ to_raw_value(&content).expect("Invalid creation content"),
+ )
+ .deserialize_as::<RoomCreateEventContent>()
+ {
+ Ok(_t) => {}
+ Err(_e) => {
+ return Err(Error::BadRequest(
+ ErrorKind::BadJson,
+ "Invalid creation content",
+ ))
+ }
+ };
+
// 1. The room create event
db.rooms.build_and_append_pdu(
PduBuilder {
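The creation-content handling above is what lets clients pass extra `m.room.create` keys (most importantly `"type": "m.space"`) through to the create event. As a rough illustration of the kind of payload this enables, here is a minimal sketch using plain `serde_json` rather than the exact ruma types from the diff; the helper name and field values are only examples:

```rust
use serde_json::{json, Value};

// Hypothetical client-side helper: the `creation_content` a client could send to
// `POST /_matrix/client/r0/createRoom` to create a space. With the patch above,
// the extra "type" key is preserved instead of being dropped.
fn space_creation_content(creator: &str) -> Value {
    json!({
        "creator": creator,      // also filled in server-side, as the diff shows
        "type": "m.space",       // marks the created room as a space
        "m.federate": true       // the flag the old `federate` field covered
    })
}

fn main() {
    let content = space_creation_content("@alice:example.org");
    println!("{}", serde_json::to_string_pretty(&content).unwrap());
}
```

A homeserver that rebuilds the content from a fixed `RoomCreateEventContent` struct would silently drop the `type` key, which is why the patch switches to a generic JSON object.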
From d5d25fb064449cb42a0243248e6fc2020bf77fe2 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 24 Oct 2021 22:13:08 +0000
Subject: [PATCH 006/445] Preserve all m.room.create entries when performing
room upgrades
---
src/client_server/room.rs | 37 ++++++++++++++++++++++++++++---------
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/src/client_server/room.rs b/src/client_server/room.rs
index 5e59e81d..0c62d2d6 100644
--- a/src/client_server/room.rs
+++ b/src/client_server/room.rs
@@ -478,7 +478,7 @@ pub async fn get_room_aliases_route(
.into())
}
-/// # `GET /_matrix/client/r0/rooms/{roomId}/upgrade`
+/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
///
/// Upgrades the room.
///
@@ -556,16 +556,15 @@ pub async fn upgrade_room_route(
);
let state_lock = mutex_state.lock().await;
- // Get the old room federations status
- let federate = serde_json::from_str::<RoomCreateEventContent>(
+ // Get the old room creation event
+ let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomCreate, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content
.get(),
)
- .map_err(|_| Error::bad_database("Invalid room event in database."))?
- .federate;
+ .map_err(|_| Error::bad_database("Invalid room event in database."))?;
// Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
@@ -574,10 +573,30 @@ pub async fn upgrade_room_route(
));
// Send a m.room.create event containing a predecessor field and the applicable room_version
- let mut create_event_content = RoomCreateEventContent::new(sender_user.clone());
- create_event_content.federate = federate;
- create_event_content.room_version = body.new_version.clone();
- create_event_content.predecessor = predecessor;
+ create_event_content.insert(
+ "creator".into(),
+ json!(sender_user.clone()).try_into().unwrap(),
+ );
+ create_event_content.insert(
+ "room_version".into(),
+ json!(body.new_version.clone()).try_into().unwrap(),
+ );
+ create_event_content.insert("predecessor".into(), json!(predecessor).try_into().unwrap());
+
+ // Validate creation event content
+ match Raw::<RoomCreateEventContent>::from_json(
+ to_raw_value(&create_event_content).expect("Error forming creation event"),
+ )
+ .deserialize_as::<RoomCreateEventContent>()
+ {
+ Ok(_t) => {}
+ Err(_e) => {
+ return Err(Error::BadRequest(
+ ErrorKind::BadJson,
+ "Error forming creation event",
+ ))
+ }
+ };
db.rooms.build_and_append_pdu(
PduBuilder {
From 743bdbe96125881418feb8583edb75ca703da4fc Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 26 Oct 2021 13:30:02 +0000
Subject: [PATCH 007/445] Add 'Federation publicRoom Name/topic keys are
correct' test to sytest whitelist
---
tests/sytest/sytest-whitelist | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist
index eda851ad..5afc3fd9 100644
--- a/tests/sytest/sytest-whitelist
+++ b/tests/sytest/sytest-whitelist
@@ -510,3 +510,4 @@ remote user can join room with version 5
remote user can join room with version 6
setting 'm.room.name' respects room powerlevel
setting 'm.room.power_levels' respects room powerlevel
+Federation publicRoom Name/topic keys are correct
From 86177faae7f812136d02d08fe2f6533eabe28642 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 7 Nov 2021 07:57:15 +0000
Subject: [PATCH 008/445] Fix join panic bug
---
src/client_server/membership.rs | 2 +-
src/server_server.rs | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs
index 732f6162..ec685ec9 100644
--- a/src/client_server/membership.rs
+++ b/src/client_server/membership.rs
@@ -934,7 +934,7 @@ pub(crate) async fn invite_helper<'a>(
unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone());
unsigned.insert(
"prev_sender".to_owned(),
- serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"),
+ to_raw_value(&prev_pdu.sender).expect("UserId is valid"),
);
}
diff --git a/src/server_server.rs b/src/server_server.rs
index 68e262b4..482edf0f 100644
--- a/src/server_server.rs
+++ b/src/server_server.rs
@@ -2721,7 +2721,7 @@ pub fn create_join_event_template_route(
unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone());
unsigned.insert(
"prev_sender".to_owned(),
- serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"),
+ to_raw_value(&prev_pdu.sender).expect("UserId is valid"),
);
}
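For context on the panic this fixes: a bare user ID is not valid JSON, so parsing it with `serde_json::from_str` fails, whereas serializing it produces the quoted JSON string the `unsigned` map needs. A standalone sketch (assuming `serde_json` with its `raw_value` feature; the identifiers below are illustrative, not the ruma types from the diff):

```rust
use serde_json::value::{to_raw_value, RawValue};

fn main() {
    let sender = "@alice:example.org";

    // What the old code effectively did: treat the bare MXID as JSON text.
    // A bare `@...` string is not valid JSON, so this errors (and `.expect()` panics).
    let parsed: Result<Box<RawValue>, _> = serde_json::from_str(sender);
    assert!(parsed.is_err());

    // What the fix does: serialize the value, which yields a quoted JSON string.
    let raw = to_raw_value(&sender).expect("serializing a &str cannot fail");
    assert_eq!(raw.get(), "\"@alice:example.org\"");
}
```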
From c4bce1d0c7ee0ba9c88fdccb11ac79112c19075b Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Tue, 9 Nov 2021 16:12:44 +0000
Subject: [PATCH 009/445] Cleanup room.rs; replace unwraps with map_err
---
src/client_server/room.rs | 86 +++++++++++++++++++++++----------------
1 file changed, 50 insertions(+), 36 deletions(-)
diff --git a/src/client_server/room.rs b/src/client_server/room.rs
index 0c62d2d6..47c7ee6f 100644
--- a/src/client_server/room.rs
+++ b/src/client_server/room.rs
@@ -22,7 +22,7 @@ use ruma::{
},
EventType,
},
- serde::{CanonicalJsonObject, JsonObject, Raw},
+ serde::{CanonicalJsonObject, JsonObject},
RoomAliasId, RoomId, RoomVersionId,
};
use serde_json::{json, value::to_raw_value};
@@ -128,42 +128,48 @@ pub async fn create_room_route(
.expect("Invalid creation content");
content.insert(
"creator".into(),
- json!(sender_user.clone()).try_into().unwrap(),
+ json!(&sender_user).try_into().map_err(|_| {
+ Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
+ })?,
);
content.insert(
"room_version".into(),
- json!(room_version.as_str()).try_into().unwrap(),
+ json!(room_version.as_str()).try_into().map_err(|_| {
+ Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
+ })?,
);
content
}
None => {
- let mut content = Raw::<CanonicalJsonObject>::from_json(
- to_raw_value(&RoomCreateEventContent::new(sender_user.clone())).unwrap(),
+ let mut content = serde_json::from_str::<CanonicalJsonObject>(
+ to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
+ .get(),
)
- .deserialize_as::<CanonicalJsonObject>()
.unwrap();
content.insert(
"room_version".into(),
- json!(room_version.as_str()).try_into().unwrap(),
+ json!(room_version.as_str()).try_into().map_err(|_| {
+ Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
+ })?,
);
content
}
};
// Validate creation content
- match Raw::<RoomCreateEventContent>::from_json(
- to_raw_value(&content).expect("Invalid creation content"),
- )
- .deserialize_as::<RoomCreateEventContent>()
- {
- Ok(_t) => {}
- Err(_e) => {
- return Err(Error::BadRequest(
- ErrorKind::BadJson,
- "Invalid creation content",
- ))
- }
- };
+ let de_result = serde_json::from_str::<RoomCreateEventContent>(
+ to_raw_value(&content)
+ .expect("Invalid creation content")
+ .get(),
+ );
+
+ if let Err(_) = de_result {
+ return Err(Error::BadRequest(
+ ErrorKind::BadJson,
+ "Invalid creation content",
+ ));
+ }
// 1. The room create event
db.rooms.build_and_append_pdu(
@@ -575,28 +581,36 @@ pub async fn upgrade_room_route(
// Send a m.room.create event containing a predecessor field and the applicable room_version
create_event_content.insert(
"creator".into(),
- json!(sender_user.clone()).try_into().unwrap(),
+ json!(&sender_user)
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
create_event_content.insert(
"room_version".into(),
- json!(body.new_version.clone()).try_into().unwrap(),
+ json!(&body.new_version)
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
+ );
+ create_event_content.insert(
+ "predecessor".into(),
+ json!(predecessor)
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
- create_event_content.insert("predecessor".into(), json!(predecessor).try_into().unwrap());
// Validate creation event content
- match Raw::<RoomCreateEventContent>::from_json(
- to_raw_value(&create_event_content).expect("Error forming creation event"),
- )
- .deserialize_as::<RoomCreateEventContent>()
- {
- Ok(_t) => {}
- Err(_e) => {
- return Err(Error::BadRequest(
- ErrorKind::BadJson,
- "Error forming creation event",
- ))
- }
- };
+ let de_result = serde_json::from_str::<RoomCreateEventContent>(
+ to_raw_value(&create_event_content)
+ .expect("Error forming creation event")
+ .get(),
+ );
+
+ if let Err(_) = de_result {
+ return Err(Error::BadRequest(
+ ErrorKind::BadJson,
+ "Error forming creation event",
+ ));
+ }
db.rooms.build_and_append_pdu(
PduBuilder {
From 109892b4b754e1666d4f00d9aec6356b46093668 Mon Sep 17 00:00:00 2001
From: Moritz Bitsch
Date: Fri, 1 Oct 2021 15:53:16 +0200
Subject: [PATCH 010/445] Implement turn server settings
This fills out the info in /_matrix/client/r0/voip/turnServer with
values specified in the server config.
---
src/client_server/voip.rs | 14 +++++++-------
src/database.rs | 12 ++++++++++++
src/database/globals.rs | 16 ++++++++++++++++
3 files changed, 35 insertions(+), 7 deletions(-)
diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs
index 2a7f28e1..83f39a48 100644
--- a/src/client_server/voip.rs
+++ b/src/client_server/voip.rs
@@ -1,4 +1,4 @@
-use crate::ConduitResult;
+use crate::{database::DatabaseGuard, ConduitResult};
use ruma::api::client::r0::voip::get_turn_server_info;
use std::time::Duration;
@@ -9,13 +9,13 @@ use rocket::get;
///
/// TODO: Returns information about the recommended turn server.
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))]
-#[tracing::instrument]
-pub async fn turn_server_route() -> ConduitResult<get_turn_server_info::Response> {
+#[tracing::instrument(skip(db))]
+pub async fn turn_server_route(db: DatabaseGuard) -> ConduitResult<get_turn_server_info::Response> {
Ok(get_turn_server_info::Response {
- username: "".to_owned(),
- password: "".to_owned(),
- uris: Vec::new(),
- ttl: Duration::from_secs(60 * 60 * 24),
+ username: db.globals.turn_username().clone(),
+ password: db.globals.turn_password().clone(),
+ uris: db.globals.turn_uris().to_vec(),
+ ttl: Duration::from_secs(db.globals.turn_ttl()),
}
.into())
}
diff --git a/src/database.rs b/src/database.rs
index 8cf4f640..85213c00 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -74,6 +74,14 @@ pub struct Config {
trusted_servers: Vec<Box<ServerName>>,
#[serde(default = "default_log")]
pub log: String,
+ #[serde(default)]
+ turn_username: String,
+ #[serde(default)]
+ turn_password: String,
+ #[serde(default = "Vec::new")]
+ turn_uris: Vec<String>,
+ #[serde(default = "default_turn_ttl")]
+ turn_ttl: u64,
#[serde(flatten)]
catchall: BTreeMap<String, IgnoredAny>,
@@ -131,6 +139,10 @@ fn default_log() -> String {
"info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
}
+fn default_turn_ttl() -> u64 {
+ 60 * 60 * 24
+}
+
#[cfg(feature = "sled")]
pub type Engine = abstraction::sled::Engine;
diff --git a/src/database/globals.rs b/src/database/globals.rs
index f1cbbd92..7338f1ed 100644
--- a/src/database/globals.rs
+++ b/src/database/globals.rs
@@ -226,6 +226,22 @@ impl Globals {
self.jwt_decoding_key.as_ref()
}
+ pub fn turn_password(&self) -> &String {
+ &self.config.turn_password
+ }
+
+ pub fn turn_ttl(&self) -> u64 {
+ self.config.turn_ttl
+ }
+
+ pub fn turn_uris(&self) -> &[String] {
+ &self.config.turn_uris
+ }
+
+ pub fn turn_username(&self) -> &String {
+ &self.config.turn_username
+ }
+
/// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones.
///
From 9fccbb014a3297961fd169ce12363564e56afbc3 Mon Sep 17 00:00:00 2001
From: Moritz Bitsch
Date: Sat, 2 Oct 2021 00:37:39 +0200
Subject: [PATCH 011/445] Implement TURN server authentication with HMAC
This is a preferred method to allow limited access to the TURN server.
---
Cargo.lock | 35 +++++++++++++++++++++++++++
Cargo.toml | 3 +++
src/client_server/voip.rs | 51 +++++++++++++++++++++++++++++++++------
src/database.rs | 2 ++
src/database/globals.rs | 4 +++
5 files changed, 88 insertions(+), 7 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 293bcff7..68293896 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -245,6 +245,7 @@ dependencies = [
"crossbeam",
"directories",
"heed",
+ "hmac",
"http",
"image",
"jsonwebtoken",
@@ -266,6 +267,7 @@ dependencies = [
"serde",
"serde_json",
"serde_yaml",
+ "sha-1",
"sled",
"thiserror",
"thread_local",
@@ -428,6 +430,16 @@ dependencies = [
"lazy_static",
]
+[[package]]
+name = "crypto-mac"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
+dependencies = [
+ "generic-array",
+ "subtle",
+]
+
[[package]]
name = "curve25519-dalek"
version = "3.2.0"
@@ -897,6 +909,16 @@ dependencies = [
"libc",
]
+[[package]]
+name = "hmac"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
+dependencies = [
+ "crypto-mac",
+ "digest",
+]
+
[[package]]
name = "hostname"
version = "0.3.1"
@@ -2422,6 +2444,19 @@ dependencies = [
"yaml-rust",
]
+[[package]]
+name = "sha-1"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6"
+dependencies = [
+ "block-buffer",
+ "cfg-if 1.0.0",
+ "cpufeatures",
+ "digest",
+ "opaque-debug",
+]
+
[[package]]
name = "sha1"
version = "0.6.0"
diff --git a/Cargo.toml b/Cargo.toml
index 13a7af44..fc83d11b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -79,6 +79,9 @@ num_cpus = "1.13.0"
threadpool = "1.8.1"
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
thread_local = "1.1.3"
+# used for TURN server authentication
+hmac = "0.11.0"
+sha-1 = "0.9.8"
[features]
default = ["conduit_bin", "backend_sqlite"]
diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs
index 83f39a48..9c3b20d4 100644
--- a/src/client_server/voip.rs
+++ b/src/client_server/voip.rs
@@ -1,6 +1,11 @@
-use crate::{database::DatabaseGuard, ConduitResult};
+use crate::{database::DatabaseGuard, ConduitResult, Ruma};
+use hmac::{Hmac, Mac, NewMac};
use ruma::api::client::r0::voip::get_turn_server_info;
-use std::time::Duration;
+use ruma::SecondsSinceUnixEpoch;
+use sha1::Sha1;
+use std::time::{Duration, SystemTime};
+
+type HmacSha1 = Hmac<Sha1>;
#[cfg(feature = "conduit_bin")]
use rocket::get;
@@ -8,12 +13,44 @@ use rocket::get;
/// # `GET /_matrix/client/r0/voip/turnServer`
///
/// TODO: Returns information about the recommended turn server.
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))]
-#[tracing::instrument(skip(db))]
-pub async fn turn_server_route(db: DatabaseGuard) -> ConduitResult<get_turn_server_info::Response> {
+#[cfg_attr(
+ feature = "conduit_bin",
+ get("/_matrix/client/r0/voip/turnServer", data = "")
+)]
+#[tracing::instrument(skip(body, db))]
+pub async fn turn_server_route(
+ body: Ruma<get_turn_server_info::Request>,
+ db: DatabaseGuard,
+) -> ConduitResult<get_turn_server_info::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let turn_secret = db.globals.turn_secret();
+
+ let (username, password) = if turn_secret != "" {
+ let expiry = SecondsSinceUnixEpoch::from_system_time(
+ SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
+ )
+ .expect("time is valid");
+
+ let username: String = format!("{}:{}", expiry.get(), sender_user);
+
+ let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
+ .expect("HMAC can take key of any size");
+ mac.update(username.as_bytes());
+
+ let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
+
+ (username, password)
+ } else {
+ (
+ db.globals.turn_username().clone(),
+ db.globals.turn_password().clone(),
+ )
+ };
+
Ok(get_turn_server_info::Response {
- username: db.globals.turn_username().clone(),
- password: db.globals.turn_password().clone(),
+ username: username,
+ password: password,
uris: db.globals.turn_uris().to_vec(),
ttl: Duration::from_secs(db.globals.turn_ttl()),
}
diff --git a/src/database.rs b/src/database.rs
index 85213c00..080e24b3 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -80,6 +80,8 @@ pub struct Config {
turn_password: String,
#[serde(default = "Vec::new")]
turn_uris: Vec<String>,
+ #[serde(default)]
+ turn_secret: String,
#[serde(default = "default_turn_ttl")]
turn_ttl: u64,
diff --git a/src/database/globals.rs b/src/database/globals.rs
index 7338f1ed..05ecb568 100644
--- a/src/database/globals.rs
+++ b/src/database/globals.rs
@@ -242,6 +242,10 @@ impl Globals {
&self.config.turn_username
}
+ pub fn turn_secret(&self) -> &String {
+ &self.config.turn_secret
+ }
+
/// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones.
///
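The HMAC branch above implements the ephemeral-credential scheme used by TURN servers such as coturn in `use-auth-secret` mode: the username is `<expiry>:<user id>` and the password is the base64-encoded HMAC-SHA1 of that username under the shared secret. A self-contained sketch of just that computation, assuming the same crates the patch adds (hmac 0.11, sha-1 0.9, base64 0.13); the function name is illustrative:

```rust
use hmac::{Hmac, Mac, NewMac};
use sha1::Sha1;

type HmacSha1 = Hmac<Sha1>;

// Illustrative helper: ephemeral TURN credentials as computed in the route above.
// username = "<unix expiry>:<matrix user id>", password = base64(HMAC-SHA1(secret, username)).
fn turn_credentials(turn_secret: &str, user_id: &str, expiry_unix: u64) -> (String, String) {
    let username = format!("{}:{}", expiry_unix, user_id);
    let mut mac =
        HmacSha1::new_from_slice(turn_secret.as_bytes()).expect("HMAC can take key of any size");
    mac.update(username.as_bytes());
    let password = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
    (username, password)
}

fn main() {
    let (username, password) =
        turn_credentials("shared-turn-secret", "@alice:example.org", 1_700_000_000);
    println!("username: {}", username);
    println!("password: {}", password);
}
```

The TTL only bounds how long the credentials stay valid; the TURN server recomputes the same HMAC from the username and rejects the request once the embedded expiry has passed.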
From 2fff720df38c83673269fa597361c5631e991c9a Mon Sep 17 00:00:00 2001
From: Jonas Zohren
Date: Sun, 21 Nov 2021 17:34:08 +0000
Subject: [PATCH 012/445] CI: New Multiarch builds and Docker images + cargo
clippy/test output now integrated into GitLab
---
.dockerignore | 2 +
.gitlab-ci.yml | 376 +++++++++++-------------
Cargo.lock | 36 +--
Cargo.toml | 3 +-
DEPLOY.md | 52 ++--
Dockerfile | 137 ++++-----
docker/README.md | 105 +++----
docker/ci-binaries-packaging.Dockerfile | 48 +--
docker/healthcheck.sh | 6 +-
9 files changed, 356 insertions(+), 409 deletions(-)
diff --git a/.dockerignore b/.dockerignore
index 80b30721..933b380f 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -14,6 +14,8 @@ docker-compose*
# Git folder
.git
.gitea
+.gitlab
+.github
# Dot files
.env
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 386986fd..6f2e0fe3 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -9,7 +9,6 @@ variables:
FF_USE_FASTZIP: 1
CACHE_COMPRESSION_LEVEL: fastest
-
# --------------------------------------------------------------------- #
# Cargo: Compiling for different architectures #
# --------------------------------------------------------------------- #
@@ -20,7 +19,7 @@ variables:
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
- if: '$CI_COMMIT_BRANCH == "next"'
- - if: '$CI_COMMIT_TAG'
+ - if: "$CI_COMMIT_TAG"
interruptible: true
image: "rust:latest"
tags: ["docker"]
@@ -28,258 +27,209 @@ variables:
paths:
- cargohome
- target/
- key: "build_cache-$TARGET-release"
+ key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release"
variables:
- CARGO_PROFILE_RELEASE_LTO=true
- CARGO_PROFILE_RELEASE_CODEGEN_UNITS=1
+ CARGO_PROFILE_RELEASE_LTO: "true"
+ CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
before_script:
- 'echo "Building for target $TARGET"'
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
- - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
- - 'apt-get update -yqq'
- - 'echo "Installing packages: $NEEDED_PACKAGES"'
- - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES"
+ - "rustc --version && cargo --version && rustup show" # Print version info for debugging
- "rustup target add $TARGET"
script:
- time cargo build --target $TARGET --release
- - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"'
+ - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"'
artifacts:
expire_in: never
-
-build:release:cargo:x86_64-unknown-linux-gnu:
+build:release:cargo:x86_64-unknown-linux-musl-with-debug:
extends: .build-cargo-shared-settings
+ image: messense/rust-musl-cross:x86_64-musl
variables:
- TARGET: "x86_64-unknown-linux-gnu"
+ CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling
+ TARGET: "x86_64-unknown-linux-musl"
+ after_script:
+ - "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug"
artifacts:
- name: "conduit-x86_64-unknown-linux-gnu"
+ name: "conduit-x86_64-unknown-linux-musl-with-debug"
paths:
- - "conduit-x86_64-unknown-linux-gnu"
- expose_as: "Conduit for x86_64-unknown-linux-gnu"
-
-build:release:cargo:armv7-unknown-linux-gnueabihf:
- extends: .build-cargo-shared-settings
- variables:
- TARGET: "armv7-unknown-linux-gnueabihf"
- NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross"
- CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc
- CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc
- CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++
- artifacts:
- name: "conduit-armv7-unknown-linux-gnueabihf"
- paths:
- - "conduit-armv7-unknown-linux-gnueabihf"
- expose_as: "Conduit for armv7-unknown-linux-gnueabihf"
-
-build:release:cargo:aarch64-unknown-linux-gnu:
- extends: .build-cargo-shared-settings
- variables:
- TARGET: "aarch64-unknown-linux-gnu"
- NEEDED_PACKAGES: "build-essential gcc-10-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross"
- CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc
- CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc
- CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++
- TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-10"
- TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-10"
- artifacts:
- name: "conduit-aarch64-unknown-linux-gnu"
- paths:
- - "conduit-aarch64-unknown-linux-gnu"
- expose_as: "Conduit for aarch64-unknown-linux-gnu"
+ - "conduit-x86_64-unknown-linux-musl-with-debug"
+ expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug"
build:release:cargo:x86_64-unknown-linux-musl:
extends: .build-cargo-shared-settings
- image: "rust:alpine"
+ image: messense/rust-musl-cross:x86_64-musl
variables:
TARGET: "x86_64-unknown-linux-musl"
- before_script:
- - 'echo "Building for target $TARGET"'
- - 'mkdir -p cargohome && CARGOHOME="cargohome"'
- - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
- - "rustup target add $TARGET"
- - "apk add libc-dev"
artifacts:
name: "conduit-x86_64-unknown-linux-musl"
paths:
- "conduit-x86_64-unknown-linux-musl"
expose_as: "Conduit for x86_64-unknown-linux-musl"
+build:release:cargo:arm-unknown-linux-musleabihf:
+ extends: .build-cargo-shared-settings
+ image: messense/rust-musl-cross:arm-musleabihf
+ variables:
+ TARGET: "arm-unknown-linux-musleabihf"
+ artifacts:
+ name: "conduit-arm-unknown-linux-musleabihf"
+ paths:
+ - "conduit-arm-unknown-linux-musleabihf"
+ expose_as: "Conduit for arm-unknown-linux-musleabihf"
+build:release:cargo:armv7-unknown-linux-musleabihf:
+ extends: .build-cargo-shared-settings
+ image: messense/rust-musl-cross:armv7-musleabihf
+ variables:
+ TARGET: "armv7-unknown-linux-musleabihf"
+ artifacts:
+ name: "conduit-armv7-unknown-linux-musleabihf"
+ paths:
+ - "conduit-armv7-unknown-linux-musleabihf"
+ expose_as: "Conduit for armv7-unknown-linux-musleabihf"
+
+build:release:cargo:aarch64-unknown-linux-musl:
+ extends: .build-cargo-shared-settings
+ image: messense/rust-musl-cross:aarch64-musl
+ variables:
+ TARGET: "aarch64-unknown-linux-musl"
+ artifacts:
+ name: "conduit-aarch64-unknown-linux-musl"
+ paths:
+ - "conduit-aarch64-unknown-linux-musl"
+ expose_as: "Conduit for aarch64-unknown-linux-musl"
.cargo-debug-shared-settings:
extends: ".build-cargo-shared-settings"
rules:
- - if: '$CI_COMMIT_BRANCH'
- - if: '$CI_COMMIT_TAG'
+ - if: '$CI_COMMIT_BRANCH != "master"'
cache:
- key: "build_cache-$TARGET-debug"
+ key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug"
script:
- "time cargo build --target $TARGET"
- 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"'
artifacts:
expire_in: 4 weeks
-build:debug:cargo:x86_64-unknown-linux-gnu:
- extends: ".cargo-debug-shared-settings"
- variables:
- TARGET: "x86_64-unknown-linux-gnu"
- artifacts:
- name: "conduit-debug-x86_64-unknown-linux-gnu"
- paths:
- - "conduit-debug-x86_64-unknown-linux-gnu"
- expose_as: "Conduit DEBUG for x86_64-unknown-linux-gnu"
-
build:debug:cargo:x86_64-unknown-linux-musl:
extends: ".cargo-debug-shared-settings"
- image: "rust:alpine"
+ image: messense/rust-musl-cross:x86_64-musl
variables:
TARGET: "x86_64-unknown-linux-musl"
- before_script:
- - 'echo "Building for target $TARGET"'
- - 'mkdir -p cargohome && CARGOHOME="cargohome"'
- - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
- - "rustup target add $TARGET"
- - "apk add libc-dev"
artifacts:
name: "conduit-debug-x86_64-unknown-linux-musl"
paths:
- "conduit-debug-x86_64-unknown-linux-musl"
expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl"
-
-
-# --------------------------------------------------------------------- #
-# Cargo: Compiling deb packages for different architectures #
-# --------------------------------------------------------------------- #
-
-
-.build-cargo-deb-shared-settings:
- stage: "build"
- needs: [ ]
- rules:
- - if: '$CI_COMMIT_BRANCH == "master"'
- - if: '$CI_COMMIT_BRANCH == "next"'
- - if: '$CI_COMMIT_TAG'
- interruptible: true
- image: "rust:latest"
- tags: ["docker"]
- cache:
- paths:
- - cargohome
- - target/
- key: "build_cache-deb-$TARGET"
- before_script:
- - 'echo "Building debian package for target $TARGET"'
- - 'mkdir -p cargohome && CARGOHOME="cargohome"'
- - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
- - 'apt-get update -yqq'
- - 'echo "Installing packages: $NEEDED_PACKAGES"'
- - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES"
- - "rustup target add $TARGET"
- - "cargo install cargo-deb"
- script:
- - time cargo deb --target $TARGET
- - 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"'
-
-build:cargo-deb:x86_64-unknown-linux-gnu:
- extends: .build-cargo-deb-shared-settings
- variables:
- TARGET: "x86_64-unknown-linux-gnu"
- NEEDED_PACKAGES: ""
- artifacts:
- name: "conduit-x86_64-unknown-linux-gnu.deb"
- paths:
- - "conduit-x86_64-unknown-linux-gnu.deb"
- expose_as: "Debian Package x86_64"
-
-
# --------------------------------------------------------------------- #
# Create and publish docker image #
# --------------------------------------------------------------------- #
-# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image
.docker-shared-settings:
stage: "build docker image"
- needs: []
- interruptible: true
- image:
- name: "gcr.io/kaniko-project/executor:debug"
- entrypoint: [""]
+ image: jdrouet/docker-with-buildx:stable
tags: ["docker"]
+ services:
+ - docker:dind
+ needs:
+ - "build:release:cargo:x86_64-unknown-linux-musl"
+ - "build:release:cargo:arm-unknown-linux-musleabihf"
+ - "build:release:cargo:armv7-unknown-linux-musleabihf"
+ - "build:release:cargo:aarch64-unknown-linux-musl"
variables:
- # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache
- KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache"
+ DOCKER_HOST: tcp://docker:2375/
+ DOCKER_TLS_CERTDIR: ""
+ DOCKER_DRIVER: overlay2
+ PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64"
+ DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
before_script:
- - "mkdir -p /kaniko/.docker"
- - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json'
-
+ - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+ # Only log in to Dockerhub if the credentials are given:
+ - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
+ script:
+ # Prepare buildx to build multiarch stuff:
+ - docker context create 'ci-context'
+ - docker buildx create --name 'multiarch-builder' --use 'ci-context'
+ # Copy binaries to their docker arch path
+ - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
+ - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
+ - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
+ - mkdir -p linux/arm64/ && mv ./conduit-aarch64-unknown-linux-musl linux/arm64/v8
+ # Actually create multiarch image:
+ - >
+ docker buildx build
+ --pull
+ --push
+ --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+ --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
+ --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
+ --platform "$PLATFORMS"
+ --tag "$GL_IMAGE_TAG"
+ --tag "$GL_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"
+ --file "$DOCKER_FILE" .
+ # Only try to push to docker hub, if auth data for dockerhub exists:
+ - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG"; fi
+ - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"; fi
build:docker:next:
extends: .docker-shared-settings
- needs:
- - "build:release:cargo:x86_64-unknown-linux-musl"
- script:
- - >
- /kaniko/executor
- $KANIKO_CACHE_ARGS
- --force
- --context $CI_PROJECT_DIR
- --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
- --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
- --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
- --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
- --destination "$CI_REGISTRY_IMAGE/conduit:next"
- --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine"
- --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA"
- --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
- --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine"
- --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA"
rules:
- if: '$CI_COMMIT_BRANCH == "next"'
-
+ variables:
+ GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
+ DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
build:docker:master:
extends: .docker-shared-settings
- needs:
- - "build:release:cargo:x86_64-unknown-linux-musl"
- script:
- - >
- /kaniko/executor
- $KANIKO_CACHE_ARGS
- --context $CI_PROJECT_DIR
- --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
- --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
- --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
- --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
- --destination "$CI_REGISTRY_IMAGE/conduit:latest"
- --destination "$CI_REGISTRY_IMAGE/conduit:latest-alpine"
- --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
- --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest-alpine"
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
+ variables:
+ GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
+ DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
-
-build:docker:tags:
- extends: .docker-shared-settings
- needs:
- - "build:release:cargo:x86_64-unknown-linux-musl"
- script:
- - >
- /kaniko/executor
- $KANIKO_CACHE_ARGS
- --context $CI_PROJECT_DIR
- --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
- --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
- --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
- --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
- --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG"
- --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG-alpine"
- --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG"
- --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG-alpine"
- rules:
- - if: '$CI_COMMIT_TAG'
-
-
+## Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image
+#.docker-shared-settings:
+# stage: "build docker image"
+# needs: []
+# interruptible: true
+# image:
+# name: "gcr.io/kaniko-project/executor:debug"
+# entrypoint: [""]
+# tags: ["docker"]
+# variables:
+# # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache
+# KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache"
+# before_script:
+# - "mkdir -p /kaniko/.docker"
+# - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json'
+#
+#
+#build:docker:next:
+# extends: .docker-shared-settings
+# needs:
+# - "build:release:cargo:x86_64-unknown-linux-musl"
+# script:
+# - >
+# /kaniko/executor
+# $KANIKO_CACHE_ARGS
+# --force
+# --context $CI_PROJECT_DIR
+# --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+# --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
+# --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
+# --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
+# --destination "$CI_REGISTRY_IMAGE/conduit:next"
+# --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine"
+# --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA"
+# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
+# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine"
+# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA"
+# rules:
+# - if: '$CI_COMMIT_BRANCH == "next"'
+#
+#
# --------------------------------------------------------------------- #
# Run tests #
@@ -287,9 +237,9 @@ build:docker:tags:
test:cargo:
stage: "test"
- needs: [ ]
+ needs: []
image: "rust:latest"
- tags: [ "docker" ]
+ tags: ["docker"]
variables:
CARGO_HOME: "cargohome"
cache:
@@ -301,13 +251,20 @@ test:cargo:
before_script:
- mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps"
- apt-get update -yqq
- - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config
+ - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget
- rustup component add clippy rustfmt
+ - wget "https://faulty-storage.de/gitlab-report"
+ - chmod +x ./gitlab-report
script:
- - rustc --version && cargo --version # Print version info for debugging
+ - rustc --version && cargo --version # Print version info for debugging
- cargo fmt --all -- --check
- - cargo test --workspace --verbose --locked
- - cargo clippy
+ - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
+ - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
+ artifacts:
+ when: always
+ reports:
+ junit: report.xml
+ codequality: gl-code-quality-report.json
test:sytest:
stage: "test"
@@ -316,8 +273,8 @@ test:sytest:
- "build:debug:cargo:x86_64-unknown-linux-musl"
image:
name: "valkum/sytest-conduit:latest"
- entrypoint: [ "" ]
- tags: [ "docker" ]
+ entrypoint: [""]
+ tags: ["docker"]
variables:
PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz"
before_script:
@@ -330,7 +287,7 @@ test:sytest:
script:
- "SYTEST_EXIT_CODE=0"
- "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1"
- - "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap"
+ - 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap'
- "exit $SYTEST_EXIT_CODE"
artifacts:
when: always
@@ -340,7 +297,6 @@ test:sytest:
reports:
junit: "$CI_PROJECT_DIR/sytest.xml"
-
# --------------------------------------------------------------------- #
# Store binaries as package so they have download urls #
# --------------------------------------------------------------------- #
@@ -348,25 +304,31 @@ test:sytest:
publish:package:
stage: "upload artifacts"
needs:
- - "build:release:cargo:x86_64-unknown-linux-gnu"
- - "build:release:cargo:armv7-unknown-linux-gnueabihf"
- - "build:release:cargo:aarch64-unknown-linux-gnu"
- "build:release:cargo:x86_64-unknown-linux-musl"
- - "build:cargo-deb:x86_64-unknown-linux-gnu"
+ - "build:release:cargo:arm-unknown-linux-musleabihf"
+ - "build:release:cargo:armv7-unknown-linux-musleabihf"
+ - "build:release:cargo:aarch64-unknown-linux-musl"
+ # - "build:cargo-deb:x86_64-unknown-linux-gnu"
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
- if: '$CI_COMMIT_BRANCH == "next"'
- - if: '$CI_COMMIT_TAG'
+ - if: "$CI_COMMIT_TAG"
image: curlimages/curl:latest
tags: ["docker"]
variables:
GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
script:
- 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
- - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
- - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"'
- - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"'
- - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
-
+ - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"'
+ - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"'
+ - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"'
+# Avoid duplicate pipelines
+# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
+workflow:
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
+ when: never
+ - if: "$CI_COMMIT_BRANCH"
diff --git a/Cargo.lock b/Cargo.lock
index 293bcff7..166d67fc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1968,7 +1968,7 @@ dependencies = [
[[package]]
name = "ruma"
version = "0.4.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"assign",
"js_int",
@@ -1989,7 +1989,7 @@ dependencies = [
[[package]]
name = "ruma-api"
version = "0.18.5"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"bytes",
"http",
@@ -2005,7 +2005,7 @@ dependencies = [
[[package]]
name = "ruma-api-macros"
version = "0.18.5"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@@ -2016,7 +2016,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.4.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"ruma-api",
"ruma-common",
@@ -2030,7 +2030,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.12.3"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"assign",
"bytes",
@@ -2050,7 +2050,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.6.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"indexmap",
"js_int",
@@ -2065,7 +2065,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.24.6"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"indoc",
"js_int",
@@ -2081,7 +2081,7 @@ dependencies = [
[[package]]
name = "ruma-events-macros"
version = "0.24.6"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@@ -2092,7 +2092,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.3.1"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"js_int",
"ruma-api",
@@ -2107,7 +2107,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers"
version = "0.20.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"paste",
"percent-encoding",
@@ -2122,7 +2122,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-macros"
version = "0.20.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"quote",
"ruma-identifiers-validation",
@@ -2132,7 +2132,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"thiserror",
]
@@ -2140,7 +2140,7 @@ dependencies = [
[[package]]
name = "ruma-identity-service-api"
version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"js_int",
"ruma-api",
@@ -2153,7 +2153,7 @@ dependencies = [
[[package]]
name = "ruma-push-gateway-api"
version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"js_int",
"ruma-api",
@@ -2168,7 +2168,7 @@ dependencies = [
[[package]]
name = "ruma-serde"
version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"bytes",
"form_urlencoded",
@@ -2182,7 +2182,7 @@ dependencies = [
[[package]]
name = "ruma-serde-macros"
version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@@ -2193,7 +2193,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.9.0"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"base64 0.13.0",
"ed25519-dalek",
@@ -2210,7 +2210,7 @@ dependencies = [
[[package]]
name = "ruma-state-res"
version = "0.4.1"
-source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197"
+source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [
"itertools 0.10.1",
"js_int",
diff --git a/Cargo.toml b/Cargo.toml
index 13a7af44..d0dd6413 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,13 +120,12 @@ maintainer-scripts = "debian/"
systemd-units = { unit-name = "matrix-conduit" }
[profile.dev]
-lto = 'thin'
+lto = 'off'
incremental = true
[profile.release]
lto = 'thin'
incremental = true
-
codegen-units=32
# If you want to make flamegraphs, enable debug info:
# debug = true
diff --git a/DEPLOY.md b/DEPLOY.md
index 84dd2beb..6470c902 100644
--- a/DEPLOY.md
+++ b/DEPLOY.md
@@ -2,25 +2,30 @@
## Getting help
-If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
+If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us
+in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
## Installing Conduit
+Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
+only offer Linux binaries.
+
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
-| CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) |
-| -------------------- | ------------------------------------- | ----------------------- |
-| x84_64 / amd64 | [Download][x84_64-gnu] | [Download][x84_64-musl] |
-| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - |
-| armv8 / aarch64 | [Download][armv8-gnu] | - |
-
-[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:release:cargo:x86_64-unknown-linux-gnu
+| CPU Architecture | Download link |
+| ------------------------------------------- | ----------------------- |
+| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl] |
+| armv6 | [Download][armv6-musl] |
+| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl] |
+| armv8 / aarch64 | [Download][armv8-musl] |
[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
-[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:release:cargo:armv7-unknown-linux-gnueabihf
+[armv6-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
-[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:release:cargo:aarch64-unknown-linux-gnu
+[armv7-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
+
+[armv8-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
```bash
$ sudo wget -O /usr/local/bin/matrix-conduit
@@ -32,15 +37,15 @@ Alternatively, you may compile the binary yourself using
```bash
$ cargo build --release
```
+
Note that this currently requires Rust 1.50.
If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md).
-
## Adding a Conduit user
-While Conduit can run as any user it is usually better to use dedicated users for different services.
-This also allows you to make sure that the file permissions are correctly set up.
+While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
+you to make sure that the file permissions are correctly set up.
In Debian you can use this command to create a Conduit user:
@@ -50,9 +55,8 @@ sudo adduser --system conduit --no-create-home
## Setting up a systemd service
-Now we'll set up a systemd service for Conduit, so it's easy to start/stop
-Conduit and set it to autostart when your server reboots. Simply paste the
-default systemd service you can find below into
+Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
+server reboots. Simply paste the default systemd service you can find below into
`/etc/systemd/system/conduit.service`.
```systemd
@@ -77,10 +81,10 @@ Finally, run
$ sudo systemctl daemon-reload
```
-
## Creating the Conduit configuration file
-Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.**
+Now we need to create Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
+to read it. You need to change at least the server name.**
```toml
[global]
@@ -128,8 +132,8 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re
## Setting the correct file permissions
-As we are using a Conduit specific user we need to allow it to read the config.
-To do that you can run this command on Debian:
+As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
+Debian:
```bash
sudo chown -R conduit:nogroup /etc/matrix-conduit
@@ -142,7 +146,6 @@ sudo mkdir -p /var/lib/matrix-conduit/conduit_db
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db
```
-
## Setting up the Reverse Proxy
This depends on whether you use Apache, Nginx or another web server.
@@ -171,11 +174,9 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
$ sudo systemctl reload apache2
```
-
### Nginx
-If you use Nginx and not Apache, add the following server section inside the
-http section of `/etc/nginx/nginx.conf`
+If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
```nginx
server {
@@ -198,13 +199,13 @@ server {
include /etc/letsencrypt/options-ssl-nginx.conf;
}
```
+
**You need to make some edits again.** When you are done, run
```bash
$ sudo systemctl reload nginx
```
-
## SSL Certificate
The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:
@@ -213,7 +214,6 @@ The easiest way to get an SSL certificate, if you don't have one already, is to
$ sudo certbot -d your.server.name
```
-
## You're done!
Now you can start Conduit with:
diff --git a/Dockerfile b/Dockerfile
index f4b176f5..d137353a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,75 +1,66 @@
-# Using multistage build:
-# https://docs.docker.com/develop/develop-images/multistage-build/
-# https://whitfin.io/speeding-up-rust-docker-builds/
+# syntax=docker/dockerfile:1
+FROM docker.io/rust:1.53-alpine AS builder
+WORKDIR /usr/src/conduit
+
+# Install required packages to build Conduit and its dependencies
+RUN apk add musl-dev
+
+# == Build dependencies without our own code separately for caching ==
+#
+# Need a fake main.rs since Cargo refuses to build anything otherwise.
+#
+# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
+# request that would allow just dependencies to be compiled, presumably
+# regardless of whether source files are available.
+RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
+COPY Cargo.toml Cargo.lock ./
+RUN cargo build --release && rm -r src
+
+# Copy over actual Conduit sources
+COPY src src
+
+# main.rs and lib.rs need their timestamp updated for this to work correctly since
+# otherwise the build with the fake main.rs from above is newer than the
+# source files (COPY preserves timestamps).
+#
+# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
+RUN touch src/main.rs && touch src/lib.rs && cargo build --release
-########################## BUILD IMAGE ##########################
-# Alpine build image to build Conduit's statically compiled binary
-FROM alpine:3.14 as builder
-# Install packages needed for building all crates
-RUN apk add --no-cache \
- cargo \
- openssl-dev
-# Specifies if the local project is build or if Conduit gets build
-# from the official git repository. Defaults to the git repo.
-ARG LOCAL=false
-# Specifies which revision/commit is build. Defaults to HEAD
-ARG GIT_REF=origin/master
+# ---------------------------------------------------------------------------------------------------------------
+# Stuff below this line actually ends up in the resulting docker image
+# ---------------------------------------------------------------------------------------------------------------
+FROM docker.io/alpine:3.14 AS runner
-# Copy project files from current folder
-COPY . .
-# Build it from the copied local files or from the official git repository
-RUN if [[ $LOCAL == "true" ]]; then \
- mv ./docker/healthcheck.sh . ; \
- echo "Building from local source..." ; \
- cargo install --path . ; \
- else \
- echo "Building revision '${GIT_REF}' from online source..." ; \
- cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF} ; \
- echo "Loadings healthcheck script from online source..." ; \
- wget "https://gitlab.com/famedly/conduit/-/raw/${GIT_REF#origin/}/docker/healthcheck.sh" ; \
- fi
-
-########################## RUNTIME IMAGE ##########################
-# Create new stage with a minimal image for the actual
-# runtime image/container
-FROM alpine:3.14
-
-ARG CREATED
-ARG VERSION
-ARG GIT_REF=origin/master
-
-ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
-
-# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
-# including a custom label specifying the build command
-LABEL org.opencontainers.image.created=${CREATED} \
- org.opencontainers.image.authors="Conduit Contributors" \
- org.opencontainers.image.title="Conduit" \
- org.opencontainers.image.version=${VERSION} \
- org.opencontainers.image.vendor="Conduit Contributors" \
- org.opencontainers.image.description="A Matrix homeserver written in Rust" \
- org.opencontainers.image.url="https://conduit.rs/" \
- org.opencontainers.image.revision=${GIT_REF} \
- org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
- org.opencontainers.image.licenses="Apache-2.0" \
- org.opencontainers.image.documentation="" \
- org.opencontainers.image.ref.name="" \
- org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
- maintainer="Weasy666"
-
-# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
+# Standard port on which Conduit launches.
+# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167
-# Copy config files from context and the binary from
-# the "builder" stage to the current stage into folder
-# /srv/conduit and create data folder for database
-RUN mkdir -p /srv/conduit/.local/share/conduit
-COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/
-COPY --from=builder ./healthcheck.sh /srv/conduit/
+# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs.
+ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
+# Conduit needs:
+# ca-certificates: for https
+# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
+RUN apk add --no-cache \
+ ca-certificates \
+ curl \
+ libgcc
+
+
+# Created directory for the database and media files
+RUN mkdir -p /srv/conduit/.local/share/conduit
+
+# Test if Conduit is still alive, uses the same endpoint as Element
+COPY ./docker/healthcheck.sh /srv/conduit/
+HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
+
+# Copy over the actual Conduit binary from the builder stage
+COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/
+
+# Improve security: Don't run stuff as root, that does not need to run as root:
# Add www-data user and group with UID 82, as used by alpine
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
RUN set -x ; \
@@ -79,19 +70,13 @@ RUN set -x ; \
# Change ownership of Conduit files to www-data user and group
RUN chown -cR www-data:www-data /srv/conduit
+RUN chmod +x /srv/conduit/healthcheck.sh
-# Install packages needed to run Conduit
-RUN apk add --no-cache \
- ca-certificates \
- curl \
- libgcc
-
-# Test if Conduit is still alive, uses the same endpoint as Element
-HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh
-
-# Set user to www-data
+# Change user to www-data
USER www-data
# Set container home directory
WORKDIR /srv/conduit
-# Run Conduit
-ENTRYPOINT [ "/srv/conduit/conduit" ]
+
+# Run Conduit and print backtraces on panics
+ENV RUST_BACKTRACE=1
+ENTRYPOINT [ "/srv/conduit/conduit" ]
\ No newline at end of file
diff --git a/docker/README.md b/docker/README.md
index 0e834820..19d9dca6 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -2,53 +2,41 @@
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.
-
## Docker
### Build & Dockerfile
The Dockerfile provided by Conduit has two stages, each of which creates an image.
+
1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository.
-2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
-
-The Dockerfile includes a few build arguments that should be supplied when building it.
-
-``` Dockerfile
-ARG LOCAL=false
-ARG CREATED
-ARG VERSION
-ARG GIT_REF=origin/master
-```
-
-- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')`
-- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)`
-- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`.
-- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `origin/master`.
+2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
To build the image you can use the following command
-``` bash
-docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
+```bash
+docker build --tag matrixconduit/matrix-conduit:latest .
```
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
-**Note:** it ommits the two optional `build-arg`s.
-
### Run
After building the image you can simply run it with
-``` bash
+```bash
docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
```
or you can skip the build step and pull the image from one of the following registries:
-| Registry | Image | Size |
-| --------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- |
-| Docker Hub | [matrixconduit/matrix-conduit:latest](https://hub.docker.com/r/matrixconduit/matrix-conduit) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) |
-| GitLab Registry | [registry.gitlab.com/famedly/conduit/conduit:latest](https://gitlab.com/famedly/conduit/container_registry/2134341) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) |
+| Registry | Image | Size |
+| --------------- | --------------------------------------------------------------- | --------------------- |
+| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] |
+| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] |
+
+[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
+[gl]: https://gitlab.com/famedly/conduit/container_registry/
+[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
@@ -56,29 +44,26 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
-
## Docker-compose
If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying
Conduit can be found [here](../DEPLOY.md).
-
### Build
To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with:
-``` bash
-CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up
+```bash
+docker-compose up
```
-This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section.
-
+This will also start the container right afterwards, so if you want it to run in detached mode, you should also use the `-d` flag.
### Run
If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:
-``` bash
+```bash
docker-compose up -d
```
@@ -101,32 +86,36 @@ So...step by step:
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
5. Create the files needed by the `well-known` service.
- - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping)
- ```nginx
- server {
- server_name .;
- listen 80 default_server;
- location /.well-known/matrix/ {
- root /var/www;
- default_type application/json;
- add_header Access-Control-Allow-Origin *;
- }
- }
- ```
- - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping)
- ```json
- {
- "m.homeserver": {
- "base_url": "https://."
- }
- }
- ```
- - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping)
- ```json
- {
- "m.server": ".:443"
- }
- ```
+ - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping)
+
+ ```nginx
+ server {
+ server_name .;
+ listen 80 default_server;
+
+ location /.well-known/matrix/ {
+ root /var/www;
+ default_type application/json;
+ add_header Access-Control-Allow-Origin *;
+ }
+ }
+ ```
+
+ - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping)
+ ```json
+ {
+ "m.homeserver": {
+ "base_url": "https://."
+ }
+ }
+ ```
+ - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping)
+ ```json
+ {
+ "m.server": ".:443"
+ }
+ ```
+
6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin.
diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile
index fb674396..b51df7c1 100644
--- a/docker/ci-binaries-packaging.Dockerfile
+++ b/docker/ci-binaries-packaging.Dockerfile
@@ -1,3 +1,4 @@
+# syntax=docker/dockerfile:1
# ---------------------------------------------------------------------------------------------------------
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
# It does not build Conduit in Docker, but just copies the matching build artifact from the build job.
@@ -7,20 +8,26 @@
# Credit's for the original Dockerfile: Weasy666.
# ---------------------------------------------------------------------------------------------------------
-FROM alpine:3.14
+FROM docker.io/alpine:3.14 AS runner
-# Install packages needed to run Conduit
+# Standard port on which Conduit launches.
+# You still need to map the port when using the docker command or docker-compose.
+EXPOSE 6167
+
+# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs.
+ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
+
+# Conduit needs:
+# ca-certificates: for https
+# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \
ca-certificates \
- curl \
libgcc
+
ARG CREATED
ARG VERSION
ARG GIT_REF
-
-ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
-
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
# including a custom label specifying the build command
LABEL org.opencontainers.image.created=${CREATED} \
@@ -33,19 +40,24 @@ LABEL org.opencontainers.image.created=${CREATED} \
org.opencontainers.image.revision=${GIT_REF} \
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
org.opencontainers.image.licenses="Apache-2.0" \
- org.opencontainers.image.documentation="" \
+ org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
org.opencontainers.image.ref.name=""
-# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
-EXPOSE 6167
-
-# create data folder for database
+# Created directory for the database and media files
RUN mkdir -p /srv/conduit/.local/share/conduit
-# Copy the Conduit binary into the image at the latest possible moment to maximise caching:
-COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit
+# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/
+HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
+
+# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
+# copy the matching binary into this docker image
+ARG TARGETPLATFORM
+COPY ./$TARGETPLATFORM /srv/conduit/conduit
+
+
+# Improve security: Don't run stuff as root, that does not need to run as root:
# Add www-data user and group with UID 82, as used by alpine
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
RUN set -x ; \
@@ -57,13 +69,11 @@ RUN set -x ; \
RUN chown -cR www-data:www-data /srv/conduit
RUN chmod +x /srv/conduit/healthcheck.sh
-
-# Test if Conduit is still alive, uses the same endpoint as Element
-HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh
-
-# Set user to www-data
+# Change user to www-data
USER www-data
# Set container home directory
WORKDIR /srv/conduit
-# Run Conduit
+
+# Run Conduit and print backtraces on panics
+ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/srv/conduit/conduit" ]
diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh
index 568838ec..7ca04602 100644
--- a/docker/healthcheck.sh
+++ b/docker/healthcheck.sh
@@ -7,7 +7,7 @@ fi
# The actual health check.
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
-# TODO: Change this to a single curl call. Do we have a config value that we can check for that?
-curl --fail -s "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
- curl -k --fail -s "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
+# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
+wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
+ wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
exit 1
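
For context, a minimal sketch of how `docker/healthcheck.sh` reads after this hunk. The `CONDUIT_PORT` detection sits above the hunk and is not shown in this diff, so the default used below is an assumption.

```bash
#!/bin/sh
# Assumed default; the real script derives CONDUIT_PORT further up (not shown in the hunk).
CONDUIT_PORT="${CONDUIT_PORT:-6167}"

# Probe the same endpoint Element uses, first over HTTP, then over HTTPS.
# If both probes fail, exit non-zero so Docker marks the container as unhealthy.
wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
    wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
    exit 1
```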
From 9bfc7b34b6d72def7da19ccd1decbe1ac2c7e6db Mon Sep 17 00:00:00 2001
From: Jonas Zohren
Date: Thu, 25 Nov 2021 22:36:44 +0000
Subject: [PATCH 013/445] Fixes for !225
---
.gitlab-ci.yml | 87 +++++++++----------------
DEPLOY.md | 23 +++----
Dockerfile | 4 +-
docker/ci-binaries-packaging.Dockerfile | 2 +-
4 files changed, 44 insertions(+), 72 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6f2e0fe3..a8d43842 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -142,8 +142,12 @@ build:debug:cargo:x86_64-unknown-linux-musl:
DOCKER_HOST: tcp://docker:2375/
DOCKER_TLS_CERTDIR: ""
DOCKER_DRIVER: overlay2
- PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64"
+ PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64"
DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
+ cache:
+ paths:
+ - docker_cache
+ key: "$CI_JOB_NAME"
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# Only log in to Dockerhub if the credentials are given:
@@ -156,80 +160,51 @@ build:debug:cargo:x86_64-unknown-linux-musl:
- mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
- mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
- mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
- - mkdir -p linux/arm64/ && mv ./conduit-aarch64-unknown-linux-musl linux/arm64/v8
- # Actually create multiarch image:
+ - mv ./conduit-aarch64-unknown-linux-musl linux/arm64
+ - 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"'
+ # Build and push image:
- >
docker buildx build
--pull
--push
- --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+ --cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache
+ --cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache
+ --build-arg CREATED=$CREATED
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
--build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
--platform "$PLATFORMS"
- --tag "$GL_IMAGE_TAG"
- --tag "$GL_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"
+ --tag "$TAG"
+ --tag "$TAG-alpine"
+ --tag "$TAG-commit-$CI_COMMIT_SHORT_SHA"
--file "$DOCKER_FILE" .
- # Only try to push to docker hub, if auth data for dockerhub exists:
- - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG"; fi
- - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"; fi
-build:docker:next:
+docker:next:gitlab:
extends: .docker-shared-settings
rules:
- if: '$CI_COMMIT_BRANCH == "next"'
variables:
- GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
- DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
+ TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
-build:docker:master:
+docker:next:dockerhub:
+ extends: .docker-shared-settings
+ rules:
+ - if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB'
+ variables:
+ TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
+
+docker:master:gitlab:
extends: .docker-shared-settings
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
variables:
- GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
- DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
+ TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
-## Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image
-#.docker-shared-settings:
-# stage: "build docker image"
-# needs: []
-# interruptible: true
-# image:
-# name: "gcr.io/kaniko-project/executor:debug"
-# entrypoint: [""]
-# tags: ["docker"]
-# variables:
-# # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache
-# KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache"
-# before_script:
-# - "mkdir -p /kaniko/.docker"
-# - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json'
-#
-#
-#build:docker:next:
-# extends: .docker-shared-settings
-# needs:
-# - "build:release:cargo:x86_64-unknown-linux-musl"
-# script:
-# - >
-# /kaniko/executor
-# $KANIKO_CACHE_ARGS
-# --force
-# --context $CI_PROJECT_DIR
-# --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
-# --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
-# --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
-# --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
-# --destination "$CI_REGISTRY_IMAGE/conduit:next"
-# --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine"
-# --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA"
-# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
-# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine"
-# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA"
-# rules:
-# - if: '$CI_COMMIT_BRANCH == "next"'
-#
-#
+docker:master:dockerhub:
+ extends: .docker-shared-settings
+ rules:
+ - if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB'
+ variables:
+ TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
# --------------------------------------------------------------------- #
# Run tests #
diff --git a/DEPLOY.md b/DEPLOY.md
index 6470c902..0058b93d 100644
--- a/DEPLOY.md
+++ b/DEPLOY.md
@@ -12,20 +12,17 @@ only offer Linux binaries.
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
-| CPU Architecture | Download link |
-| ------------------------------------------- | ----------------------- |
-| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl] |
-| armv6 | [Download][armv6-musl] |
-| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl] |
-| armv8 / aarch64 | [Download][armv8-musl] |
+| CPU Architecture | Download stable version |
+| ------------------------------------------- | ------------------------------ |
+| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] |
+| armv6 | [Download][armv6-musl-master] |
+| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] |
+| armv8 / aarch64 | [Download][armv8-musl-master] |
-[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
-
-[armv6-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
-
-[armv7-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
-
-[armv8-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
+[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
+[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
+[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
+[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
```bash
$ sudo wget -O /usr/local/bin/matrix-conduit
diff --git a/Dockerfile b/Dockerfile
index d137353a..6a9ea732 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -54,11 +54,11 @@ RUN apk add --no-cache \
RUN mkdir -p /srv/conduit/.local/share/conduit
# Test if Conduit is still alive, uses the same endpoint as Element
-COPY ./docker/healthcheck.sh /srv/conduit/
+COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
# Copy over the actual Conduit binary from the builder stage
-COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/
+COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
# Improve security: Don't run stuff as root, that does not need to run as root:
# Add www-data user and group with UID 82, as used by alpine
diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile
index b51df7c1..4ab874dd 100644
--- a/docker/ci-binaries-packaging.Dockerfile
+++ b/docker/ci-binaries-packaging.Dockerfile
@@ -47,7 +47,7 @@ LABEL org.opencontainers.image.created=${CREATED} \
RUN mkdir -p /srv/conduit/.local/share/conduit
# Test if Conduit is still alive, uses the same endpoint as Element
-COPY ./docker/healthcheck.sh /srv/conduit/
+COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
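
The `.gitlab-ci.yml` part of this patch switches the arm64 platform string and splits tagging into per-registry jobs. As a rough illustration only, a local run along the lines of the CI packaging step might look like the sketch below; it assumes the cross-compiled binaries have already been placed at `linux/amd64`, `linux/arm/v6`, `linux/arm/v7` and `linux/arm64` as the script does, and that a buildx builder is available.

```bash
# Hypothetical local equivalent of the CI packaging step (not part of the patch).
docker buildx create --use                      # one-off: ensure a buildx builder exists
docker buildx build \
    --pull \
    --platform "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64" \
    --build-arg CREATED="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" \
    --build-arg VERSION="$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
    --build-arg "GIT_REF=$(git rev-parse --short HEAD)" \
    --tag matrix-conduit:local \
    --file docker/ci-binaries-packaging.Dockerfile .
```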
From f91216dd3ce5f842c1c441d0bae5a852e689bccf Mon Sep 17 00:00:00 2001
From: Jonas Zohren
Date: Tue, 14 Dec 2021 11:16:02 +0100
Subject: [PATCH 014/445] CI: Optionally use sccache for compilation
This moves compiler caching for incremental builds away from GitLab
caching the whole target/ folder to caching each code unit in S3.
This alleviates the need to zip and unzip the whole folder and just caches on the fly.
This feature is optional and gated behind the SCCACHE_BIN_URL env variable.
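
A minimal sketch of the optional hook this message describes, mirroring the `before_script` hunk below; `SCCACHE_BIN_URL` is assumed to point at a prebuilt sccache binary reachable over HTTPS.

```bash
# Only enable sccache when the CI variable is set; otherwise cargo runs without a wrapper.
if [ -n "${SCCACHE_BIN_URL}" ]; then
    curl "$SCCACHE_BIN_URL" --output /sccache
    chmod +x /sccache
    export RUSTC_WRAPPER=/sccache   # cargo now routes rustc invocations through sccache
fi
time cargo build --target "$TARGET" --release
```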
---
.gitlab-ci.yml | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a8d43842..664b5ea3 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -26,16 +26,19 @@ variables:
cache:
paths:
- cargohome
- - target/
- key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release"
+ key: "build_cache--$TARGET--$CI_COMMIT_BRANCH"
variables:
CARGO_PROFILE_RELEASE_LTO: "true"
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
+ CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
+ CARGO_HOME: $CI_PROJECT_DIR/cargohome
before_script:
- 'echo "Building for target $TARGET"'
- - 'mkdir -p cargohome && CARGOHOME="cargohome"'
+ - "mkdir -p $CARGO_HOME"
- "rustc --version && cargo --version && rustup show" # Print version info for debugging
- "rustup target add $TARGET"
+ # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
+ - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi
script:
- time cargo build --target $TARGET --release
- 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"'
@@ -216,20 +219,20 @@ test:cargo:
image: "rust:latest"
tags: ["docker"]
variables:
- CARGO_HOME: "cargohome"
+ CARGO_HOME: "$CI_PROJECT_DIR/cargohome"
+ CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
cache:
paths:
- - target
- cargohome
- key: test_cache
+ key: "test_cache--$CI_COMMIT_BRANCH"
interruptible: true
before_script:
- - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps"
+ - mkdir -p $CARGO_HOME
- apt-get update -yqq
- apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget
- rustup component add clippy rustfmt
- - wget "https://faulty-storage.de/gitlab-report"
- - chmod +x ./gitlab-report
+ # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
+ - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi
script:
- rustc --version && cargo --version # Print version info for debugging
- cargo fmt --all -- --check
From adb518fa0df35ba85c2ff1c96a539dda085f8991 Mon Sep 17 00:00:00 2001
From: Jonas Zohren
Date: Tue, 14 Dec 2021 11:16:40 +0100
Subject: [PATCH 015/445] CI: Use curl instead of wget
The Rust Docker image already comes with curl, so there is no need to install wget.
---
.gitlab-ci.yml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 664b5ea3..1dedd8ff 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -229,8 +229,9 @@ test:cargo:
before_script:
- mkdir -p $CARGO_HOME
- apt-get update -yqq
- - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget
+ - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config
- rustup component add clippy rustfmt
+ - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report
# If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
- if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi
script:
From 339a26f56c84da242d753a1894589f5923b0fd7e Mon Sep 17 00:00:00 2001
From: Jonas Zohren
Date: Wed, 15 Dec 2021 10:14:20 +0000
Subject: [PATCH 016/445] Update docker images
---
Dockerfile | 7 +++---
docker/ci-binaries-packaging.Dockerfile | 31 ++++++++++++-------------
2 files changed, 18 insertions(+), 20 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 6a9ea732..5812fdf9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -32,7 +32,7 @@ RUN touch src/main.rs && touch src/lib.rs && cargo build --release
# ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image
# ---------------------------------------------------------------------------------------------------------------
-FROM docker.io/alpine:3.14 AS runner
+FROM docker.io/alpine:3.15.0 AS runner
# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
@@ -45,9 +45,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# ca-certificates: for https
# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \
- ca-certificates \
- curl \
- libgcc
+ ca-certificates \
+ libgcc
# Created directory for the database and media files
diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile
index 4ab874dd..f4603105 100644
--- a/docker/ci-binaries-packaging.Dockerfile
+++ b/docker/ci-binaries-packaging.Dockerfile
@@ -1,14 +1,13 @@
# syntax=docker/dockerfile:1
# ---------------------------------------------------------------------------------------------------------
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
-# It does not build Conduit in Docker, but just copies the matching build artifact from the build job.
-# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary.
+# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
#
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
# Credit's for the original Dockerfile: Weasy666.
# ---------------------------------------------------------------------------------------------------------
-FROM docker.io/alpine:3.14 AS runner
+FROM docker.io/alpine:3.15.0 AS runner
# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
@@ -21,8 +20,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# ca-certificates: for https
# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \
- ca-certificates \
- libgcc
+ ca-certificates \
+ libgcc
ARG CREATED
@@ -31,17 +30,17 @@ ARG GIT_REF
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
# including a custom label specifying the build command
LABEL org.opencontainers.image.created=${CREATED} \
- org.opencontainers.image.authors="Conduit Contributors" \
- org.opencontainers.image.title="Conduit" \
- org.opencontainers.image.version=${VERSION} \
- org.opencontainers.image.vendor="Conduit Contributors" \
- org.opencontainers.image.description="A Matrix homeserver written in Rust" \
- org.opencontainers.image.url="https://conduit.rs/" \
- org.opencontainers.image.revision=${GIT_REF} \
- org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
- org.opencontainers.image.licenses="Apache-2.0" \
- org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
- org.opencontainers.image.ref.name=""
+ org.opencontainers.image.authors="Conduit Contributors" \
+ org.opencontainers.image.title="Conduit" \
+ org.opencontainers.image.version=${VERSION} \
+ org.opencontainers.image.vendor="Conduit Contributors" \
+ org.opencontainers.image.description="A Matrix homeserver written in Rust" \
+ org.opencontainers.image.url="https://conduit.rs/" \
+ org.opencontainers.image.revision=${GIT_REF} \
+ org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
+ org.opencontainers.image.licenses="Apache-2.0" \
+ org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
+ org.opencontainers.image.ref.name=""
# Created directory for the database and media files
RUN mkdir -p /srv/conduit/.local/share/conduit
From 1fc616320a2aa8ab02edbfca7620773f69abf797 Mon Sep 17 00:00:00 2001
From: Jonas Platte
Date: Fri, 26 Nov 2021 19:28:47 +0100
Subject: [PATCH 017/445] Use struct init shorthand
---
src/client_server/voip.rs | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs
index 9c3b20d4..c9a98d9f 100644
--- a/src/client_server/voip.rs
+++ b/src/client_server/voip.rs
@@ -49,8 +49,8 @@ pub async fn turn_server_route(
};
Ok(get_turn_server_info::Response {
- username: username,
- password: password,
+ username,
+ password,
uris: db.globals.turn_uris().to_vec(),
ttl: Duration::from_secs(db.globals.turn_ttl()),
}
From 892a0525f20a0a815e7d12f45a7c5a623de7844d Mon Sep 17 00:00:00 2001
From: Jonas Platte
Date: Fri, 26 Nov 2021 20:36:40 +0100
Subject: [PATCH 018/445] Upgrade Ruma
---
Cargo.lock | 43 ++++------
Cargo.toml | 2 +-
src/client_server/account.rs | 15 ++--
src/client_server/capabilities.rs | 6 +-
src/client_server/directory.rs | 2 +-
src/client_server/keys.rs | 22 ++---
src/client_server/membership.rs | 41 ++++-----
src/client_server/message.rs | 2 +-
src/client_server/report.rs | 6 +-
src/client_server/room.rs | 26 +++---
src/client_server/state.rs | 4 +-
src/client_server/sync.rs | 27 +++---
src/client_server/voip.rs | 2 +-
src/database.rs | 11 +--
src/database/admin.rs | 17 ++--
src/database/globals.rs | 14 +--
src/database/key_backups.rs | 6 +-
src/database/pusher.rs | 4 +-
src/database/rooms.rs | 138 +++++++++++++++---------------
src/database/rooms/edus.rs | 21 +++--
src/database/sending.rs | 4 +-
src/database/users.rs | 35 ++++----
src/pdu.rs | 28 +++---
src/ruma_wrapper.rs | 6 +-
src/server_server.rs | 119 ++++++++++++--------------
25 files changed, 297 insertions(+), 304 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 9682f2fe..8b25b478 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1516,12 +1516,6 @@ dependencies = [
"winapi",
]
-[[package]]
-name = "paste"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
-
[[package]]
name = "pear"
version = "0.2.3"
@@ -1990,7 +1984,7 @@ dependencies = [
[[package]]
name = "ruma"
version = "0.4.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"assign",
"js_int",
@@ -2011,7 +2005,7 @@ dependencies = [
[[package]]
name = "ruma-api"
version = "0.18.5"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"bytes",
"http",
@@ -2027,7 +2021,7 @@ dependencies = [
[[package]]
name = "ruma-api-macros"
version = "0.18.5"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@@ -2038,7 +2032,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.4.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"ruma-api",
"ruma-common",
@@ -2052,7 +2046,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.12.3"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"assign",
"bytes",
@@ -2072,7 +2066,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.6.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"indexmap",
"js_int",
@@ -2087,7 +2081,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.24.6"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"indoc",
"js_int",
@@ -2103,7 +2097,7 @@ dependencies = [
[[package]]
name = "ruma-events-macros"
version = "0.24.6"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@@ -2114,7 +2108,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.3.1"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"js_int",
"ruma-api",
@@ -2129,9 +2123,8 @@ dependencies = [
[[package]]
name = "ruma-identifiers"
version = "0.20.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
- "paste",
"percent-encoding",
"rand 0.8.4",
"ruma-identifiers-macros",
@@ -2144,7 +2137,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-macros"
version = "0.20.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"quote",
"ruma-identifiers-validation",
@@ -2154,7 +2147,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"thiserror",
]
@@ -2162,7 +2155,7 @@ dependencies = [
[[package]]
name = "ruma-identity-service-api"
version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"js_int",
"ruma-api",
@@ -2175,7 +2168,7 @@ dependencies = [
[[package]]
name = "ruma-push-gateway-api"
version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"js_int",
"ruma-api",
@@ -2190,7 +2183,7 @@ dependencies = [
[[package]]
name = "ruma-serde"
version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"bytes",
"form_urlencoded",
@@ -2204,7 +2197,7 @@ dependencies = [
[[package]]
name = "ruma-serde-macros"
version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@@ -2215,7 +2208,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.9.0"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"base64 0.13.0",
"ed25519-dalek",
@@ -2232,7 +2225,7 @@ dependencies = [
[[package]]
name = "ruma-state-res"
version = "0.4.1"
-source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
+source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf"
dependencies = [
"itertools 0.10.1",
"js_int",
diff --git a/Cargo.toml b/Cargo.toml
index 91c7e259..b24afb5c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request
# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "bba7d624425da2c65a834bbd0e633b7577488cdf", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
diff --git a/src/client_server/account.rs b/src/client_server/account.rs
index 4b3ad0d4..d7c2f63e 100644
--- a/src/client_server/account.rs
+++ b/src/client_server/account.rs
@@ -11,10 +11,9 @@ use ruma::{
error::ErrorKind,
r0::{
account::{
- change_password, deactivate, get_username_availability, register, whoami,
- ThirdPartyIdRemovalStatus,
+ change_password, deactivate, get_3pids, get_username_availability, register,
+ whoami, ThirdPartyIdRemovalStatus,
},
- contact::get_contacts,
uiaa::{AuthFlow, AuthType, UiaaInfo},
},
},
@@ -282,7 +281,7 @@ pub async fn register_route(
let mut content = RoomCreateEventContent::new(conduit_user.clone());
content.federate = true;
content.predecessor = None;
- content.room_version = RoomVersionId::Version6;
+ content.room_version = RoomVersionId::V6;
// 1. The room create event
db.rooms.build_and_append_pdu(
@@ -433,7 +432,7 @@ pub async fn register_route(
)?;
// Room alias
- let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name())
+ let alias: Box<RoomAliasId> = format!("#admins:{}", db.globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
@@ -757,9 +756,9 @@ pub async fn deactivate_route(
get("/_matrix/client/r0/account/3pid", data = "")
)]
pub async fn third_party_route(
- body: Ruma<get_contacts::Request>,
-) -> ConduitResult<get_contacts::Response> {
+ body: Ruma<get_3pids::Request>,
+) -> ConduitResult<get_3pids::Response> {
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
- Ok(get_contacts::Response::new(Vec::new()).into())
+ Ok(get_3pids::Response::new(Vec::new()).into())
}
diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs
index f86b23b5..c69b7cb2 100644
--- a/src/client_server/capabilities.rs
+++ b/src/client_server/capabilities.rs
@@ -22,12 +22,12 @@ pub async fn get_capabilities_route(
_body: Ruma<get_capabilities::Request>,
) -> ConduitResult<get_capabilities::Response> {
let mut available = BTreeMap::new();
- available.insert(RoomVersionId::Version5, RoomVersionStability::Stable);
- available.insert(RoomVersionId::Version6, RoomVersionStability::Stable);
+ available.insert(RoomVersionId::V5, RoomVersionStability::Stable);
+ available.insert(RoomVersionId::V6, RoomVersionStability::Stable);
let mut capabilities = Capabilities::new();
capabilities.room_versions = RoomVersionsCapability {
- default: RoomVersionId::Version6,
+ default: RoomVersionId::V6,
available,
};
diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs
index 490f7524..5a1bc494 100644
--- a/src/client_server/directory.rs
+++ b/src/client_server/directory.rs
@@ -167,7 +167,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
other_server,
federation::directory::get_public_rooms_filtered::v1::Request {
limit,
- since: since.as_deref(),
+ since,
filter: Filter {
generic_search_term: filter.generic_search_term.as_deref(),
},
diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs
index a44f5e9c..08ea6e76 100644
--- a/src/client_server/keys.rs
+++ b/src/client_server/keys.rs
@@ -316,7 +316,7 @@ pub async fn get_key_changes_route(
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
sender_user: Option<&UserId>,
- device_keys_input: &BTreeMap<UserId, Vec<Box<DeviceId>>>,
+ device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
allowed_signatures: F,
db: &Database,
) -> Result<get_keys::Response> {
@@ -328,6 +328,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut get_over_federation = HashMap::new();
for (user_id, device_ids) in device_keys_input {
+ let user_id: &UserId = &**user_id;
+
if user_id.server_name() != db.globals.server_name() {
get_over_federation
.entry(user_id.server_name())
@@ -355,11 +357,11 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
container.insert(device_id, keys);
}
}
- device_keys.insert(user_id.clone(), container);
+ device_keys.insert(user_id.to_owned(), container);
} else {
for device_id in device_ids {
let mut container = BTreeMap::new();
- if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? {
+ if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
Error::BadRequest(
ErrorKind::InvalidParam,
@@ -371,24 +373,24 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
device_display_name: metadata.display_name,
};
- container.insert(device_id.clone(), keys);
+ container.insert(device_id.to_owned(), keys);
}
- device_keys.insert(user_id.clone(), container);
+ device_keys.insert(user_id.to_owned(), container);
}
}
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
- master_keys.insert(user_id.clone(), master_key);
+ master_keys.insert(user_id.to_owned(), master_key);
}
if let Some(self_signing_key) = db
.users
.get_self_signing_key(user_id, &allowed_signatures)?
{
- self_signing_keys.insert(user_id.clone(), self_signing_key);
+ self_signing_keys.insert(user_id.to_owned(), self_signing_key);
}
if Some(user_id) == sender_user {
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
- user_signing_keys.insert(user_id.clone(), user_signing_key);
+ user_signing_keys.insert(user_id.to_owned(), user_signing_key);
}
}
}
@@ -400,7 +402,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
.map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
- device_keys_input_fed.insert(user_id.clone(), keys.clone());
+ device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
}
(
server,
@@ -440,7 +442,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
}
pub(crate) async fn claim_keys_helper(
- one_time_keys_input: &BTreeMap<UserId, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
+ one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
db: &Database,
) -> Result<claim_keys::Response> {
let mut one_time_keys = BTreeMap::new();
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs
index ec685ec9..f65287da 100644
--- a/src/client_server/membership.rs
+++ b/src/client_server/membership.rs
@@ -64,7 +64,7 @@ pub async fn join_room_by_id_route(
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
- .filter_map(|sender| UserId::try_from(sender).ok())
+ .filter_map(|sender| Box::<UserId>::try_from(sender).ok())
.map(|user| user.server_name().to_owned())
.collect();
@@ -72,7 +72,7 @@ pub async fn join_room_by_id_route(
let ret = join_room_by_id_helper(
&db,
- body.sender_user.as_ref(),
+ body.sender_user.as_deref(),
&body.room_id,
&servers,
body.third_party_signed.as_ref(),
@@ -101,7 +101,7 @@ pub async fn join_room_by_id_or_alias_route(
) -> ConduitResult<join_room_by_id_or_alias::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
- let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) {
+ let (servers, room_id) = match Box::<RoomId>::try_from(body.room_id_or_alias.clone()) {
Ok(room_id) => {
let mut servers: HashSet<_> = db
.rooms
@@ -111,7 +111,7 @@ pub async fn join_room_by_id_or_alias_route(
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
- .filter_map(|sender| UserId::try_from(sender).ok())
+ .filter_map(|sender| Box::<UserId>::try_from(sender).ok())
.map(|user| user.server_name().to_owned())
.collect();
@@ -127,7 +127,7 @@ pub async fn join_room_by_id_or_alias_route(
let join_room_response = join_room_by_id_helper(
&db,
- body.sender_user.as_ref(),
+ body.sender_user.as_deref(),
&room_id,
&servers,
body.third_party_signed.as_ref(),
@@ -531,7 +531,7 @@ async fn join_room_by_id_helper(
.roomid_mutex_state
.write()
.unwrap()
- .entry(room_id.clone())
+ .entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
@@ -551,7 +551,7 @@ async fn join_room_by_id_helper(
federation::membership::create_join_event_template::v1::Request {
room_id,
user_id: sender_user,
- ver: &[RoomVersionId::Version5, RoomVersionId::Version6],
+ ver: &[RoomVersionId::V5, RoomVersionId::V6],
},
)
.await;
@@ -567,8 +567,7 @@ async fn join_room_by_id_helper(
let room_version = match make_join_response.room_version {
Some(room_version)
- if room_version == RoomVersionId::Version5
- || room_version == RoomVersionId::Version6 =>
+ if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 =>
{
room_version
}
@@ -620,7 +619,7 @@ async fn join_room_by_id_helper(
.expect("event is valid, we just created it");
// Generate event id
- let event_id = EventId::try_from(&*format!(
+ let event_id = Box::<EventId>::try_from(&*format!(
"${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version)
.expect("ruma can calculate reference hashes")
@@ -776,7 +775,7 @@ async fn join_room_by_id_helper(
db.flush()?;
- Ok(join_room_by_id::Response::new(room_id.clone()).into())
+ Ok(join_room_by_id::Response::new(room_id.to_owned()).into())
}
fn validate_and_add_event_id(
@@ -784,12 +783,12 @@ fn validate_and_add_event_id(
room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
db: &Database,
-) -> Result<(EventId, CanonicalJsonObject)> {
+) -> Result<(Box<EventId>, CanonicalJsonObject)> {
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response")
})?;
- let event_id = EventId::try_from(&*format!(
+ let event_id = Box::<EventId>::try_from(&*format!(
"${}",
ruma::signatures::reference_hash(&value, room_version)
.expect("ruma can calculate reference hashes")
@@ -856,7 +855,7 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state
.write()
.unwrap()
- .entry(room_id.clone())
+ .entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
@@ -892,9 +891,7 @@ pub(crate) async fn invite_helper<'a>(
// If there was no create event yet, assume we are creating a version 6 room right now
let room_version_id = create_event_content
- .map_or(RoomVersionId::Version6, |create_event| {
- create_event.room_version
- });
+ .map_or(RoomVersionId::V6, |create_event| create_event.room_version);
let room_version =
RoomVersion::new(&room_version_id).expect("room version is supported");
@@ -939,9 +936,9 @@ pub(crate) async fn invite_helper<'a>(
}
let pdu = PduEvent {
- event_id: ruma::event_id!("$thiswillbefilledinlater"),
- room_id: room_id.clone(),
- sender: sender_user.clone(),
+ event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(),
+ room_id: room_id.to_owned(),
+ sender: sender_user.to_owned(),
origin_server_ts: utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
@@ -1014,7 +1011,7 @@ pub(crate) async fn invite_helper<'a>(
};
// Generate event id
- let expected_event_id = EventId::try_from(&*format!(
+ let expected_event_id = Box::<EventId>::try_from(&*format!(
"${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes")
@@ -1100,7 +1097,7 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state
.write()
.unwrap()
- .entry(room_id.clone())
+ .entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
diff --git a/src/client_server/message.rs b/src/client_server/message.rs
index abbbe8ea..0d006101 100644
--- a/src/client_server/message.rs
+++ b/src/client_server/message.rs
@@ -67,7 +67,7 @@ pub async fn send_message_event_route(
));
}
- let event_id = EventId::try_from(
+ let event_id = Box::<EventId>::try_from(
utils::string_from_bytes(&response)
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?,
)
diff --git a/src/client_server/report.rs b/src/client_server/report.rs
index 3dcb4d1c..2e6527d4 100644
--- a/src/client_server/report.rs
+++ b/src/client_server/report.rs
@@ -57,8 +57,7 @@ pub async fn report_event_route(
Report Score: {}\n\
Report Reason: {}",
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
- )
- .to_owned(),
+ ),
format!(
"Report received from: {0}\
- Event Info
- Event ID:
{1}
\
@@ -72,8 +71,7 @@ pub async fn report_event_route(
pdu.sender,
body.score,
RawStr::new(&body.reason).html_escape()
- )
- .to_owned(),
+ ),
),
));
diff --git a/src/client_server/room.rs b/src/client_server/room.rs
index 47c7ee6f..97b3f482 100644
--- a/src/client_server/room.rs
+++ b/src/client_server/room.rs
@@ -88,14 +88,17 @@ pub async fn create_room_route(
));
}
- let alias: Option<RoomAliasId> =
+ let alias: Option<Box<RoomAliasId>> =
body.room_alias_name
.as_ref()
.map_or(Ok(None), |localpart| {
// TODO: Check for invalid characters and maximum length
- let alias =
- RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name()))
- .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
+ let alias = Box::<RoomAliasId>::try_from(format!(
+ "#{}:{}",
+ localpart,
+ db.globals.server_name(),
+ ))
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
if db.rooms.id_from_alias(&alias)?.is_some() {
Err(Error::BadRequest(
@@ -109,7 +112,7 @@ pub async fn create_room_route(
let room_version = match body.room_version.clone() {
Some(room_version) => {
- if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 {
+ if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 {
room_version
} else {
return Err(Error::BadRequest(
@@ -118,7 +121,7 @@ pub async fn create_room_route(
));
}
}
- None => RoomVersionId::Version6,
+ None => RoomVersionId::V6,
};
let content = match &body.creation_content {
@@ -164,7 +167,7 @@ pub async fn create_room_route(
.get(),
);
- if let Err(_) = de_result {
+ if de_result.is_err() {
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Invalid creation content",
@@ -269,7 +272,7 @@ pub async fn create_room_route(
PduBuilder {
event_type: EventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
- alias: Some(room_alias_id.clone()),
+ alias: Some(room_alias_id.to_owned()),
alt_aliases: vec![],
})
.expect("We checked that alias earlier, it must be fine"),
@@ -505,10 +508,7 @@ pub async fn upgrade_room_route(
) -> ConduitResult<upgrade_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
- if !matches!(
- body.new_version,
- RoomVersionId::Version5 | RoomVersionId::Version6
- ) {
+ if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) {
return Err(Error::BadRequest(
ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.",
@@ -605,7 +605,7 @@ pub async fn upgrade_room_route(
.get(),
);
- if let Err(_) = de_result {
+ if de_result.is_err() {
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Error forming creation event",
diff --git a/src/client_server/state.rs b/src/client_server/state.rs
index 307bccab..0ba20620 100644
--- a/src/client_server/state.rs
+++ b/src/client_server/state.rs
@@ -267,7 +267,7 @@ async fn send_state_event_for_key_helper(
event_type: EventType,
json: &Raw<AnyStateEventContent>,
state_key: String,
-) -> Result<EventId> {
+) -> Result<Box<EventId>> {
let sender_user = sender;
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it
@@ -303,7 +303,7 @@ async fn send_state_event_for_key_helper(
.roomid_mutex_state
.write()
.unwrap()
- .entry(room_id.clone())
+ .entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs
index 65c07bc9..1060d917 100644
--- a/src/client_server/sync.rs
+++ b/src/client_server/sync.rs
@@ -54,15 +54,17 @@ use rocket::{get, tokio};
/// `since` will be cached
#[cfg_attr(
feature = "conduit_bin",
- get("/_matrix/client/r0/sync", data = "")
+ get("/_matrix/client/r0/sync", data = "")
)]
-#[tracing::instrument(skip(db, body))]
+#[tracing::instrument(skip(db, req))]
pub async fn sync_events_route(
db: DatabaseGuard,
- body: Ruma<sync_events::Request<'_>>,
+ req: Ruma<sync_events::Request<'_>>,
) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
- let sender_user = body.sender_user.as_ref().expect("user is authenticated");
- let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+ let body = req.body;
+
+ let sender_user = req.sender_user.expect("user is authenticated");
+ let sender_device = req.sender_device.expect("user is authenticated");
let arc_db = Arc::new(db);
@@ -132,7 +134,7 @@ pub async fn sync_events_route(
async fn sync_helper_wrapper(
db: Arc<DatabaseGuard>,
- sender_user: UserId,
+ sender_user: Box<UserId>,
sender_device: Box<DeviceId>,
since: Option<String>,
full_state: bool,
@@ -176,7 +178,7 @@ async fn sync_helper_wrapper(
async fn sync_helper(
db: Arc<DatabaseGuard>,
- sender_user: UserId,
+ sender_user: Box<UserId>,
sender_device: Box<DeviceId>,
since: Option<String>,
full_state: bool,
@@ -296,9 +298,10 @@ async fn sync_helper(
})?;
if let Some(state_key) = &pdu.state_key {
- let user_id = UserId::try_from(state_key.clone()).map_err(|_| {
- Error::bad_database("Invalid UserId in member PDU.")
- })?;
+ let user_id =
+ Box::<UserId>::try_from(state_key.clone()).map_err(|_| {
+ Error::bad_database("Invalid UserId in member PDU.")
+ })?;
// The membership was and still is invite or join
if matches!(
@@ -424,7 +427,7 @@ async fn sync_helper(
}
if let Some(state_key) = &state_event.state_key {
- let user_id = UserId::try_from(state_key.clone())
+ let user_id = Box::<UserId>::try_from(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user {
@@ -793,7 +796,7 @@ fn share_encrypted_room(
) -> Result<bool> {
Ok(db
.rooms
- .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
+ .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
.filter_map(|r| r.ok())
.filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| {
diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs
index c9a98d9f..66a85f0f 100644
--- a/src/client_server/voip.rs
+++ b/src/client_server/voip.rs
@@ -26,7 +26,7 @@ pub async fn turn_server_route(
let turn_secret = db.globals.turn_secret();
- let (username, password) = if turn_secret != "" {
+ let (username, password) = if !turn_secret.is_empty() {
let expiry = SecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
)
diff --git a/src/database.rs b/src/database.rs
index 080e24b3..056d49ad 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -477,7 +477,8 @@ impl Database {
// Set room member count
for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
let room_id =
- RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap();
+ Box::<RoomId>::try_from(utils::string_from_bytes(&roomid).unwrap())
+ .unwrap();
db.rooms.update_joined_count(&room_id, &db)?;
}
@@ -489,7 +490,7 @@ impl Database {
if db.globals.database_version()? < 7 {
// Upgrade state store
- let mut last_roomstates: HashMap<RoomId, u64> = HashMap::new();
+ let mut last_roomstates: HashMap<Box<RoomId>, u64> = HashMap::new();
let mut current_sstatehash: Option<u64> = None;
let mut current_room = None;
let mut current_state = HashSet::new();
@@ -570,7 +571,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash {
handle_state(
current_sstatehash,
- current_room.as_ref().unwrap(),
+ current_room.as_deref().unwrap(),
current_state,
&mut last_roomstates,
)?;
@@ -587,7 +588,7 @@ impl Database {
.unwrap()
.unwrap();
let event_id =
- EventId::try_from(utils::string_from_bytes(&event_id).unwrap())
+ Box::<EventId>::try_from(utils::string_from_bytes(&event_id).unwrap())
.unwrap();
let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap();
@@ -604,7 +605,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash {
handle_state(
current_sstatehash,
- current_room.as_ref().unwrap(),
+ current_room.as_deref().unwrap(),
current_state,
&mut last_roomstates,
)?;
diff --git a/src/database/admin.rs b/src/database/admin.rs
index 8d8559a5..07a487e2 100644
--- a/src/database/admin.rs
+++ b/src/database/admin.rs
@@ -1,13 +1,10 @@
-use std::{
- convert::{TryFrom, TryInto},
- sync::Arc,
-};
+use std::{convert::TryFrom, sync::Arc};
use crate::{pdu::PduBuilder, Database};
use rocket::futures::{channel::mpsc, stream::StreamExt};
use ruma::{
events::{room::message::RoomMessageEventContent, EventType},
- UserId,
+ RoomAliasId, UserId,
};
use serde_json::value::to_raw_value;
use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard};
@@ -37,15 +34,17 @@ impl Admin {
let guard = db.read().await;
let conduit_user =
- UserId::try_from(format!("@conduit:{}", guard.globals.server_name()))
+ Box::::try_from(format!("@conduit:{}", guard.globals.server_name()))
.expect("@conduit:server_name is valid");
let conduit_room = guard
.rooms
.id_from_alias(
- &format!("#admins:{}", guard.globals.server_name())
- .try_into()
- .expect("#admins:server_name is a valid room alias"),
+ &Box::<RoomAliasId>::try_from(format!(
+ "#admins:{}",
+ guard.globals.server_name()
+ ))
+ .expect("#admins:server_name is a valid room alias"),
)
.unwrap();
diff --git a/src/database/globals.rs b/src/database/globals.rs
index 05ecb568..098d8197 100644
--- a/src/database/globals.rs
+++ b/src/database/globals.rs
@@ -40,13 +40,13 @@ pub struct Globals {
dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option>,
pub(super) server_signingkeys: Arc,
- pub bad_event_ratelimiter: Arc<RwLock<HashMap<EventId, RateLimitState>>>,
+ pub bad_event_ratelimiter: Arc<RwLock<HashMap<Box<EventId>, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>,
- pub sync_receivers: RwLock<HashMap<(UserId, Box<DeviceId>), SyncHandle>>,
- pub roomid_mutex_insert: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>,
- pub roomid_mutex_state: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>,
- pub roomid_mutex_federation: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>, // this lock will be held longer
+ pub sync_receivers: RwLock<HashMap<(Box<UserId>, Box<DeviceId>), SyncHandle>>,
+ pub roomid_mutex_insert: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>,
+ pub roomid_mutex_state: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>,
+ pub roomid_mutex_federation: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>, // this lock will be held longer
pub rotate: RotationHandler,
}
@@ -254,7 +254,7 @@ impl Globals {
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
- ) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
+ ) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
// Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
@@ -293,7 +293,7 @@ impl Globals {
pub fn signing_keys_for(
&self,
origin: &ServerName,
- ) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
+ ) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
let signingkeys = self
.server_signingkeys
.get(origin.as_bytes())?
diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs
index 98ea0111..3010a37b 100644
--- a/src/database/key_backups.rs
+++ b/src/database/key_backups.rs
@@ -209,13 +209,13 @@ impl KeyBackups {
&self,
user_id: &UserId,
version: &str,
- ) -> Result<BTreeMap<RoomId, RoomKeyBackup>> {
+ ) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> {
let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff);
prefix.extend_from_slice(version.as_bytes());
prefix.push(0xff);
- let mut rooms = BTreeMap::<RoomId, RoomKeyBackup>::new();
+ let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new();
for result in self
.backupkeyid_backup
@@ -231,7 +231,7 @@ impl KeyBackups {
Error::bad_database("backupkeyid_backup session_id is invalid.")
})?;
- let room_id = RoomId::try_from(
+ let room_id = Box::<RoomId>::try_from(
utils::string_from_bytes(parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.")
})?)
diff --git a/src/database/pusher.rs b/src/database/pusher.rs
index f53f137b..97ca85d8 100644
--- a/src/database/pusher.rs
+++ b/src/database/pusher.rs
@@ -234,7 +234,7 @@ pub fn get_actions<'a>(
db: &Database,
) -> Result<&'a [Action]> {
let ctx = PushConditionRoomCtx {
- room_id: room_id.clone(),
+ room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently
user_display_name: db
.users
@@ -277,7 +277,7 @@ async fn send_notice(
let mut data_minus_url = pusher.data.clone();
// The url must be stripped off according to spec
data_minus_url.url = None;
- device.data = Some(data_minus_url);
+ device.data = data_minus_url;
// Tweaks are only added if the format is NOT event_id_only
if !event_id_only {
diff --git a/src/database/rooms.rs b/src/database/rooms.rs
index c5b795bd..ebd0941b 100644
--- a/src/database/rooms.rs
+++ b/src/database/rooms.rs
@@ -107,14 +107,14 @@ pub struct Rooms {
/// RoomId + EventId -> Parent PDU EventId.
pub(super) referencedevents: Arc<dyn Tree>,
- pub(super) pdu_cache: Mutex<LruCache<EventId, Arc<PduEvent>>>,
+ pub(super) pdu_cache: Mutex<LruCache<Box<EventId>, Arc<PduEvent>>>,
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
- pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>,
+ pub(super) eventidshort_cache: Mutex<LruCache<Box<EventId>, u64>>,
pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>,
pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
- pub(super) our_real_users_cache: RwLock<HashMap<RoomId, Arc<HashSet<UserId>>>>,
- pub(super) appservice_in_room_cache: RwLock<HashMap<RoomId, HashMap<String, bool>>>,
+ pub(super) our_real_users_cache: RwLock<HashMap<Box<RoomId>, Arc<HashSet<Box<UserId>>>>>,
+ pub(super) appservice_in_room_cache: RwLock<HashMap<Box<RoomId>, HashMap<String, bool>>>,
pub(super) stateinfo_cache: Mutex<
LruCache<
u64,
@@ -434,7 +434,7 @@ impl Rooms {
None => continue,
};
- let user_id = match UserId::try_from(state_key) {
+ let user_id = match Box::<UserId>::try_from(state_key) {
Ok(id) => id,
Err(_) => continue,
};
@@ -742,7 +742,7 @@ impl Rooms {
self.eventidshort_cache
.lock()
.unwrap()
- .insert(event_id.clone(), short);
+ .insert(event_id.to_owned(), short);
Ok(short)
}
@@ -871,8 +871,8 @@ impl Rooms {
.get(&shorteventid.to_be_bytes())?
.ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;
- let event_id = Arc::new(
- EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
+ let event_id = Arc::from(
+ Box::<EventId>::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?,
@@ -1112,7 +1112,7 @@ impl Rooms {
self.pdu_cache
.lock()
.unwrap()
- .insert(event_id.clone(), Arc::clone(&pdu));
+ .insert(event_id.to_owned(), Arc::clone(&pdu));
Ok(Some(pdu))
} else {
Ok(None)
@@ -1162,14 +1162,14 @@ impl Rooms {
/// Returns the leaf pdus of a room.
#[tracing::instrument(skip(self))]
- pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<Vec<EventId>> {
+ pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<Vec<Box<EventId>>> {
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);
self.roomid_pduleaves
.scan_prefix(prefix)
.map(|(_, bytes)| {
- EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
+ Box::<EventId>::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("EventID in roomid_pduleaves is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))
@@ -1178,7 +1178,7 @@ impl Rooms {
}
#[tracing::instrument(skip(self, room_id, event_ids))]
- pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
+ pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Box<EventId>]) -> Result<()> {
for prev in event_ids {
let mut key = room_id.as_bytes().to_vec();
key.extend_from_slice(prev.as_bytes());
@@ -1193,7 +1193,7 @@ impl Rooms {
/// The provided `event_ids` become the new leaves, this allows a room to have multiple
/// `prev_events`.
#[tracing::instrument(skip(self))]
- pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
+ pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[Box<EventId>]) -> Result<()> {
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);
@@ -1261,7 +1261,7 @@ impl Rooms {
&self,
pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject,
- leaves: &[EventId],
+ leaves: &[Box<EventId>],
db: &Database,
) -> Result<Vec<u8>> {
let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists");
@@ -1420,7 +1420,7 @@ impl Rooms {
}
// if the state_key fails
- let target_user_id = UserId::try_from(state_key.clone())
+ let target_user_id = Box::<UserId>::try_from(state_key.clone())
.expect("This state_key was previously validated");
let content = serde_json::from_str::<RoomMemberEventContent>(pdu.content.get())
@@ -1476,9 +1476,11 @@ impl Rooms {
if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name()))
&& self
.id_from_alias(
- &format!("#admins:{}", db.globals.server_name())
- .try_into()
- .expect("#admins:server_name is a valid room alias"),
+ &Box::<RoomAliasId>::try_from(format!(
+ "#admins:{}",
+ db.globals.server_name()
+ ))
+ .expect("#admins:server_name is a valid room alias"),
)?
.as_ref()
== Some(&pdu.room_id)
@@ -1528,7 +1530,7 @@ impl Rooms {
}
"get_auth_chain" => {
if args.len() == 1 {
- if let Ok(event_id) = EventId::try_from(args[0]) {
+ if let Ok(event_id) = Box::<EventId>::try_from(args[0]) {
if let Some(event) = db.rooms.get_pdu_json(&event_id)? {
let room_id_str = event
.get("room_id")
@@ -1539,12 +1541,12 @@ impl Rooms {
)
})?;
- let room_id = RoomId::try_from(room_id_str)
+ let room_id = Box::<RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
let start = Instant::now();
let count = server_server::get_auth_chain(
&room_id,
- vec![Arc::new(event_id)],
+ vec![Arc::from(event_id)],
db,
)?
.count();
@@ -1567,12 +1569,12 @@ impl Rooms {
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(value) => {
- let event_id = EventId::try_from(&*format!(
+ let event_id = Box::<EventId>::try_from(&*format!(
"${}",
// Anything higher than version3 behaves the same
ruma::signatures::reference_hash(
&value,
- &RoomVersionId::Version6
+ &RoomVersionId::V6
)
.expect("ruma can calculate reference hashes")
))
@@ -1622,7 +1624,7 @@ impl Rooms {
}
"get_pdu" => {
if args.len() == 1 {
- if let Ok(event_id) = EventId::try_from(args[0]) {
+ if let Ok(event_id) = Box::<EventId>::try_from(args[0]) {
let mut outlier = false;
let mut pdu_json =
db.rooms.get_non_outlier_pdu_json(&event_id)?;
@@ -1948,7 +1950,7 @@ impl Rooms {
room_id: &RoomId,
db: &Database,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
- ) -> Result<EventId> {
+ ) -> Result<Box<EventId>> {
let PduBuilder {
event_type,
content,
@@ -1985,9 +1987,7 @@ impl Rooms {
// If there was no create event yet, assume we are creating a version 6 room right now
let room_version_id = create_event_content
- .map_or(RoomVersionId::Version6, |create_event| {
- create_event.room_version
- });
+ .map_or(RoomVersionId::V6, |create_event| create_event.room_version);
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
let auth_events =
@@ -2016,9 +2016,9 @@ impl Rooms {
}
let mut pdu = PduEvent {
- event_id: ruma::event_id!("$thiswillbefilledinlater"),
- room_id: room_id.clone(),
- sender: sender.clone(),
+ event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(),
+ room_id: room_id.to_owned(),
+ sender: sender.to_owned(),
origin_server_ts: utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
@@ -2083,7 +2083,7 @@ impl Rooms {
.expect("event is valid, we just created it");
// Generate event id
- pdu.event_id = EventId::try_from(&*format!(
+ pdu.event_id = Box::<EventId>::try_from(&*format!(
"${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes")
@@ -2206,7 +2206,7 @@ impl Rooms {
let mut first_pdu_id = prefix.clone();
first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());
- let user_id = user_id.clone();
+ let user_id = user_id.to_owned();
Ok(self
.pduid_pdu
@@ -2243,7 +2243,7 @@ impl Rooms {
let current: &[u8] = &current;
- let user_id = user_id.clone();
+ let user_id = user_id.to_owned();
Ok(self
.pduid_pdu
@@ -2280,7 +2280,7 @@ impl Rooms {
let current: &[u8] = &current;
- let user_id = user_id.clone();
+ let user_id = user_id.to_owned();
Ok(self
.pduid_pdu
@@ -2412,7 +2412,7 @@ impl Rooms {
for room_ids in direct_event.content.0.values_mut() {
if room_ids.iter().any(|r| r == &predecessor.room_id) {
- room_ids.push(room_id.clone());
+ room_ids.push(room_id.to_owned());
room_ids_updated = true;
}
}
@@ -2451,7 +2451,11 @@ impl Rooms {
EventType::IgnoredUserList,
)?
.map_or(false, |ignored| {
- ignored.content.ignored_users.contains(sender)
+ ignored
+ .content
+ .ignored_users
+ .iter()
+ .any(|user| user == sender)
});
if is_ignored {
@@ -2537,7 +2541,7 @@ impl Rooms {
self.our_real_users_cache
.write()
.unwrap()
- .insert(room_id.clone(), Arc::new(real_users));
+ .insert(room_id.to_owned(), Arc::new(real_users));
for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) {
if !joined_servers.remove(&old_joined_server) {
@@ -2582,7 +2586,7 @@ impl Rooms {
&self,
room_id: &RoomId,
db: &Database,
- ) -> Result<Arc<HashSet<UserId>>> {
+ ) -> Result<Arc<HashSet<Box<UserId>>>> {
let maybe = self
.our_real_users_cache
.read()
@@ -2650,7 +2654,7 @@ impl Rooms {
self.appservice_in_room_cache
.write()
.unwrap()
- .entry(room_id.clone())
+ .entry(room_id.to_owned())
.or_default()
.insert(appservice.0.clone(), in_room);
@@ -2694,7 +2698,7 @@ impl Rooms {
.roomid_mutex_state
.write()
.unwrap()
- .entry(room_id.clone())
+ .entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
@@ -2754,7 +2758,7 @@ impl Rooms {
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
- .filter_map(|sender| UserId::try_from(sender).ok())
+ .filter_map(|sender| Box::<UserId>::try_from(sender).ok())
.map(|user| user.server_name().to_owned())
.collect();
@@ -2778,9 +2782,7 @@ impl Rooms {
let (make_leave_response, remote_server) = make_leave_response_and_server?;
let room_version_id = match make_leave_response.room_version {
- Some(version)
- if version == RoomVersionId::Version5 || version == RoomVersionId::Version6 =>
- {
+ Some(version) if version == RoomVersionId::V5 || version == RoomVersionId::V6 => {
version
}
_ => return Err(Error::BadServerResponse("Room version is not supported")),
@@ -2817,7 +2819,7 @@ impl Rooms {
.expect("event is valid, we just created it");
// Generate event id
- let event_id = EventId::try_from(&*format!(
+ let event_id = Box::<EventId>::try_from(&*format!(
"${}",
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes")
@@ -2902,11 +2904,11 @@ impl Rooms {
}
#[tracing::instrument(skip(self))]
- pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result