Network configuration is exposed by CLI flags

Signed-off-by: Uncle Stretch <uncle.stretch@ghostchain.io>
This commit is contained in:
Uncle Stretch 2025-12-28 15:04:38 +03:00
parent 35296f13b0
commit ef69d13a44
Signed by: str3tch
GPG Key ID: 84F3190747EE79AA
8 changed files with 1028 additions and 1073 deletions

718
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
[package]
name = "ghost-echo"
version = "0.0.2"
version = "0.0.3"
edition = "2021"
[[bin]]
@ -17,7 +17,6 @@ futures = "0.3.31"
futures-timer = "3.0.3"
hex = "0.4.3"
libp2p = { version = "0.56", features = ["identify", "ping", "tokio", "gossipsub", "macros", "relay", "kad", "rsa", "ed25519", "quic", "request-response", "dns", "memory-connection-limits", "tcp", "noise", "yamux", "autonat", "tls", "dcutr"] }
libp2p-webrtc = { version = "0.9.0-alpha", features = ["tokio", "pem"] }
quick-protobuf = "0.8.1"
rand = "0.9.0"
rand_core = { version = "0.6.4", features = ["getrandom"] }

View File

@ -3,7 +3,6 @@ use ghost_echo::prelude::*;
use anyhow::Result;
use clap::Parser;
use libp2p::{identity, PeerId};
use libp2p_webrtc::tokio::Certificate;
use std::path::{Path, PathBuf};
use tokio::{fs, task::JoinHandle};
use tokio_util::sync::CancellationToken;
@ -20,9 +19,8 @@ async fn main() -> Result<()> {
// create a shutdown token
let shutdown = CancellationToken::new();
// load the identity and certificate
// load the local identity
let local_key = read_or_create_identity(&opt.local_key_path).await?;
let webrtc_cert = read_or_create_certificate(&opt.local_cert_path).await?;
// create the ui and the channels to communicate with it
let (mut ui, to_ui, from_ui) = if opt.headless {
@ -32,7 +30,7 @@ async fn main() -> Result<()> {
};
// create the peer, connecting it to the ui
let mut peer = Peer::new(local_key, webrtc_cert, to_ui, from_ui, shutdown.clone()).await?;
let mut peer = Peer::new(local_key, to_ui, from_ui, shutdown.clone()).await?;
// spawn tasks for both the swarm and the ui
let peer_task: JoinHandle<Result<()>> = tokio::spawn(async move { peer.run().await });
@ -48,26 +46,6 @@ async fn main() -> Result<()> {
Ok(())
}
async fn read_or_create_certificate(path: &Path) -> Result<Certificate> {
if path.exists() {
let pem = fs::read_to_string(&path).await?;
info!("Using existing certificate from {}", path.display());
return Ok(Certificate::from_pem(&pem)?);
}
let cert = Certificate::generate(&mut rand_core::OsRng)?;
fs::write(&path, &cert.serialize_pem().as_bytes()).await?;
info!(
"Generated new certificate and wrote it to {}",
path.display()
);
Ok(cert)
}
async fn read_or_create_identity(path: &Path) -> Result<identity::Keypair> {
let mut key_path = PathBuf::from(path);
let is_key = key_path

View File

@ -17,9 +17,23 @@ pub enum Message {
peers: Vec<(PeerId, Vec<String>)>,
},
/// Add a peer
AddPeer(ChatPeer),
AddPeer {
/// Topic name.
topic: Option<String>,
/// Peer id.
chat_peer: ChatPeer,
},
/// Remove a peer
RemovePeer(ChatPeer),
RemovePeer {
/// Topic name.
topic: Option<String>,
/// Peer id.
chat_peer: ChatPeer,
},
/// Add an event message
Event(String),
/// Add new topic
AddTopic(String),
/// Remove existing topic
RemoveTopic(String),
}

View File

@ -1,18 +1,39 @@
use clap::Parser;
use std::{net::IpAddr, path::PathBuf};
use clap::{Args, Parser, ValueEnum};
use libp2p::{
gossipsub::ValidationMode,
identity::Keypair,
kad::{BucketInserts, Caching, StoreInserts},
Multiaddr, PeerId,
};
use std::{net::IpAddr, num::NonZero, path::PathBuf, time::Duration};
const LISTEN_ADDR: [&str; 1] = ["0.0.0.0"];
const LOCAL_KEY_PATH: &str = "./local";
const LOCAL_CERT_PATH: &str = "./cert.pem";
/// Available authenticity options.
///
/// Mirrors libp2p gossipsub's message-authenticity choices — presumably
/// converted to `gossipsub::MessageAuthenticity` where the behaviour is
/// built; confirm at the call site.
#[derive(ValueEnum, Clone, Copy, Debug, PartialEq, Eq)]
pub enum GossipAuthenticity {
    /// Message signing is enabled, the author will be the owner of the key and the sequence number
    /// will be linearly increasing.
    Signed,
    /// Message signing is disabled. The specified PeerId will be used as the author of all
    /// published messages. The sequence number will be randomized.
    Author,
    /// Message signing is disabled. A random PeerId will be used when publishing each message. The
    /// sequence number will be randomized.
    RandomAuthor,
    /// Message signing is disabled. The author of the message and the sequence numbers are
    /// excluded from the message.
    Anonymous,
}
/// The rust peer command line options
#[derive(Debug, Parser)]
#[clap(name = "ghost-gossiper p2p messenger")]
#[clap(author, version, about)]
pub struct Options {
/// WebRTC UDP port for the app.
#[clap(long, env, default_value = "0")]
pub webrtc_port: u16,
/// QUIC UDP port for the app.
#[clap(long, env, default_value = "0")]
pub quic_port: u16,
@ -21,11 +42,11 @@ pub struct Options {
#[clap(long, env, default_value = "0")]
pub tcp_port: u16,
/// Address to listen on.
/// Addresses to listen on.
#[clap(long, env, action = clap::ArgAction::Append, value_delimiter = ',', default_values = LISTEN_ADDR)]
pub listen_addresses: Vec<IpAddr>,
/// If known, the external address of this node. Will be used to correctly advertise our external address across all transports.
/// The external address of this node. Will be used to correctly advertise our external address across all transports.
#[clap(long, env, action = clap::ArgAction::Append, value_delimiter = ',')]
pub external_addresses: Vec<IpAddr>,
@ -33,19 +54,31 @@ pub struct Options {
#[clap(long, env, action = clap::ArgAction::Append, value_delimiter = ',')]
pub connect: Vec<String>,
/// If set, the path to the local certificate file.
#[clap(long, env, default_value = LOCAL_CERT_PATH)]
pub local_cert_path: PathBuf,
/// Bootnodes to be connected to bootstrap the network connectivity.
#[clap(long, env, action = clap::ArgAction::Append, value_delimiter = ',')]
pub bootnodes: Option<Vec<Multiaddr>>,
/// If set, the path to the local key file.
/// Topics to be subscribed on.
#[clap(long, env, action = clap::ArgAction::Append, value_delimiter = ',')]
pub topics: Option<Vec<String>>,
/// The path to the local key file.
#[clap(long, env, default_value = LOCAL_KEY_PATH)]
pub local_key_path: PathBuf,
/// If set, the peer will make autonat client requests (default: true)
/// Connection limits options.
#[command(flatten)]
pub connection_limits_options: ConnectionLimitsOptions,
/// The peer will make autonat client requests.
#[clap(long, env, default_value = "true")]
pub autonat_client: bool,
/// If set, the peer will act as an autonat server
/// Autonat client options.
#[command(flatten)]
pub autonat_client_options: AutonatClientOptions,
/// Make peer act as an autonat server.
#[clap(long, env)]
pub autonat_server: bool,
@ -53,6 +86,23 @@ pub struct Options {
#[clap(long, env, default_value = "true")]
pub dcutr: bool,
/// Gossipsub options.
#[command(flatten)]
pub gossipsub_options: GossipsubOptions,
/// Identify options.
#[command(flatten)]
pub identify_options: IdentifyOptions,
/// Memory connection limits options. New inbound and outbound connections will be denied
/// when the threshold is reached.
#[command(flatten)]
pub memory_connection_limits_options: MemoryConnectionLimitsOptions,
/// Ping options.
#[command(flatten)]
pub ping_options: PingOptions,
/// If set, the peer will not initialize the TUI and will run headless.
#[clap(long, env)]
pub headless: bool,
@ -61,6 +111,10 @@ pub struct Options {
#[clap(long, env, default_value = "true")]
pub kademlia: bool,
/// Kademlia options.
#[command(flatten)]
pub kademlia_options: KademliaOptions,
/// If set, the peer will support relay client connections (default: true)
#[clap(long, env, default_value = "true")]
pub relay_client: bool,
@ -68,4 +122,494 @@ pub struct Options {
/// If set, the peer will act as a relay server
#[clap(long, env)]
pub relay_server: bool,
/// Relay server options.
#[command(flatten)]
pub relay_server_options: RelayServerOptions,
/// Request response options.
#[command(flatten)]
pub request_response_options: RequestResponseOptions,
}
/// Connection limits options.
///
/// Thresholds for the swarm's connection-limits behaviour — presumably fed
/// into libp2p's `connection-limits` feature where the peer is constructed;
/// confirm at the call site.
#[derive(Debug, Clone, Copy, Args)]
pub struct ConnectionLimitsOptions {
    /// Configure the maximum number of concurrently incoming connections being established.
    #[clap(long, env, default_value = "16")]
    pub pending_incoming: u32,
    /// Configure the maximum number of concurrently outgoing connections being established.
    #[clap(long, env, default_value = "8")]
    pub pending_outgoing: u32,
    /// Configure the maximum number of concurrent established inbound connections.
    #[clap(long, env, default_value = "25")]
    pub established_incoming: u32,
    /// Configure the maximum number of concurrent established outbound connections.
    #[clap(long, env, default_value = "25")]
    pub established_outgoing: u32,
    /// Configure the maximum number of concurrent established connections (both inbound and
    /// outbound).
    #[clap(long, env, default_value = "50")]
    pub max_established: u32,
    /// Configure the maximum number of concurrent established connections per peer, regardless of
    /// direction (incoming or outgoing).
    #[clap(long, env, default_value = "1")]
    pub established_per_peer: u32,
}
/// Autonat client options.
///
/// Only meaningful when `--autonat-client` is enabled, which the clap group
/// requirement below enforces.
#[derive(Debug, Clone, Copy, Args)]
// Use the `requires = "…"` form for consistency with the other option groups
// (`KademliaOptions`, `RelayServerOptions`).
#[group(requires = "autonat_client")]
pub struct AutonatClientOptions {
    /// Maximum number of candidate addresses your node will try to verify at once.
    #[clap(long, env, default_value = "10")]
    pub max_candidates: usize,
    /// Define the time between reachability probes in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "5")]
    pub probe_interval: Duration,
}
/// Determines if published messages should be signed or not.
#[derive(Debug, Clone, Args)]
pub struct GossipAuthenticityOptions {
    /// Determines if published messages should be signed or not.
    #[clap(long, env, value_enum, default_value_t = GossipAuthenticity::Signed)]
    pub authenticity: GossipAuthenticity,
    /// Peer id to be used for authoring, local peer will be used by default.
    // Supplied as a hex-encoded multihash (see `parse_peer_id`).
    #[clap(long, env, value_parser = parse_peer_id)]
    pub peer_id: Option<PeerId>,
    /// Keypair to be used for signing, local key will be used by default.
    // Supplied as a hex-encoded protobuf keypair (see `parse_keypair`).
    #[clap(long, env, value_parser = parse_keypair)]
    pub keypair: Option<Keypair>,
}
/// Gossipsub options.
///
/// CLI knobs that mirror libp2p gossipsub configuration settings —
/// presumably applied where the gossipsub behaviour is built; confirm at the
/// call site.
#[derive(Debug, Clone, Args)]
pub struct GossipsubOptions {
    /// Determines if published messages should be signed or not.
    #[command(flatten)]
    pub authenticity_options: GossipAuthenticityOptions,
    /// Number of heartbeats to keep in the memcache.
    #[clap(long, env, default_value = "5")]
    pub history_length: usize,
    /// Number of past heartbeats to gossip about.
    #[clap(long, env, default_value = "3")]
    pub history_gossip: usize,
    /// Target number of peers for the mesh network.
    #[clap(long, env, default_value = "6")]
    pub mesh_n: usize,
    /// Minimum number of peers in mesh network before adding more.
    #[clap(long, env, default_value = "1")]
    pub mesh_n_low: usize,
    /// Maximum number of peers in mesh network before removing some.
    #[clap(long, env, default_value = "12")]
    pub mesh_n_high: usize,
    /// Number of the retained peers that will be high-scoring, while the remainder are chosen
    /// randomly.
    #[clap(long, env, default_value = "4")]
    pub retain_scores: usize,
    /// Minimum number of peers to emit gossip to during a heartbeat.
    #[clap(long, env, default_value = "6")]
    pub lazy: usize,
    /// Number of peers we will emit gossip to at each heartbeat: "max(gossip_factor *
    /// (total_number_non_mesh_peers), gossip_lazy)".
    #[clap(long, env, default_value = "0.25")]
    pub factor: f64,
    /// Initial delay in each heartbeat in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "5")]
    pub heartbeat_initial_delay: Duration,
    /// Time between each heartbeat in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "1")]
    pub heartbeat_interval: Duration,
    /// The number of heartbeat ticks until we recheck the connection to explicit peers and
    /// reconnecting if necessary.
    #[clap(long, env, default_value = "300")]
    pub check_explicit_peers_ticks: u64,
    /// Time to live for fanout peers in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "60")]
    pub fanout_ttl: Duration,
    /// The maximum byte size for each gossip in bytes.
    #[clap(long, env, default_value = "2048")]
    pub max_transmit_size: usize,
    /// Time period that messages are stored in the cache in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "60")]
    pub duplicate_cache_time: Duration,
    /// Allow message validation before propagating them to peers.
    #[clap(long, env, default_value = "false")]
    pub validate_messages: bool,
    /// Determines the level of validation used when receiving messages.
    #[clap(long, env, value_parser = parse_validation_mode, default_value = "permissive")]
    pub validation_mode: ValidationMode,
    /// How long a peer must wait before attempting to graft into our mesh again after being
    /// pruned in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "60")]
    pub prune_backoff: Duration,
    /// How long to wait before resubscribing to the topic in seconds.
    // NOTE(review): plain u64 while other durations here go through
    // `parse_seconds` into `Duration` — confirm whether this is intentional.
    #[clap(long, env, default_value = "10")]
    pub unsubscribe_backoff: u64,
    /// Number of heartbeat slots considered as slack for backoff.
    #[clap(long, env, default_value = "1")]
    pub backoff_slack: u32,
    /// Whether to do flood publishing or not.
    #[clap(long, env, default_value = "true")]
    pub flood_publish: bool,
    /// Time since the last PRUNE triggers penalty in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "10")]
    pub graft_flood_threshold: Duration,
    /// Minimum number of outbound peers in the mesh network before adding more. Should be smaller
    /// or equal than "mesh_n / 2" and smaller than "mesh_n_low".
    #[clap(long, env, default_value = "2")]
    pub mesh_outbound_min: usize,
    /// Number of heartbeat ticks that specify the interval in which opportunistic grafting is
    /// applied.
    #[clap(long, env, default_value = "60")]
    pub opportunistic_graft_ticks: u64,
    /// How many times we will allow a peer to request the same message id through IWANT gossip
    /// before we start ignoring them.
    // NOTE: the field name (and therefore the generated CLI flag/env var)
    // misspells "retransmission"; renaming it now would break the existing
    // `--gossip-retransimission` flag.
    #[clap(long, env, default_value = "60")]
    pub gossip_retransimission: u32,
    /// The maximum number of new peers to graft to during opportunistic grafting.
    #[clap(long, env, default_value = "2")]
    pub opportunistic_graft_peers: usize,
    /// The maximum number of messages we will process in a given RPC.
    #[clap(long, env)]
    pub max_messages_per_rpc: Option<usize>,
    /// The maximum number of messages to include in an IHAVE message.
    #[clap(long, env, default_value = "5000")]
    pub max_ihave_length: usize,
    /// The maximum number of IHAVE messages to accept from a peer within a heartbeat.
    #[clap(long, env, default_value = "10")]
    pub max_ihave_messages: usize,
    /// Allow messages that are sent to us that has the same message source as we have specified
    /// locally.
    #[clap(long, env, default_value = "false")]
    pub allow_self_origin: bool,
    /// Time to wait for a message requested through IWANT following an IHAVE advertisement in
    /// seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "3")]
    pub iwant_followup_time: Duration,
    /// Published message ids time cache duration in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "10")]
    pub published_message_ids_cache_time: Duration,
    /// The max number of messages handler can buffer.
    #[clap(long, env, default_value = "5000")]
    pub connection_handler_queue_len: usize,
    /// The duration a message to be published can wait to be sent before it is abandoned in
    /// seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "5")]
    pub publish_queue_duration: Duration,
    /// The duration a message to be forwarded can wait to be sent before it is abandoned in
    /// seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "1")]
    pub forward_queue_duration: Duration,
    /// The message size threshold for which IDONTWANT messages are sent in bytes.
    #[clap(long, env, default_value = "1024")]
    pub idontwant_message_size_threshold: usize,
    /// Send IDONTWANT messages after publishing message on gossip.
    #[clap(long, env, default_value = "false")]
    pub idontwant_on_publish: bool,
}
/// Identify options.
#[derive(Debug, Clone, Args)]
pub struct IdentifyOptions {
    /// The interval in seconds at which identification requests are sent to the remote on
    /// established connections after the first request, i.e. the delay between identification
    /// requests.
    #[clap(long, env, value_parser = parse_seconds, default_value = "300")]
    pub identify_interval: Duration,
    /// Whether new or expired listen addresses of the local node should trigger an active push of
    /// an identify message to all connected peers.
    #[clap(long, env, default_value = "false")]
    pub push_listen_addr_updates: bool,
    /// How many entries of discovered peers to keep before we discard the least-recently used one.
    #[clap(long, env, default_value = "100")]
    pub cache_size: usize,
    /// Whether to include our listen addresses in our responses. If enabled, we will effectively
    /// only share our external addresses.
    #[clap(long, env, default_value = "false")]
    pub hide_listen_addrs: bool,
}
/// New inbound and outbound connections will be denied when the threshold is reached.
///
/// The clap group allows at most one of the two flags to be given explicitly
/// (`multiple = false`); `max_percentage` additionally conflicts with
/// `max_bytes`.
#[derive(Debug, Clone, Args)]
#[group(multiple = false)]
pub struct MemoryConnectionLimitsOptions {
    /// Sets the process memory usage threshold in the percentage of the total physical memory.
    #[clap(long, env, default_value = "0.9", conflicts_with = "max_bytes")]
    pub max_percentage: f64,
    /// Sets the process memory usage threshold in absolute bytes.
    #[clap(long, env)]
    pub max_bytes: Option<usize>,
}
/// Ping options.
#[derive(Debug, Clone, Args)]
pub struct PingOptions {
    /// Sets the ping timeout in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "20")]
    pub ping_timeout: Duration,
    /// Sets the ping interval in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "15")]
    pub ping_interval: Duration,
}
/// Kademlia options.
///
/// Only meaningful when `--kademlia` is enabled, which the clap group
/// requirement below enforces.
#[derive(Debug, Clone, Args)]
#[group(requires = "kademlia")]
pub struct KademliaOptions {
    /// Sets the timeout for a single query in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "60")]
    pub query_timeout: Duration,
    /// Sets the replication factor to use.
    #[clap(long, env, value_parser = parse_non_zero_usize, default_value = "20")]
    pub replication_factor: NonZero<usize>,
    /// Sets the allowed level of parallelism for iterative queries.
    #[clap(long, env, value_parser = parse_non_zero_usize, default_value = "3")]
    pub parallelism: NonZero<usize>,
    /// Require iterative queries to use disjoint paths for increased resiliency in the presence of
    /// potentially adversarial nodes. When enabled the number of disjoint paths used equals to
    /// configured parallelism.
    #[clap(long, env, default_value = "false")]
    pub disjoint_query_paths: bool,
    /// Sets the TTL for stored records in seconds. The TTL should be significantly longer than the
    /// (re-)publication interval, to avoid premature expiration of records. None means records
    /// never expire.
    // NOTE(review): `parse_seconds` always yields a value and a default is
    // set, so `None` cannot actually be selected from the CLI — confirm
    // whether an explicit "none" spelling was intended (see the commented-out
    // optional parser near `parse_seconds`).
    #[clap(long, env, value_parser = parse_seconds, default_value = "172800")]
    pub record_ttl: Option<Duration>,
    /// Sets whether or not records should be filtered before being stored.
    #[clap(long, env, value_parser = parse_store_inserts, default_value = "unfiltered")]
    pub record_filtering: StoreInserts,
    /// Sets the (re-)replication interval for stored records in seconds. Interval should be
    /// significantly shorter than the publication interval, to ensure persistence between
    /// re-publications.
    #[clap(long, env, value_parser = parse_seconds, default_value = "3600")]
    pub record_replication_interval: Option<Duration>,
    /// Sets the (re-)publication interval of stored records in seconds. This interval should be significantly
    /// shorter than the record TTL, to ensure records do not expire prematurely. None means that
    /// stored records are never automatically re-published.
    #[clap(long, env, value_parser = parse_seconds, default_value = "79200")]
    pub record_publication_interval: Option<Duration>,
    /// Sets the TTL for provider records in seconds. None means that stored provider records never
    /// expire. Must be significantly larger than the provider publication interval.
    #[clap(long, env, value_parser = parse_seconds, default_value = "172800")]
    pub provider_record_ttl: Option<Duration>,
    /// Sets the interval in seconds at which provider records for keys provided by the local node
    /// are re-published. None means that stored provider records are never automatically
    /// re-published. Must be significantly less than the provider record TTL.
    #[clap(long, env, value_parser = parse_seconds, default_value = "43200")]
    pub provider_publication_interval: Option<Duration>,
    /// Modifies the timeout duration of outbound substreams in seconds.
    #[clap(long, env, value_parser = parse_seconds, default_value = "10")]
    pub substreams_timeout: Duration,
    /// Sets the k-bucket insertion strategy for the Kademlia routing table.
    #[clap(long, env, value_parser = parse_bucket_inserts, default_value = "onconnected")]
    pub kbuckets_inserts: BucketInserts,
    /// Sets the caching strategy to use for successful lookups.
    #[clap(long, env, value_parser = parse_caching, default_value = "1")]
    pub caching: Caching,
    /// Sets the interval in seconds on which bootstrap behaviour is called periodically.
    #[clap(long, env, value_parser = parse_seconds, default_value = "300")]
    pub periodic_bootstrap_interval: Option<Duration>,
    /// Sets the configuration for the k-buckets. Setting a size higher than 20 may imply
    /// additional memory allocations.
    #[clap(long, env, value_parser = parse_non_zero_usize, default_value = "20")]
    pub kbucket_size: NonZero<usize>,
    /// Sets the timeout duration after creation of a pending entry after which it becomes eligible
    /// for insertion into a full bucket, replacing the least-recent (dis)connected node.
    #[clap(long, env, value_parser = parse_seconds, default_value = "60")]
    pub kbucket_timeout: Duration,
}
/// Relay server options.
///
/// Only meaningful when `--relay-server` is enabled, which the clap group
/// requirement below enforces.
#[derive(Debug, Clone, Args)]
#[group(requires = "relay_server")]
pub struct RelayServerOptions {
    /// The total number of unique peers relay can hold reservations for at any one time. Once
    /// reached, new peers cannot use node as a relay.
    #[clap(long, env, default_value = "128")]
    pub max_reservations: usize,
    /// Limits how many separate reservations a single PeerId can hold.
    #[clap(long, env, default_value = "4")]
    pub max_reservations_per_peer: usize,
    /// How long in seconds a reservation lasts before the peer must renew it.
    #[clap(long, env, value_parser = parse_seconds, default_value = "3600")]
    pub reservation_duration: Duration,
    /// The total number of active data tunnels node will allow at once. This prevents node from
    /// being overwhelmed by traffic.
    #[clap(long, env, default_value = "16")]
    pub max_circuits: usize,
    /// The maximum number of active tunnels allowed for any single PeerId.
    #[clap(long, env, default_value = "4")]
    pub max_circuits_per_peer: usize,
    /// A time limit for a single data session in seconds. If the connection stays open longer than
    /// this, the relay will force-close it to free up resources.
    #[clap(long, env, value_parser = parse_seconds, default_value = "120")]
    pub max_circuit_duration: Duration,
    /// A data cap per circuit in bytes. Once this many bytes have been transferred (uplink +
    /// downlink), the relay terminates the connection.
    #[clap(long, env, default_value = "524288")]
    pub max_circuit_bytes: u64,
}
/// Request response options.
#[derive(Debug, Clone, Args)]
pub struct RequestResponseOptions {
    /// Sets the timeout in seconds for inbound and outbound requests.
    #[clap(long, env, value_parser = parse_seconds, default_value = "10")]
    pub request_timeout: Duration,
    /// Sets the upper bound for the number of concurrent inbound + outbound streams.
    #[clap(long, env, default_value = "100")]
    pub concurrent_streams: usize,
}
/// Parse a whole number of seconds from its CLI string form into a
/// [`Duration`].
///
/// Returns a human-readable error message (as clap `value_parser` expects)
/// when the argument is not a valid unsigned integer.
// The previous commented-out `Option<Duration>`-returning variant was removed:
// dead code; resurrect it from history if a "none" spelling is ever needed.
fn parse_seconds(arg: &str) -> Result<Duration, String> {
    arg.parse::<u64>()
        .map(Duration::from_secs)
        .map_err(|_| format!("{} is not a valid number for seconds", arg))
}
/// Parse a strictly positive integer from its CLI string form.
///
/// Returns a human-readable error message (as clap `value_parser` expects)
/// when the argument is not a number or is zero; the message now includes the
/// offending value (the old one used `format!` with no interpolation).
fn parse_non_zero_usize(arg: &str) -> Result<NonZero<usize>, String> {
    arg.parse::<usize>()
        .map_err(|_| format!("{} is not a valid number for usize", arg))
        .and_then(|value| {
            NonZero::new(value)
                .ok_or_else(|| format!("{} is not valid, value for usize should be greater than 0", arg))
        })
}
/// Parses a Kademlia record-store filtering strategy from its CLI string form.
///
/// Matching is case-insensitive: `unfiltered` maps to
/// `StoreInserts::Unfiltered`; `filter`, `filterboth`, `filter-both` and
/// `filter_both` all map to `StoreInserts::FilterBoth`.
fn parse_store_inserts(arg: &str) -> Result<StoreInserts, String> {
    let lowered = arg.to_lowercase();
    if lowered == "unfiltered" {
        Ok(StoreInserts::Unfiltered)
    } else if matches!(
        lowered.as_str(),
        "filter" | "filterboth" | "filter-both" | "filter_both"
    ) {
        Ok(StoreInserts::FilterBoth)
    } else {
        Err(format!(
            "{} is not valid filter strategy, possible values are: unfiltered and filterboth",
            arg
        ))
    }
}
/// Parses a k-bucket insert strategy from its CLI string form.
///
/// Matching is case-insensitive: `manual` maps to `BucketInserts::Manual`;
/// `onconnected`, `on-connected` and `on_connected` map to
/// `BucketInserts::OnConnected`.
fn parse_bucket_inserts(arg: &str) -> Result<BucketInserts, String> {
    let lowered = arg.to_lowercase();
    if lowered == "manual" {
        Ok(BucketInserts::Manual)
    } else if matches!(
        lowered.as_str(),
        "onconnected" | "on-connected" | "on_connected"
    ) {
        Ok(BucketInserts::OnConnected)
    } else {
        Err(format!(
            "{} is not valid insert strategy, possible values are: manual and onconnected",
            arg
        ))
    }
}
/// Parses a Kademlia lookup-caching strategy from its CLI string form.
///
/// `disabled` (and the legacy `manual` spelling, kept for backward
/// compatibility) turns caching off; any number that fits in a `u16` enables
/// caching with that many peers. The error message previously was
/// copy-pasted from the bucket-inserts parser and described the wrong option.
fn parse_caching(arg: &str) -> Result<Caching, String> {
    match arg.to_lowercase().as_str() {
        // "disabled" matches the libp2p variant name; "manual" is the
        // historical spelling accepted by earlier versions.
        "disabled" | "manual" => Ok(Caching::Disabled),
        _ => arg
            .parse::<u16>()
            .map(|max_peers| Caching::Enabled { max_peers })
            .map_err(|_| {
                format!(
                    "{} is not a valid caching strategy, possible values are: disabled or a number of peers",
                    arg
                )
            }),
    }
}
/// Parses a gossipsub message-validation mode from its CLI string form.
///
/// Accepted values (case-insensitive): `strict`, `permissive`, `anonymous`,
/// `none`.
fn parse_validation_mode(arg: &str) -> Result<ValidationMode, String> {
    let lowered = arg.to_lowercase();
    if lowered == "strict" {
        Ok(ValidationMode::Strict)
    } else if lowered == "permissive" {
        Ok(ValidationMode::Permissive)
    } else if lowered == "anonymous" {
        Ok(ValidationMode::Anonymous)
    } else if lowered == "none" {
        Ok(ValidationMode::None)
    } else {
        Err(format!(
            "{} is not valid mode, possible values are: strict, permissive, anonymous, none",
            arg
        ))
    }
}
/// Parses a `PeerId` from a hex-encoded multihash given on the CLI.
///
/// Fails with the underlying decode/parse error rendered as a string, as
/// clap `value_parser` expects.
fn parse_peer_id(arg: &str) -> Result<PeerId, String> {
    hex::decode(arg)
        .map_err(|err| err.to_string())
        .and_then(|raw| PeerId::from_bytes(&raw).map_err(|err| err.to_string()))
}
/// Parses a `Keypair` from a hex-encoded protobuf payload given on the CLI.
///
/// Fails with the underlying decode/parse error rendered as a string, as
/// clap `value_parser` expects.
fn parse_keypair(arg: &str) -> Result<Keypair, String> {
    hex::decode(arg)
        .map_err(|err| err.to_string())
        .and_then(|raw| Keypair::from_protobuf_encoding(&raw).map_err(|err| err.to_string()))
}

File diff suppressed because it is too large Load Diff

View File

@ -78,18 +78,19 @@ impl Ui for Headless {
String::from_utf8(data).unwrap_or("Invalid UTF-8".to_string());
println!("{}: {}", from, message);
}
Message::AddPeer(peer) => {
if self.peers.insert(peer) {
Message::AddPeer { chat_peer, topic } => {
if self.peers.insert(chat_peer) {
println!(
"Adding peer:\n\tpeer id: {}\n\tname: {}",
peer.id(),
peer.name()
"Adding peer:\n\tpeer id: {}\n\tname: {}\n\t{}",
chat_peer.id(),
chat_peer.name(),
topic.unwrap_or_default(),
);
}
}
Message::RemovePeer(peer) => {
if self.peers.remove(&peer) {
println!("Removing peer: {peer:?}");
Message::RemovePeer { chat_peer, topic } => {
if self.peers.remove(&chat_peer) {
println!("Removing peer {chat_peer:?} for topic {topic:?}");
}
}
Message::Event(event) => {

View File

@ -114,23 +114,27 @@ impl Ui for Tui {
info!("{peer_str}");
}
}
Message::AddPeer(peer) => {
if chat_widget.peers.insert(peer) {
Message::AddPeer { chat_peer, topic } => {
if chat_widget.peers.insert(chat_peer) {
log_widget.add_line(format!(
"Adding peer:\n\tpeer id: {}\n\tname: {}",
peer.id(),
peer.name()
"Adding peer:\n\tpeer id: {}\n\tname: {}\n\ttopic: {}",
chat_peer.id(),
chat_peer.name(),
topic.unwrap_or_default(),
));
}
}
Message::RemovePeer(peer) => {
if chat_widget.peers.remove(&peer) {
log_widget.add_line(format!("Removing peer: {peer:?}"));
Message::RemovePeer { chat_peer, topic } => {
if chat_widget.peers.remove(&chat_peer) {
log_widget.add_line(format!(
"Removing peer {chat_peer:?} for topic {topic:?}"
));
}
}
Message::Event(event) => {
log_widget.add_line(event);
}
_ => {}
}
}