@@ -74,7 +74,7 @@ pub struct Report {
 }
 
 impl Report {
-    pub async fn new() -> Self {
+    pub async fn new() -> anyhow::Result<Self> {
         let mut upt: f64 = 0.0;
         if let Ok(uptime) = uptime_lib::get() {
             upt = uptime.as_secs_f64();
@@ -91,9 +91,9 @@ impl Report {
             cpu_count = info.cpus().len();
             mem_total = info.total_memory();
         }
-        let ingestor_metrics = fetch_ingestors_metrics().await;
+        let ingestor_metrics = fetch_ingestors_metrics().await?;
 
-        Self {
+        Ok(Self {
             deployment_id: storage::StorageMetadata::global().deployment_id,
             uptime: upt,
             report_created_at: Utc::now(),
@@ -113,7 +113,7 @@ impl Report {
             total_json_bytes: ingestor_metrics.4,
             total_parquet_bytes: ingestor_metrics.5,
             metrics: build_metrics().await,
-        }
+        })
     }
 
     pub async fn send(&self) {
@@ -148,7 +148,7 @@ fn total_event_stats() -> (u64, u64, u64) {
     (total_events, total_json_bytes, total_parquet_bytes)
 }
 
-async fn fetch_ingestors_metrics() -> (u64, u64, usize, u64, u64, u64) {
+async fn fetch_ingestors_metrics() -> anyhow::Result<(u64, u64, usize, u64, u64, u64)> {
     let event_stats = total_event_stats();
     let mut node_metrics =
         NodeMetrics::new(total_streams(), event_stats.0, event_stats.1, event_stats.2);
@@ -181,24 +181,24 @@ async fn fetch_ingestors_metrics() -> (u64, u64, usize, u64, u64, u64) {
                 .header(header::CONTENT_TYPE, "application/json")
                 .send()
                 .await
-                .unwrap(); // should respond
+                .expect("should respond");
 
-            let data = serde_json::from_slice::<NodeMetrics>(&resp.bytes().await.unwrap()).unwrap();
+            let data = serde_json::from_slice::<NodeMetrics>(&resp.bytes().await?)?;
             vec.push(data);
             active_ingestors += 1;
         }
 
         node_metrics.accumulate(&mut vec);
     }
 
-    (
+    Ok((
         active_ingestors,
         offline_ingestors,
         node_metrics.stream_count,
         node_metrics.total_events_count,
         node_metrics.total_json_bytes,
         node_metrics.total_parquet_bytes,
-    )
+    ))
 }
 
 async fn build_metrics() -> HashMap<String, Value> {
@@ -220,14 +220,23 @@ async fn build_metrics() -> HashMap<String, Value> {
     metrics
 }
 
-pub fn init_analytics_scheduler() {
+pub fn init_analytics_scheduler() -> anyhow::Result<()> {
     log::info!("Setting up scheduler for anonymous user analytics");
 
     let mut scheduler = AsyncScheduler::new();
     scheduler
         .every(ANALYTICS_SEND_INTERVAL_SECONDS)
         .run(move || async {
-            Report::new().await.send().await;
+            Report::new()
+                .await
+                .unwrap_or_else(|err| {
+                    // panicking because this runs on a separate thread
+                    // TODO: a better way to handle this
+                    log::error!("Error while sending analytics: {}", err.to_string());
+                    panic!("{}", err.to_string());
+                })
+                .send()
+                .await;
         });
 
     tokio::spawn(async move {
@@ -236,6 +245,8 @@ pub fn init_analytics_scheduler() {
             tokio::time::sleep(Duration::from_secs(10)).await;
         }
     });
+
+    Ok(())
 }
 
 #[derive(Serialize, Deserialize, Default, Debug)]
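With this change, both Report::new() and init_analytics_scheduler() return anyhow::Result, so callers are expected to propagate or handle setup failures instead of relying on panics inside the analytics code. A minimal caller-side sketch of how the new signature might be consumed at startup; the start_server function and the server_config_allows_analytics check are hypothetical and not part of this commit:

// Hypothetical startup sketch (not from this commit): propagate analytics
// setup failures with `?` instead of swallowing them.
async fn start_server() -> anyhow::Result<()> {
    // `server_config_allows_analytics()` is an assumed config helper.
    if server_config_allows_analytics() {
        // init_analytics_scheduler() now returns anyhow::Result<()>.
        analytics::init_analytics_scheduler()?;
    }
    // ... start the HTTP server here ...
    Ok(())
}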