@@ -22,50 +22,262 @@ use hyperlight_common::outb::OutBAction;
 use tracing_core::Event;
 use tracing_core::span::{Attributes, Id, Record};
 
+use crate::visitor::FieldsVisitor;
+use crate::{
+    GuestEvent, GuestSpan, MAX_FIELD_KEY_LENGTH, MAX_FIELD_VALUE_LENGTH, MAX_NAME_LENGTH,
+    MAX_NO_OF_EVENTS, MAX_NO_OF_FIELDS, MAX_NO_OF_SPANS, MAX_TARGET_LENGTH, invariant_tsc,
+};
+
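+/// Data the host needs to locate and read a batch of spans and events in
+/// guest memory.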
+pub struct TraceBatchInfo {
+    /// The timestamp counter at the start of the guest execution.
+    pub guest_start_tsc: u64,
+    /// Pointer to the spans in the guest memory.
+    pub spans_ptr: u64,
+    /// Pointer to the events in the guest memory.
+    pub events_ptr: u64,
+}
+
+/// Helper type to define the guest state with the configured constants.
+pub type GuestState = TraceState<
+    MAX_NO_OF_SPANS,
+    MAX_NO_OF_EVENTS,
+    MAX_NAME_LENGTH,
+    MAX_TARGET_LENGTH,
+    MAX_FIELD_KEY_LENGTH,
+    MAX_FIELD_VALUE_LENGTH,
+    MAX_NO_OF_FIELDS,
+>;
+
 /// Internal state of the tracing subscriber
-pub(crate) struct GuestState {
+pub(crate) struct TraceState<
+    const SP: usize,
+    const EV: usize,
+    const N: usize,
+    const T: usize,
+    const FK: usize,
+    const FV: usize,
+    const F: usize,
+> {
+    /// Whether we need to clean up the state on the next access
+    cleanup_needed: bool,
     /// The timestamp counter at the start of the guest execution.
     guest_start_tsc: u64,
+    /// Next span ID to allocate
+    next_id: AtomicU64,
+    /// All spans collected
+    spans: hl::Vec<GuestSpan<N, T, FK, FV, F>, SP>,
+    /// All events collected
+    events: hl::Vec<GuestEvent<N, FK, FV, F>, EV>,
+    /// Stack of active spans
+    stack: hl::Vec<u64, SP>,
 }
 
-impl GuestState {
+impl<
+    const SP: usize,
+    const EV: usize,
+    const N: usize,
+    const T: usize,
+    const FK: usize,
+    const FV: usize,
+    const F: usize,
+> TraceState<SP, EV, N, T, FK, FV, F>
+{
     pub(crate) fn new(guest_start_tsc: u64) -> Self {
-        Self { guest_start_tsc }
+        Self {
+            cleanup_needed: false,
+            guest_start_tsc,
+            next_id: AtomicU64::new(1),
+            spans: hl::Vec::new(),
+            stack: hl::Vec::new(),
+            events: hl::Vec::new(),
+        }
+    }
+
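+    /// Allocate the next span ID. IDs start at 1 because `Id::from_u64`
+    /// panics on zero; the relaxed load/store pair assumes span IDs are not
+    /// allocated concurrently inside the guest.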
+    pub(crate) fn alloc_id(&self) -> (u64, Id) {
+        let n = self.next_id.load(Ordering::Relaxed);
+        self.next_id.store(n + 1, Ordering::Relaxed);
+
+        (n, Id::from_u64(n))
+    }
+
+    /// Clean up internal state by removing closed spans and events.
+    /// This ensures that after a VM exit we keep the spans that are still
+    /// active (on the stack) and remove all other spans and events.
+    pub fn clean(&mut self) {
+        // Remove all spans that have an end timestamp (closed spans)
+        self.spans.retain(|s| s.end_tsc.is_none());
+
+        // Remove all events
+        self.events.clear();
+    }
+
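+    /// Lazily run `clean` if a previous VM exit marked the state as dirty.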
+    #[inline(always)]
+    fn verify_and_clean(&mut self) {
+        if self.cleanup_needed {
+            self.clean();
+            self.cleanup_needed = false;
+        }
+    }
+
+    /// Triggers a VM exit to flush the current spans to the host.
+    /// This also clears the internal state to start fresh.
+    fn send_to_host(&mut self) {
+        let guest_start_tsc = self.guest_start_tsc;
+        let spans_ptr = &self.spans as *const _ as u64;
+        let events_ptr = &self.events as *const _ as u64;
+
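+        // The `out` instruction triggers a VM exit; the host identifies the
+        // TraceBatch action from the port/magic value and reads the batch from
+        // guest memory using the values passed in r9-r11.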
+        unsafe {
+            core::arch::asm!("out dx, al",
+                // Port value for tracing
+                in("dx") OutBAction::TraceBatch as u16,
+                // Additional magic number to identify the action
+                in("r8") OutBAction::TraceBatch as u64,
+                in("r9") guest_start_tsc,
+                in("r10") spans_ptr,
+                in("r11") events_ptr,
+            );
+        }
+
+        self.clean();
     }
 
     /// Set a new guest start tsc
     pub(crate) fn set_start_tsc(&mut self, guest_start_tsc: u64) {
         self.guest_start_tsc = guest_start_tsc;
     }
 
+    /// Closes the trace by ending all spans.
+    /// NOTE: This expects a subsequent outb call to send the spans to the host.
+    pub(crate) fn end_trace(&mut self) {
+        for span in self.spans.iter_mut() {
+            if span.end_tsc.is_none() {
+                span.end_tsc = Some(invariant_tsc::read_tsc());
+            }
+        }
+
+        // Empty the stack
+        while self.stack.pop().is_some() {
+            // Pop all remaining spans from the stack
+        }
+
+        // Mark for cleanup when re-entering the VM because we might not
+        // re-enter at the same place we exited (e.g. halt)
+        self.cleanup_needed = true;
+    }
+
+    /// Returns the information the host needs in order to read the spans.
+    pub(crate) fn guest_trace_info(&mut self) -> TraceBatchInfo {
+        TraceBatchInfo {
+            guest_start_tsc: self.guest_start_tsc,
+            spans_ptr: &self.spans as *const _ as u64,
+            events_ptr: &self.events as *const _ as u64,
+        }
+    }
+
     /// Create a new span and push it on the stack
     pub(crate) fn new_span(&mut self, attrs: &Attributes) -> Id {
-        unimplemented!()
+        self.verify_and_clean();
+        let (idn, id) = self.alloc_id();
+
+        let md = attrs.metadata();
+        let mut name = hl::String::<N>::new();
+        let mut target = hl::String::<T>::new();
+        // Truncate name and target if they exceed the allocated capacity
+        let _ = name.push_str(&md.name()[..usize::min(md.name().len(), name.capacity())]);
+        let _ = target.push_str(&md.target()[..usize::min(md.target().len(), target.capacity())]);
+
+        // Visit fields to collect them
+        let mut fields = hl::Vec::new();
+        attrs.record(&mut FieldsVisitor::<FK, FV, F> { out: &mut fields });
+
+        // Find parent from current stack top (if any)
+        let parent_id = self.stack.last().copied();
+
+        let span = GuestSpan::<N, T, FK, FV, F> {
+            id: idn,
+            parent_id,
+            level: (*md.level()).into(),
+            name,
+            target,
+            start_tsc: invariant_tsc::read_tsc(),
+            end_tsc: None,
+            fields,
+        };
+
+        let spans = &mut self.spans;
+        // Should never fail because we flush when full
+        let _ = spans.push(span);
+
+        // In case the spans Vec is full, we need to report them to the host
+        if spans.len() == spans.capacity() {
+            self.send_to_host();
+        }
+
+        id
     }
 
     /// Record an event in the current span (top of the stack)
     pub(crate) fn event(&mut self, event: &Event<'_>) {
-        unimplemented!()
+        self.verify_and_clean();
+        let stack = &mut self.stack;
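+        // An event recorded outside any span gets a parent_id of 0 (no parent).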
+        let parent_id = stack.last().copied().unwrap_or(0);
+
+        let md = event.metadata();
+        let mut name = hl::String::<N>::new();
+        // Truncate the name if it exceeds the allocated capacity
+        let _ = name.push_str(&md.name()[..usize::min(md.name().len(), name.capacity())]);
+
+        let mut fields = hl::Vec::new();
+        event.record(&mut FieldsVisitor::<FK, FV, F> { out: &mut fields });
+
+        let ev = GuestEvent {
+            parent_id,
+            level: (*md.level()).into(),
+            name,
+            tsc: invariant_tsc::read_tsc(),
+            fields,
+        };
+
+        // Should never fail because we flush when full
+        let _ = self.events.push(ev);
+
+        // Flush the buffer to the host if it is full
+        if self.events.len() >= self.events.capacity() {
+            self.send_to_host();
+        }
     }
 
     /// Record new values for an existing span
     pub(crate) fn record(&mut self, id: &Id, values: &Record<'_>) {
-        unimplemented!()
+        let spans = &mut self.spans;
+        if let Some(s) = spans.iter_mut().find(|s| s.id == id.into_u64()) {
+            let mut v = hl::Vec::new();
+            values.record(&mut FieldsVisitor::<FK, FV, F> { out: &mut v });
+            s.fields.extend(v);
+        }
     }
 
     /// Enter a span (push it on the stack)
     pub(crate) fn enter(&mut self, id: &Id) {
-        unimplemented!()
+        let st = &mut self.stack;
+        let _ = st.push(id.into_u64());
     }
 
     /// Exit a span (pop it from the stack)
     pub(crate) fn exit(&mut self, _id: &Id) {
-        unimplemented!()
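+        // Spans are assumed to be exited in LIFO order, so popping the top of
+        // the stack corresponds to the span identified by `_id`.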
+        let st = &mut self.stack;
+        let _ = st.pop();
     }
 
     /// Try to close a span by ID, returning true if successful
     /// Records the end timestamp for the span.
     pub(crate) fn try_close(&mut self, id: Id) -> bool {
-        unimplemented!()
+        let spans = &mut self.spans;
+        if let Some(s) = spans.iter_mut().find(|s| s.id == id.into_u64()) {
+            s.end_tsc = Some(invariant_tsc::read_tsc());
+            true
+        } else {
+            false
+        }
     }
 }