Mirror of https://github.com/meilisearch/MeiliSearch, synced 2025-07-04 20:37:15 +02:00
Move crates under a sub folder to clean up the code
commit 9c1e54a2c8 (parent 30f3c30389)
1062 changed files with 19 additions and 20 deletions
crates/tracing-trace/src/processor/firefox_profiler.rs (new file, 451 lines)
@@ -0,0 +1,451 @@
use std::collections::HashMap;

use fxprof_processed_profile::{
    CategoryHandle, CategoryPairHandle, CounterHandle, CpuDelta, Frame, FrameFlags, FrameInfo,
    MarkerDynamicField, MarkerFieldFormat, MarkerLocation, MarkerSchema, MarkerSchemaField,
    ProcessHandle, Profile, ProfilerMarker, ReferenceTimestamp, SamplingInterval, StringHandle,
    Timestamp,
};
use serde_json::json;

use crate::entry::{
    Entry, Event, MemoryStats, NewCallsite, NewSpan, ResourceId, SpanClose, SpanEnter, SpanExit,
    SpanId,
};
use crate::{Error, TraceReader};

pub fn to_firefox_profile<R: std::io::Read>(
    trace: TraceReader<R>,
    app: &str,
) -> Result<Profile, Error> {
    let mut profile = Profile::new(
        app,
        ReferenceTimestamp::from_millis_since_unix_epoch(0.0),
        SamplingInterval::from_nanos(15),
    );

    let mut last_timestamp = Timestamp::from_nanos_since_reference(0);
    let main = profile.add_process(app, 0, last_timestamp);

    let mut calls = HashMap::new();
    let mut threads = HashMap::new();
    let mut spans = HashMap::new();

    let category = profile.add_category("general", fxprof_processed_profile::CategoryColor::Blue);
    let subcategory = profile.add_subcategory(category, "subcategory");

    let mut last_memory = MemoryStats::default();

    let mut memory_counters = None;

    for entry in trace {
        let entry = entry?;
        match entry {
            Entry::NewCallsite(callsite) => {
                let string_handle = profile.intern_string(callsite.name.as_ref());
                calls.insert(callsite.call_id, (callsite, string_handle));
            }
            Entry::NewThread(thread) => {
                let thread_handle = profile.add_thread(
                    main,
                    thread.thread_id.to_usize() as u32,
                    last_timestamp,
                    threads.is_empty(),
                );
                if let Some(name) = &thread.name {
                    profile.set_thread_name(thread_handle, name)
                }
                threads.insert(thread.thread_id, thread_handle);
            }
            Entry::NewSpan(span) => {
                spans.insert(span.id, (span, SpanStatus::Outside));
            }
            Entry::SpanEnter(SpanEnter { id, time, memory }) => {
                let (_span, status) = spans.get_mut(&id).unwrap();

                let SpanStatus::Outside = status else {
                    continue;
                };

                *status = SpanStatus::Inside { time, memory };

                last_timestamp = Timestamp::from_nanos_since_reference(time.as_nanos() as u64);

                add_memory_samples(
                    &mut profile,
                    main,
                    memory,
                    last_timestamp,
                    &mut memory_counters,
                    &mut last_memory,
                );
            }
            Entry::SpanExit(SpanExit { id, time, memory }) => {
                let (span, status) = spans.get_mut(&id).unwrap();

                let SpanStatus::Inside { time: begin, memory: begin_memory } = status else {
                    continue;
                };
                last_timestamp = Timestamp::from_nanos_since_reference(time.as_nanos() as u64);

                let begin = *begin;
                let begin_memory = *begin_memory;

                *status = SpanStatus::Outside;

                let span = *span;
                let thread_handle = threads.get(&span.thread_id).unwrap();

                let frames = make_frames(span, &spans, &calls, subcategory);

                profile.add_sample(
                    *thread_handle,
                    to_timestamp(begin),
                    frames.iter().rev().cloned(),
                    CpuDelta::ZERO,
                    1,
                );
                profile.add_sample(
                    *thread_handle,
                    to_timestamp(time),
                    frames.iter().rev().cloned(),
                    CpuDelta::from_nanos((time - begin).as_nanos() as u64),
                    1,
                );

                add_memory_samples(
                    &mut profile,
                    main,
                    memory,
                    last_timestamp,
                    &mut memory_counters,
                    &mut last_memory,
                );

                let (callsite, _) = calls.get(&span.call_id).unwrap();

                let memory_delta =
                    begin_memory.zip(memory).and_then(|(begin, end)| end.checked_sub(begin));
                let marker = SpanMarker { callsite, span: &span, memory_delta };

                profile.add_marker_with_stack(
                    *thread_handle,
                    CategoryHandle::OTHER,
                    &callsite.name,
                    marker,
                    fxprof_processed_profile::MarkerTiming::Interval(
                        to_timestamp(begin),
                        to_timestamp(time),
                    ),
                    frames.iter().rev().cloned(),
                )
            }
            Entry::Event(event) => {
                let span = event
                    .parent_id
                    .as_ref()
                    .and_then(|parent_id| spans.get(parent_id))
                    .and_then(|(span, status)| match status {
                        SpanStatus::Outside => None,
                        SpanStatus::Inside { .. } => Some(span),
                    })
                    .copied();
                let timestamp = to_timestamp(event.time);

                let thread_handle = threads.get(&event.thread_id).unwrap();

                let frames = span
                    .map(|span| make_frames(span, &spans, &calls, subcategory))
                    .unwrap_or_default();

                profile.add_sample(
                    *thread_handle,
                    timestamp,
                    frames.iter().rev().cloned(),
                    CpuDelta::ZERO,
                    1,
                );

                let memory_delta = add_memory_samples(
                    &mut profile,
                    main,
                    event.memory,
                    last_timestamp,
                    &mut memory_counters,
                    &mut last_memory,
                );

                let (callsite, _) = calls.get(&event.call_id).unwrap();

                let marker = EventMarker { callsite, event: &event, memory_delta };

                profile.add_marker_with_stack(
                    *thread_handle,
                    CategoryHandle::OTHER,
                    &callsite.name,
                    marker,
                    fxprof_processed_profile::MarkerTiming::Instant(timestamp),
                    frames.iter().rev().cloned(),
                );

                last_timestamp = timestamp;
            }
            Entry::SpanClose(SpanClose { id, time }) => {
                spans.remove(&id);
                last_timestamp = to_timestamp(time);
            }
        }
    }

    Ok(profile)
}

struct MemoryCounterHandles {
    usage: CounterHandle,
}

impl MemoryCounterHandles {
    fn new(profile: &mut Profile, main: ProcessHandle) -> Self {
        let usage =
            profile.add_counter(main, "mimmalloc", "Memory", "Amount of memory currently in use");
        Self { usage }
    }
}

fn add_memory_samples(
    profile: &mut Profile,
    main: ProcessHandle,
    memory: Option<MemoryStats>,
    last_timestamp: Timestamp,
    memory_counters: &mut Option<MemoryCounterHandles>,
    last_memory: &mut MemoryStats,
) -> Option<MemoryStats> {
    let stats = memory?;

    let memory_counters =
        memory_counters.get_or_insert_with(|| MemoryCounterHandles::new(profile, main));

    profile.add_counter_sample(
        memory_counters.usage,
        last_timestamp,
        stats.resident as f64 - last_memory.resident as f64,
        0,
    );

    let delta = stats.checked_sub(*last_memory);
    *last_memory = stats;
    delta
}

fn to_timestamp(time: std::time::Duration) -> Timestamp {
    Timestamp::from_nanos_since_reference(time.as_nanos() as u64)
}

fn make_frames(
    span: NewSpan,
    spans: &HashMap<SpanId, (NewSpan, SpanStatus)>,
    calls: &HashMap<ResourceId, (NewCallsite, StringHandle)>,
    subcategory: CategoryPairHandle,
) -> Vec<FrameInfo> {
    let mut frames = Vec::new();
    let mut current_span = span;
    loop {
        let frame = make_frame(current_span, calls, subcategory);
        frames.push(frame);
        if let Some(parent) = current_span.parent_id {
            current_span = spans.get(&parent).unwrap().0;
        } else {
            break;
        }
    }
    frames
}

fn make_frame(
    span: NewSpan,
    calls: &HashMap<ResourceId, (NewCallsite, StringHandle)>,
    subcategory: CategoryPairHandle,
) -> FrameInfo {
    let (_, call) = calls.get(&span.call_id).unwrap();
    FrameInfo { frame: Frame::Label(*call), category_pair: subcategory, flags: FrameFlags::empty() }
}

#[derive(Debug, Clone, Copy)]
enum SpanStatus {
    Outside,
    Inside { time: std::time::Duration, memory: Option<MemoryStats> },
}

struct SpanMarker<'a> {
    span: &'a NewSpan,
    callsite: &'a NewCallsite,
    memory_delta: Option<MemoryStats>,
}

impl<'a> ProfilerMarker for SpanMarker<'a> {
    const MARKER_TYPE_NAME: &'static str = "span";

    fn schema() -> MarkerSchema {
        let fields = vec![
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "filename",
                label: "File name",
                format: MarkerFieldFormat::FilePath,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "line",
                label: "Line",
                format: MarkerFieldFormat::Integer,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "module_path",
                label: "Module path",
                format: MarkerFieldFormat::String,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "span_id",
                label: "Span ID",
                format: MarkerFieldFormat::Integer,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "thread_id",
                label: "Thread ID",
                format: MarkerFieldFormat::Integer,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "resident",
                label: "Resident set size, measured in bytes while this function was executing",
                format: MarkerFieldFormat::Bytes,
                searchable: false,
            }),
        ];

        MarkerSchema {
            type_name: Self::MARKER_TYPE_NAME,
            locations: vec![
                MarkerLocation::MarkerTable,
                MarkerLocation::MarkerChart,
                MarkerLocation::TimelineOverview,
            ],
            chart_label: None,
            tooltip_label: Some("{marker.name} - {marker.data.filename}:{marker.data.line}"),
            table_label: Some("{marker.data.filename}:{marker.data.line}"),
            fields,
        }
    }

    fn json_marker_data(&self) -> serde_json::Value {
        let filename = self.callsite.file.as_deref();
        let line = self.callsite.line;
        let module_path = self.callsite.module_path.as_deref();
        let span_id = self.span.id;
        let thread_id = self.span.thread_id;

        let mut value = json!({
            "type": Self::MARKER_TYPE_NAME,
            "filename": filename,
            "line": line,
            "module_path": module_path,
            "span_id": span_id,
            "thread_id": thread_id,
        });

        if let Some(MemoryStats { resident }) = self.memory_delta {
            value["resident"] = json!(resident);
        }

        value
    }
}

struct EventMarker<'a> {
    event: &'a Event,
    callsite: &'a NewCallsite,
    memory_delta: Option<MemoryStats>,
}

impl<'a> ProfilerMarker for EventMarker<'a> {
    const MARKER_TYPE_NAME: &'static str = "tracing-event";

    fn schema() -> MarkerSchema {
        let fields = vec![
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "filename",
                label: "File name",
                format: MarkerFieldFormat::FilePath,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "line",
                label: "Line",
                format: MarkerFieldFormat::Integer,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "module_path",
                label: "Module path",
                format: MarkerFieldFormat::String,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "parent_span_id",
                label: "Parent Span ID",
                format: MarkerFieldFormat::Integer,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "thread_id",
                label: "Thread ID",
                format: MarkerFieldFormat::Integer,
                searchable: true,
            }),
            MarkerSchemaField::Dynamic(MarkerDynamicField {
                key: "resident",
                label: "Resident set size, measured in bytes while this function was executing",
                format: MarkerFieldFormat::Bytes,
                searchable: false,
            }),
        ];

        MarkerSchema {
            type_name: Self::MARKER_TYPE_NAME,
            locations: vec![
                MarkerLocation::MarkerTable,
                MarkerLocation::MarkerChart,
                MarkerLocation::TimelineOverview,
            ],
            chart_label: None,
            tooltip_label: Some("{marker.name} - {marker.data.filename}:{marker.data.line}"),
            table_label: Some("{marker.data.filename}:{marker.data.line}"),
            fields,
        }
    }

    fn json_marker_data(&self) -> serde_json::Value {
        let filename = self.callsite.file.as_deref();
        let line = self.callsite.line;
        let module_path = self.callsite.module_path.as_deref();
        let span_id = self.event.parent_id;
        let thread_id = self.event.thread_id;

        let mut value = json!({
            "type": Self::MARKER_TYPE_NAME,
            "filename": filename,
            "line": line,
            "module_path": module_path,
            "parent_span_id": span_id,
            "thread_id": thread_id,
        });

        if let Some(MemoryStats { resident }) = self.memory_delta {
            value["resident"] = json!(resident);
        }

        value
    }
}
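Not part of the commit: a minimal sketch of how this processor could be driven end to end, converting a previously recorded trace into a JSON profile that the Firefox Profiler UI can load. It assumes a TraceReader::new(reader) constructor, that the crate's Error type implements std::error::Error, and that fxprof_processed_profile::Profile is serializable with serde; none of these are shown in this diff.

// Hypothetical usage sketch (not in this commit).
use std::fs::File;
use std::io::BufReader;

use tracing_trace::processor::firefox_profiler::to_firefox_profile;
use tracing_trace::TraceReader; // constructor assumed, not shown in this diff

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Open a previously recorded trace and replay its entries.
    let trace = TraceReader::new(BufReader::new(File::open("trace.dat")?)); // assumed constructor
    let profile = to_firefox_profile(trace, "meilisearch")?;
    // Assumes `Profile` implements `serde::Serialize`; the resulting JSON can be
    // loaded in the Firefox Profiler UI.
    serde_json::to_writer(File::create("firefox_profile.json")?, &profile)?;
    Ok(())
}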
crates/tracing-trace/src/processor/fmt.rs (new file, 197 lines)
@@ -0,0 +1,197 @@
use std::collections::HashMap;
use std::io::Read;

use byte_unit::UnitType;

use crate::entry::{
    Entry, Event, MemoryStats, NewCallsite, NewSpan, NewThread, ResourceId, SpanClose, SpanEnter,
    SpanExit, SpanId,
};
use crate::{Error, TraceReader};

#[derive(Debug, Clone, Copy)]
enum SpanStatus {
    Outside,
    Inside(std::time::Duration),
}

pub fn print_trace<R: Read>(trace: TraceReader<R>) -> Result<(), Error> {
    let mut calls = HashMap::new();
    let mut threads = HashMap::new();
    let mut spans = HashMap::new();
    for entry in trace {
        let entry = entry?;
        match entry {
            Entry::NewCallsite(callsite) => {
                calls.insert(callsite.call_id, callsite);
            }
            Entry::NewThread(NewThread { thread_id, name }) => {
                threads.insert(thread_id, name);
            }
            Entry::NewSpan(span) => {
                spans.insert(span.id, (span, SpanStatus::Outside));
            }
            Entry::SpanEnter(SpanEnter { id, time, memory }) => {
                let (span, status) = spans.get_mut(&id).unwrap();

                let SpanStatus::Outside = status else {
                    continue;
                };

                *status = SpanStatus::Inside(time);

                let span = *span;

                match memory {
                    Some(stats) => println!(
                        "[{}]{}::{} ({}) <-",
                        print_thread(&threads, span.thread_id),
                        print_backtrace(&spans, &calls, &span),
                        print_span(&calls, &span),
                        print_memory(stats),
                    ),
                    None => println!(
                        "[{}]{}::{} <-",
                        print_thread(&threads, span.thread_id),
                        print_backtrace(&spans, &calls, &span),
                        print_span(&calls, &span),
                    ),
                }
            }
            Entry::SpanExit(SpanExit { id, time, memory }) => {
                let (span, status) = spans.get_mut(&id).unwrap();

                let SpanStatus::Inside(begin) = status else {
                    continue;
                };
                let begin = *begin;

                *status = SpanStatus::Outside;

                let span = *span;

                match memory {
                    Some(stats) => println!(
                        "[{}]{}::{} ({}) -> {}",
                        print_thread(&threads, span.thread_id),
                        print_backtrace(&spans, &calls, &span),
                        print_span(&calls, &span),
                        print_memory(stats),
                        print_duration(time - begin),
                    ),
                    None => println!(
                        "[{}]{}::{} -> {}",
                        print_thread(&threads, span.thread_id),
                        print_backtrace(&spans, &calls, &span),
                        print_span(&calls, &span),
                        print_duration(time - begin),
                    ),
                }
            }
            Entry::SpanClose(SpanClose { id, time: _ }) => {
                spans.remove(&id);
            }
            Entry::Event(Event { call_id, thread_id, parent_id, time: _, memory }) => {
                let parent_span = parent_id.and_then(|parent_id| spans.get(&parent_id)).and_then(
                    |(span, status)| match status {
                        SpanStatus::Outside => None,
                        SpanStatus::Inside(_) => Some(span),
                    },
                );
                match (parent_span, memory) {
                    (Some(parent_span), Some(stats)) => println!(
                        "[{}]{}::{} ({}) event: {}",
                        print_thread(&threads, thread_id),
                        print_backtrace(&spans, &calls, parent_span),
                        print_span(&calls, parent_span),
                        print_memory(stats),
                        print_call(&calls, call_id)
                    ),
                    (Some(parent_span), None) => println!(
                        "[{}]{}::{} event: {}",
                        print_thread(&threads, thread_id),
                        print_backtrace(&spans, &calls, parent_span),
                        print_span(&calls, parent_span),
                        print_call(&calls, call_id)
                    ),
                    (None, None) => println!(
                        "[{}] event: {}",
                        print_thread(&threads, thread_id),
                        print_call(&calls, call_id)
                    ),
                    (None, Some(stats)) => println!(
                        "[{}] ({}) event: {}",
                        print_thread(&threads, thread_id),
                        print_memory(stats),
                        print_call(&calls, call_id)
                    ),
                }
            }
        }
    }
    Ok(())
}

fn print_thread(threads: &HashMap<ResourceId, Option<String>>, thread_id: ResourceId) -> String {
    let thread = threads.get(&thread_id).unwrap();
    let thread =
        thread.as_ref().cloned().unwrap_or_else(|| format!("ThreadId({})", thread_id.to_usize()));
    thread
}

fn print_backtrace(
    spans: &HashMap<SpanId, (NewSpan, SpanStatus)>,
    calls: &HashMap<ResourceId, NewCallsite>,
    span: &NewSpan,
) -> String {
    let mut parents = Vec::new();
    let mut current = span.parent_id;
    while let Some(current_id) = &current {
        let (span, _) = spans.get(current_id).unwrap();
        let callsite = calls.get(&span.call_id).unwrap();
        parents.push(callsite.name.clone());

        current = span.parent_id;
    }

    let x: Vec<String> = parents.into_iter().rev().map(|x| x.to_string()).collect();
    x.join("::")
}

fn print_span(calls: &HashMap<ResourceId, NewCallsite>, span: &NewSpan) -> String {
    print_call(calls, span.call_id)
}

fn print_call(calls: &HashMap<ResourceId, NewCallsite>, call_id: ResourceId) -> String {
    let callsite = calls.get(&call_id).unwrap();
    match (callsite.file.clone(), callsite.line) {
        (Some(file), None) => format!("{} ({})", callsite.name, file),
        (Some(file), Some(line)) => format!("{} ({}:{})", callsite.name, file, line),
        _ => callsite.name.to_string(),
    }
}

fn print_duration(duration: std::time::Duration) -> String {
    if duration.as_nanos() < 1000 {
        format!("{}ns", duration.as_nanos())
    } else if duration.as_micros() < 1000 {
        format!("{}μs", duration.as_micros())
    } else if duration.as_millis() < 1000 {
        format!("{}ms", duration.as_millis())
    } else if duration.as_secs() < 120 {
        format!("{}s", duration.as_secs())
    } else if duration.as_secs_f64() / 60.0 < 60.0 {
        format!("{}min", duration.as_secs_f64() / 60.0)
    } else if duration.as_secs_f64() / 3600.0 < 8.0 {
        format!("{}h", duration.as_secs_f64() / 3600.0)
    } else {
        format!("{}d", duration.as_secs_f64() / 3600.0 / 24.0)
    }
}

/// Format only the allocated bytes, deallocated bytes and reallocated bytes in GiB, MiB, KiB, Bytes.
fn print_memory(MemoryStats { resident }: MemoryStats) -> String {
    use byte_unit::Byte;
    let rss_bytes = Byte::from_u64(resident).get_appropriate_unit(UnitType::Binary);
    format!("RSS {rss_bytes:.2}")
}
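Again not part of the commit: the plain-text processor can be exercised the same way as the Firefox one above; TraceReader::new and the Error bound are the same assumptions as before.

// Hypothetical usage sketch (not in this commit): pretty-print a recorded trace.
use std::fs::File;
use std::io::BufReader;

use tracing_trace::processor::fmt::print_trace;
use tracing_trace::TraceReader; // constructor assumed, as above

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let trace = TraceReader::new(BufReader::new(File::open("trace.dat")?));
    // Prints one line per span enter/exit and per event, with thread, backtrace,
    // RSS and duration information.
    print_trace(trace)?;
    Ok(())
}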
crates/tracing-trace/src/processor/mod.rs (new file, 3 lines)
@@ -0,0 +1,3 @@
pub mod firefox_profiler;
pub mod fmt;
pub mod span_stats;
crates/tracing-trace/src/processor/span_stats.rs (new file, 157 lines)
@@ -0,0 +1,157 @@
use std::collections::{BTreeMap, HashMap};
use std::ops::Range;
use std::time::Duration;

use serde::{Deserialize, Serialize};

use crate::entry::{Entry, NewCallsite, SpanClose, SpanEnter, SpanExit};
use crate::{Error, TraceReader};

#[derive(Debug, Clone, Copy)]
enum SpanStatus {
    Outside,
    Inside(std::time::Duration),
}

#[derive(Serialize, Deserialize)]
pub struct CallStats {
    pub call_count: usize,
    pub time: u64,
    pub self_time: u64,
}

#[derive(Debug, Default)]
pub struct SelfTime {
    child_ranges: Vec<Range<Duration>>,
}

impl SelfTime {
    pub fn new() -> Self {
        Default::default()
    }

    pub fn add_child_range(&mut self, child_range: Range<Duration>) {
        self.child_ranges.push(child_range)
    }

    pub fn self_duration(&mut self, self_range: Range<Duration>) -> Duration {
        if self.child_ranges.is_empty() {
            return self_range.end - self_range.start;
        }

        // by sorting child ranges by their start time,
        // we make sure that no child will start before the last one we visited.
        self.child_ranges
            .sort_by(|left, right| left.start.cmp(&right.start).then(left.end.cmp(&right.end)));
        // self duration computed by adding all the segments where the span is not executing a child
        let mut self_duration = Duration::from_nanos(0);

        // last point in time where we are certain that this span was not executing a child.
        let mut committed_point = self_range.start;

        for child_range in &self.child_ranges {
            if child_range.start > committed_point {
                // we add to the self duration the point between the end of the latest span and the beginning of the next span
                self_duration += child_range.start - committed_point;
            }
            if committed_point < child_range.end {
                // then we set ourselves to the end of the latest span
                committed_point = child_range.end;
            }
        }

        self_duration
    }
}

pub fn to_call_stats<R: std::io::Read>(
    trace: TraceReader<R>,
) -> Result<BTreeMap<String, CallStats>, Error> {
    let mut calls = HashMap::new();
    let mut spans = HashMap::new();
    let mut last_point = Duration::from_nanos(0);
    let mut first_point = None;
    let mut total_self_time = SelfTime::new();
    for entry in trace {
        let entry = entry?;
        match entry {
            Entry::NewCallsite(callsite) => {
                calls.insert(callsite.call_id, (callsite, vec![]));
            }
            Entry::NewThread(_) => {}
            Entry::NewSpan(span) => {
                spans.insert(span.id, (span, SpanStatus::Outside, SelfTime::new()));
            }
            Entry::SpanEnter(SpanEnter { id, time, memory: _ }) => {
                first_point.get_or_insert(time);
                let (_, status, _) = spans.get_mut(&id).unwrap();

                let SpanStatus::Outside = status else {
                    continue;
                };

                *status = SpanStatus::Inside(time);
            }
            Entry::SpanExit(SpanExit { id, time: end, memory: _ }) => {
                let (span, status, self_time) = spans.get_mut(&id).unwrap();

                let SpanStatus::Inside(begin) = status else {
                    continue;
                };
                let begin = *begin;

                if last_point < end {
                    last_point = end;
                }

                *status = SpanStatus::Outside;

                let self_range = begin..end;

                let self_duration = self_time.self_duration(self_range.clone());
                *self_time = SelfTime::new();

                let span = *span;
                if let Some(parent_id) = span.parent_id {
                    let (_, _, parent_self_time) = spans.get_mut(&parent_id).unwrap();
                    parent_self_time.add_child_range(self_range.clone())
                }
                total_self_time.add_child_range(self_range);
                let (_, call_list) = calls.get_mut(&span.call_id).unwrap();
                call_list.push((end - begin, self_duration));
            }
            Entry::SpanClose(SpanClose { id, time: _ }) => {
                spans.remove(&id);
            }
            Entry::Event(_) => {}
        }
    }

    let total_self_time = first_point
        .map(|first_point| (first_point, total_self_time.self_duration(first_point..last_point)));

    Ok(calls
        .into_iter()
        .map(|(_, (call_site, calls))| (site_to_string(call_site), calls_to_stats(calls)))
        .chain(total_self_time.map(|(first_point, total_self_time)| {
            (
                "::meta::total".to_string(),
                CallStats {
                    call_count: 1,
                    time: (last_point - first_point).as_nanos() as u64,
                    self_time: total_self_time.as_nanos() as u64,
                },
            )
        }))
        .collect())
}

fn site_to_string(call_site: NewCallsite) -> String {
    format!("{}::{}", call_site.target, call_site.name)
}
fn calls_to_stats(calls: Vec<(Duration, Duration)>) -> CallStats {
    let nb = calls.len();
    let sum: Duration = calls.iter().map(|(total, _)| total).sum();
    let self_sum: Duration = calls.iter().map(|(_, self_duration)| self_duration).sum();
    CallStats { call_count: nb, time: sum.as_nanos() as u64, self_time: self_sum.as_nanos() as u64 }
}
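A self-contained illustration (not part of the commit) of how SelfTime subtracts child ranges from a span's wall time: only items defined in span_stats.rs above are used, and the import path assumes the crate exposes this module as tracing_trace::processor::span_stats.

// Illustration (not in this commit): self time is the wall time of a span minus
// the union of its child ranges, as computed by SelfTime::self_duration above.
use std::time::Duration;

use tracing_trace::processor::span_stats::SelfTime; // path assumed from `pub mod span_stats`

fn main() {
    let ms = Duration::from_millis;
    let mut self_time = SelfTime::new();

    // A span that ran from t=0ms to t=100ms spent 20..60ms in one child and
    // 50..100ms in another; the two child ranges overlap.
    self_time.add_child_range(ms(20)..ms(60));
    self_time.add_child_range(ms(50)..ms(100));

    // Only the 0..20ms segment is not covered by any child, so 20ms is self time.
    assert_eq!(self_time.self_duration(ms(0)..ms(100)), ms(20));
}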