diff --git a/CHANGELOG.md b/CHANGELOG.md
index e55b9a6..6116dd5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
 # Changelog

+## [11.0.0] - 2023-12-14
+
+### Changed
+- `measureme`: Update StringId and Addr sizes from u32 to u64 ([GH-216])
+- `analyzeme`: v9 file format, which uses larger events ([GH-216])
+
 ## [10.1.2] - 2023-12-14

 ### Changed
@@ -232,3 +238,4 @@
 [GH-208]: https://github.com/rust-lang/measureme/pull/208
 [GH-209]: https://github.com/rust-lang/measureme/pull/209
 [GH-211]: https://github.com/rust-lang/measureme/pull/211
+[GH-216]: https://github.com/rust-lang/measureme/pull/216
diff --git a/analyzeme/Cargo.toml b/analyzeme/Cargo.toml
index 463d732..ee30421 100644
--- a/analyzeme/Cargo.toml
+++ b/analyzeme/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "analyzeme"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Wesley Wiser ", "Michael Woerister "]
 edition = "2018"
 license = "MIT OR Apache-2.0"
@@ -14,7 +14,13 @@
 serde = { version = "1.0", features = ["derive"] }

 # Depending on older versions of this crate allows us to keep supporting older
 # file formats.
+
+# File format: v7
 analyzeme_9_2_0 = { package = "analyzeme", git = "https://github.com/rust-lang/measureme", tag = "9.2.0" }
+# File format: v8
+decodeme_10_1_2 = { package = "decodeme", git = "https://github.com/rust-lang/measureme", tag = "10.1.2" }
+measureme_10_1_2 = { package = "measureme", git = "https://github.com/rust-lang/measureme", tag = "10.1.2" }
+
 [dev-dependencies]
 flate2 = "1.0"
diff --git a/analyzeme/src/file_formats/mod.rs b/analyzeme/src/file_formats/mod.rs
index 2864f47..7c2e27f 100644
--- a/analyzeme/src/file_formats/mod.rs
+++ b/analyzeme/src/file_formats/mod.rs
@@ -3,13 +3,14 @@ use std::fmt::Debug;

 pub mod v7;
 pub mod v8;
+pub mod v9;

-pub use v8 as current;
+pub use v9 as current;

 /// The [EventDecoder] knows how to decode events for a specific file format.
 pub trait EventDecoder: Debug + Send + Sync {
     fn num_events(&self) -> usize;
-    fn metadata(&self) -> &Metadata;
+    fn metadata(&self) -> Metadata;
     fn decode_full_event<'a>(&'a self, event_index: usize) -> Event<'a>;
     fn decode_lightweight_event<'a>(&'a self, event_index: usize) -> LightweightEvent;
 }
diff --git a/analyzeme/src/file_formats/v7.rs b/analyzeme/src/file_formats/v7.rs
index 995853d..57e4570 100644
--- a/analyzeme/src/file_formats/v7.rs
+++ b/analyzeme/src/file_formats/v7.rs
@@ -41,8 +41,8 @@ impl super::EventDecoder for EventDecoder {
         self.legacy_profiling_data.num_events()
     }

-    fn metadata(&self) -> &Metadata {
-        &self.metadata
+    fn metadata(&self) -> Metadata {
+        self.metadata.clone()
     }

     fn decode_full_event(&self, event_index: usize) -> Event<'_> {
diff --git a/analyzeme/src/file_formats/v8.rs b/analyzeme/src/file_formats/v8.rs
index 4ea4c9c..23d01fc 100644
--- a/analyzeme/src/file_formats/v8.rs
+++ b/analyzeme/src/file_formats/v8.rs
@@ -1,26 +1,84 @@
 //! This module implements file loading for the v8 file format used until
-//! crate version 10.0.0
+//! crate version 10.0.0.
+//!
+//! The difference between v8 and v9 is the expansion of the StringId and Addr
+//! types from u32 to u64. Most of the EventDecoder interface is unchanged, but
+//! the construction in "EventDecoder::new", which parses the stream of events,
+//! varies based on these sizes.
+//!
+//! This file provides conversions to current interfaces, relying on an
+//! old version of this crate to parse the u32-based v8 version.

-use crate::{Event, LightweightEvent};
-pub use decodeme::EventDecoder;
+use crate::{Event, EventPayload, LightweightEvent, Timestamp};
 use decodeme::Metadata;
+use decodeme_10_1_2::event_payload::EventPayload as OldEventPayload;
+use decodeme_10_1_2::event_payload::Timestamp as OldTimestamp;
+use decodeme_10_1_2::lightweight_event::LightweightEvent as OldLightweightEvent;
+pub use decodeme_10_1_2::EventDecoder;
+use decodeme_10_1_2::Metadata as OldMetadata;

-pub const FILE_FORMAT: u32 = decodeme::CURRENT_FILE_FORMAT_VERSION;
+pub const FILE_FORMAT: u32 = measureme_10_1_2::file_header::CURRENT_FILE_FORMAT_VERSION;
+
+// NOTE: These are functionally a hand-rolled "impl From<Old> for New", but
+// given orphan rules, it seems undesirable to spread version-specific
+// converters around the codebase.
+//
+// In lieu of an idiomatic type conversion, we at least centralize compatibility
+// with the old "v8" version to this file.
+
+fn v8_metadata_as_current(old: &OldMetadata) -> Metadata {
+    Metadata {
+        start_time: old.start_time,
+        process_id: old.process_id,
+        cmd: old.cmd.clone(),
+    }
+}
+
+fn v8_timestamp_as_current(old: OldTimestamp) -> Timestamp {
+    match old {
+        OldTimestamp::Interval { start, end } => Timestamp::Interval { start, end },
+        OldTimestamp::Instant(t) => Timestamp::Instant(t),
+    }
+}
+
+fn v8_event_payload_as_current(old: OldEventPayload) -> EventPayload {
+    match old {
+        OldEventPayload::Timestamp(t) => EventPayload::Timestamp(v8_timestamp_as_current(t)),
+        OldEventPayload::Integer(t) => EventPayload::Integer(t),
+    }
+}
+
+fn v8_lightweightevent_as_current(old: OldLightweightEvent) -> LightweightEvent {
+    LightweightEvent {
+        event_index: old.event_index,
+        thread_id: old.thread_id,
+        payload: v8_event_payload_as_current(old.payload),
+    }
+}

 impl super::EventDecoder for EventDecoder {
     fn num_events(&self) -> usize {
         self.num_events()
     }

-    fn metadata(&self) -> &Metadata {
-        self.metadata()
+    fn metadata(&self) -> Metadata {
+        let old = self.metadata();
+        v8_metadata_as_current(&old)
     }

     fn decode_full_event(&self, event_index: usize) -> Event<'_> {
-        self.decode_full_event(event_index)
+        let old = self.decode_full_event(event_index);
+
+        Event {
+            event_kind: old.event_kind,
+            label: old.label,
+            additional_data: old.additional_data,
+            payload: v8_event_payload_as_current(old.payload),
+            thread_id: old.thread_id,
+        }
     }

     fn decode_lightweight_event(&self, event_index: usize) -> LightweightEvent {
-        self.decode_lightweight_event(event_index)
+        v8_lightweightevent_as_current(self.decode_lightweight_event(event_index))
     }
 }
diff --git a/analyzeme/src/file_formats/v9.rs b/analyzeme/src/file_formats/v9.rs
new file mode 100644
index 0000000..762bc2d
--- /dev/null
+++ b/analyzeme/src/file_formats/v9.rs
@@ -0,0 +1,25 @@
+//! This module implements file loading for the v9 file format
+
+use crate::{Event, LightweightEvent};
+pub use decodeme::EventDecoder;
+use decodeme::Metadata;
+
+pub const FILE_FORMAT: u32 = decodeme::CURRENT_FILE_FORMAT_VERSION;
+
+impl super::EventDecoder for EventDecoder {
+    fn num_events(&self) -> usize {
+        self.num_events()
+    }
+
+    fn metadata(&self) -> Metadata {
+        self.metadata()
+    }
+
+    fn decode_full_event(&self, event_index: usize) -> Event<'_> {
+        self.decode_full_event(event_index)
+    }
+
+    fn decode_lightweight_event(&self, event_index: usize) -> LightweightEvent {
+        self.decode_lightweight_event(event_index)
+    }
+}
diff --git a/analyzeme/src/profiling_data.rs b/analyzeme/src/profiling_data.rs
index 90e572e..d8fda6b 100644
--- a/analyzeme/src/profiling_data.rs
+++ b/analyzeme/src/profiling_data.rs
@@ -7,6 +7,7 @@ use measureme::file_header::{
 use measureme::{
     EventId, PageTag, RawEvent, SerializationSink, SerializationSinkBuilder, StringTableBuilder,
 };
+use std::cell::OnceCell;
 use std::fs;
 use std::path::Path;
 use std::sync::Arc;
@@ -15,6 +16,7 @@ use std::{error::Error, path::PathBuf};
 #[derive(Debug)]
 pub struct ProfilingData {
     event_decoder: Box<dyn EventDecoder>,
+    metadata: OnceCell<Metadata>,
 }

 impl ProfilingData {
@@ -50,9 +52,6 @@ impl ProfilingData {
         data: Vec<u8>,
         diagnostic_file_path: Option<&Path>,
     ) -> Result<ProfilingData, Box<dyn Error + Send + Sync>> {
-        // let event_decoder = EventDecoder::new(data, diagnostic_file_path)?;
-        // Ok(ProfilingData { event_decoder })
-
         let file_format_version = read_file_header(
             &data,
             FILE_MAGIC_TOP_LEVEL,
@@ -66,6 +65,10 @@ impl ProfilingData {
                 data,
                 diagnostic_file_path,
             )?),
+            file_formats::v9::FILE_FORMAT => Box::new(file_formats::v9::EventDecoder::new(
+                data,
+                diagnostic_file_path,
+            )?),
             unsupported_version => {
                 let msg = if unsupported_version > file_formats::current::FILE_FORMAT {
                     format!(
@@ -83,11 +86,15 @@ impl ProfilingData {
             }
         };

-        Ok(ProfilingData { event_decoder })
+        Ok(ProfilingData {
+            event_decoder,
+            metadata: OnceCell::new(),
+        })
     }

     pub fn metadata(&self) -> &Metadata {
-        self.event_decoder.metadata()
+        // Cache the metadata during the first access
+        self.metadata.get_or_init(|| self.event_decoder.metadata())
     }

     pub fn iter<'a>(&'a self) -> ProfilerEventIterator<'a> {
@@ -301,6 +308,7 @@ impl ProfilingDataBuilder {
                 )
                 .unwrap(),
             ),
+            metadata: OnceCell::new(),
         }
     }

@@ -641,6 +649,86 @@ mod tests {
         );
     }

+    // To generate this revision, a v9 revision of the rust toolchain was
+    // created, and "rustup toolchain link" was used to name it "bespoke".
+    // Then, the following commands were executed:
+    //
+    // # Make a small test binary and profile it.
+    // cargo new --bin testbinary
+    // cargo +bespoke rustc --bin testbinary -- -Zself-profile
+    //
+    // # Gzip the output profdata.
+    // gzip testbinary-...mm_profdata
+    // mv testbinary-...mm_profdata.gz v9.mm_profdata.gz
+    #[test]
+    fn can_read_v9_profdata_files() {
+        let (data, file_format_version) =
+            read_data_and_version("tests/profdata/v9.mm_profdata.gz");
+        assert_eq!(file_format_version, file_formats::v9::FILE_FORMAT);
+        let profiling_data = ProfilingData::from_paged_buffer(data, None)
+            .expect("Creating the profiling data failed");
+        let grouped_events = group_events(&profiling_data);
+        let event_kinds = grouped_events
+            .keys()
+            .map(|k| k.as_str())
+            .collect::<HashSet<_>>();
+        let expect_event_kinds = vec![
+            "GenericActivity",
+            "IncrementalResultHashing",
+            "Query",
+            "ArtifactSize",
+        ]
+        .into_iter()
+        .collect::<HashSet<_>>();
+        assert_eq!(event_kinds, expect_event_kinds);
+
+        let generic_activity_len = 5125;
+        let incremental_hashing_len = 1844;
+        let query_len = 1877;
+        let artifact_size_len = 24;
+        assert_eq!(
+            grouped_events["GenericActivity"].len(),
+            generic_activity_len
+        );
+        assert_eq!(
+            grouped_events["IncrementalResultHashing"].len(),
+            incremental_hashing_len
+        );
+        assert_eq!(grouped_events["Query"].len(), query_len);
+        assert_eq!(grouped_events["ArtifactSize"].len(), artifact_size_len);
+
+        assert_eq!(
+            grouped_events["GenericActivity"][generic_activity_len / 2].label,
+            "metadata_decode_entry_item_attrs"
+        );
+        assert_eq!(
+            grouped_events["GenericActivity"][generic_activity_len / 2].duration(),
+            Some(Duration::from_nanos(376))
+        );
+
+        assert_eq!(
+            grouped_events["IncrementalResultHashing"][incremental_hashing_len - 1].label,
+            "crate_hash"
+        );
+        assert_eq!(
+            grouped_events["IncrementalResultHashing"][incremental_hashing_len - 1].duration(),
+            Some(Duration::from_nanos(461))
+        );
+
+        assert_eq!(grouped_events["Query"][0].label, "registered_tools");
+        assert_eq!(
+            grouped_events["Query"][0].duration(),
+            Some(Duration::from_nanos(45077))
+        );
+
+        assert_eq!(
+            grouped_events["ArtifactSize"][0].label,
+            "codegen_unit_size_estimate"
+        );
+        assert_eq!(grouped_events["ArtifactSize"][0].duration(), None);
+        assert_eq!(grouped_events["ArtifactSize"][0].integer(), Some(3));
+    }
+
     fn read_data_and_version(file_path: &str) -> (Vec<u8>, u32) {
         let data = std::fs::read(file_path).expect("Test data not found");
         let mut gz = flate2::read::GzDecoder::new(&data[..]);
diff --git a/analyzeme/tests/profdata/v9.mm_profdata.gz b/analyzeme/tests/profdata/v9.mm_profdata.gz
new file mode 100644
index 0000000..2333504
Binary files /dev/null and b/analyzeme/tests/profdata/v9.mm_profdata.gz differ
diff --git a/crox/Cargo.toml b/crox/Cargo.toml
index a42b820..21a9414 100644
--- a/crox/Cargo.toml
+++ b/crox/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "crox"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Wesley Wiser "]
 edition = "2018"
diff --git a/decodeme/Cargo.toml b/decodeme/Cargo.toml
index bf6a94d..d15e64b 100644
--- a/decodeme/Cargo.toml
+++ b/decodeme/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "decodeme"
-version = "10.1.2"
+version = "11.0.0"
 edition = "2018"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
diff --git a/decodeme/src/lib.rs b/decodeme/src/lib.rs
index 6a5f7bd..2087f62 100644
--- a/decodeme/src/lib.rs
+++ b/decodeme/src/lib.rs
@@ -38,7 +38,7 @@ where
         .expect("a time that can be represented as SystemTime"))
 }

-#[derive(Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize)]
 pub struct Metadata {
     #[serde(deserialize_with = "system_time_from_nanos")]
     pub start_time: SystemTime,
@@ -113,9 +113,15 @@ impl EventDecoder {
         let mut split_data =
             measureme::split_streams(&entire_file_data[FILE_HEADER_SIZE..]);

-        let string_data = split_data.remove(&PageTag::StringData).expect("Invalid file: No string data found");
-        let index_data = split_data.remove(&PageTag::StringIndex).expect("Invalid file: No string index data found");
-        let event_data = split_data.remove(&PageTag::Events).expect("Invalid file: No event data found");
+        let string_data = split_data
+            .remove(&PageTag::StringData)
+            .expect("Invalid file: No string data found");
+        let index_data = split_data
+            .remove(&PageTag::StringIndex)
+            .expect("Invalid file: No string index data found");
+        let event_data = split_data
+            .remove(&PageTag::Events)
+            .expect("Invalid file: No event data found");

         Self::from_separate_buffers(string_data, index_data, event_data, diagnostic_file_path)
     }
@@ -151,8 +157,8 @@ impl EventDecoder {
         event_byte_count / RAW_EVENT_SIZE
     }

-    pub fn metadata(&self) -> &Metadata {
-        &self.metadata
+    pub fn metadata(&self) -> Metadata {
+        self.metadata.clone()
     }

     pub fn decode_full_event<'a>(&'a self, event_index: usize) -> Event<'a> {
diff --git a/decodeme/src/stringtable.rs b/decodeme/src/stringtable.rs
index fed6fcf..b600937 100644
--- a/decodeme/src/stringtable.rs
+++ b/decodeme/src/stringtable.rs
@@ -17,10 +17,12 @@ use std::convert::TryInto;
 use std::error::Error;
 use std::path::Path;

+const INDEX_ENTRY_SIZE: usize = std::mem::size_of::<StringId>() + std::mem::size_of::<Addr>();
+
 fn deserialize_index_entry(bytes: &[u8]) -> (StringId, Addr) {
     (
-        StringId::new(u32::from_le_bytes(bytes[0..4].try_into().unwrap())),
-        Addr(u32::from_le_bytes(bytes[4..8].try_into().unwrap())),
+        StringId::new(u64::from_le_bytes(bytes[0..8].try_into().unwrap())),
+        Addr(u64::from_le_bytes(bytes[8..16].try_into().unwrap())),
     )
 }

@@ -152,9 +154,9 @@ fn decode_string_ref_from_data(bytes: &[u8]) -> StringId {
     // refs, where the first byte is STRING_REF_TAG and the
     // following 4 bytes are a little-endian u32 string ID value.
     assert!(bytes[0] == STRING_REF_TAG);
-    assert!(STRING_REF_ENCODED_SIZE == 5);
+    assert!(STRING_REF_ENCODED_SIZE == 9);

-    let id = u32::from_le_bytes(bytes[1..5].try_into().unwrap());
+    let id = u64::from_le_bytes(bytes[1..9].try_into().unwrap());
     StringId::new(id)
 }

@@ -185,9 +187,15 @@ impl StringTable {
             "StringTable Index",
         )?;

-        assert!(index_data.len() % 8 == 0);
+        // The non-header data should be divisible into index entries.
+        assert!(
+            (index_data.len() - measureme::file_header::FILE_HEADER_SIZE) % INDEX_ENTRY_SIZE == 0,
+            "StringTable index size appears malformed",
+        );
+        assert_eq!(INDEX_ENTRY_SIZE, 16);
+
         let index: FxHashMap<_, _> = strip_file_header(&index_data)
-            .chunks(8)
+            .chunks(INDEX_ENTRY_SIZE)
             .map(deserialize_index_entry)
             .collect();
diff --git a/flamegraph/Cargo.toml b/flamegraph/Cargo.toml
index b5d0c15..edf4c3b 100644
--- a/flamegraph/Cargo.toml
+++ b/flamegraph/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "flamegraph"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Wesley Wiser ", "Michael Woerister "]
 edition = "2018"
 license = "MIT OR Apache-2.0"
diff --git a/measureme/Cargo.toml b/measureme/Cargo.toml
index 6a370e7..d057d2c 100644
--- a/measureme/Cargo.toml
+++ b/measureme/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "measureme"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Wesley Wiser ", "Michael Woerister "]
 edition = "2018"
 description = "Support crate for rustc's self-profiling feature"
diff --git a/measureme/src/event_id.rs b/measureme/src/event_id.rs
index e2b0f53..d8dd251 100644
--- a/measureme/src/event_id.rs
+++ b/measureme/src/event_id.rs
@@ -33,24 +33,24 @@ impl EventId {
     }

     #[inline]
-    pub fn as_u32(self) -> u32 {
-        self.0.as_u32()
+    pub fn as_u64(self) -> u64 {
+        self.0.as_u64()
     }

     #[inline]
-    pub fn from_label(label: StringId) -> EventId {
+    pub fn from_label(label: StringId) -> Self {
         EventId(label)
     }

     #[inline]
-    pub fn from_virtual(virtual_id: StringId) -> EventId {
+    pub fn from_virtual(virtual_id: StringId) -> Self {
         EventId(virtual_id)
     }

-    /// Create an EventId from a raw u32 value. Only used internally for
+    /// Create an EventId from a raw u64 value. Only used internally for
     /// deserialization.
     #[inline]
-    pub fn from_u32(raw_id: u32) -> EventId {
+    pub fn from_u64(raw_id: u64) -> Self {
         EventId(StringId::new(raw_id))
     }
 }
diff --git a/measureme/src/file_header.rs b/measureme/src/file_header.rs
index 8ad1928..f576b6d 100644
--- a/measureme/src/file_header.rs
+++ b/measureme/src/file_header.rs
@@ -5,7 +5,7 @@ use std::convert::TryInto;
 use std::error::Error;
 use std::path::Path;

-pub const CURRENT_FILE_FORMAT_VERSION: u32 = 8;
+pub const CURRENT_FILE_FORMAT_VERSION: u32 = 9;

 pub const FILE_MAGIC_TOP_LEVEL: &[u8; 4] = b"MMPD";
 pub const FILE_MAGIC_EVENT_STREAM: &[u8; 4] = b"MMES";
diff --git a/measureme/src/raw_event.rs b/measureme/src/raw_event.rs
index f181fb5..ec17427 100644
--- a/measureme/src/raw_event.rs
+++ b/measureme/src/raw_event.rs
@@ -214,7 +214,7 @@ mod tests {
     #[test]
     fn raw_event_has_expected_size() {
         // A test case to prevent accidental regressions of RawEvent's size.
-        assert_eq!(std::mem::size_of::<RawEvent>(), 24);
+        assert_eq!(std::mem::size_of::<RawEvent>(), 32);
     }

     #[test]
diff --git a/measureme/src/serialization.rs b/measureme/src/serialization.rs
index 28bfd96..487504e 100644
--- a/measureme/src/serialization.rs
+++ b/measureme/src/serialization.rs
@@ -70,7 +70,7 @@ impl std::convert::TryFrom<u8> for PageTag {

 // TODO: Evaluate if it makes sense to add a type tag to `Addr` in order to
 // prevent accidental use of `Addr` values with the wrong address space.
 #[derive(Clone, Copy, Eq, PartialEq, Debug)]
-pub struct Addr(pub u32);
+pub struct Addr(pub u64);

 impl Addr {
     pub fn as_usize(self) -> usize {
@@ -169,7 +169,7 @@ impl<'a> Write for StdWriteAdapter<'a> {
 #[derive(Debug)]
 struct SerializationSinkInner {
     buffer: Vec<u8>,
-    addr: u32,
+    addr: u64,
 }

 /// This state is shared between all `SerializationSink`s writing to the same
@@ -326,7 +326,7 @@ impl SerializationSink {
             buffer.resize(buf_end, 0u8);
             write(&mut buffer[buf_start..buf_end]);

-            *addr += num_bytes as u32;
+            *addr += num_bytes as u64;

             Addr(curr_addr)
         }
@@ -356,7 +356,7 @@ impl SerializationSink {
         } = *data;

         let curr_addr = Addr(*addr);
-        *addr += bytes.len() as u32;
+        *addr += bytes.len() as u64;

         let mut bytes_left = bytes;

@@ -438,7 +438,7 @@ mod tests {
             tags.iter().map(|&tag| sink_builder.new_sink(tag)).collect();

         for chunk_index in 0..chunk_count {
-            let expected_addr = Addr((chunk_index * chunk_size) as u32);
+            let expected_addr = Addr((chunk_index * chunk_size) as u64);
             for sink in sinks.iter() {
                 assert_eq!(write(sink, &expected_chunk[..]), expected_addr);
             }
diff --git a/measureme/src/stringtable.rs b/measureme/src/stringtable.rs
index a56bbcb..33bcf07 100644
--- a/measureme/src/stringtable.rs
+++ b/measureme/src/stringtable.rs
@@ -74,18 +74,18 @@ use std::{error::Error, sync::Arc};
 /// that maps virtual `StringId`s to addresses.
 #[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)]
 #[repr(C)]
-pub struct StringId(u32);
+pub struct StringId(u64);

 impl StringId {
     pub const INVALID: StringId = StringId(INVALID_STRING_ID);

     #[inline]
-    pub fn new(id: u32) -> StringId {
+    pub fn new(id: u64) -> StringId {
         StringId(id)
     }

     #[inline]
-    pub fn new_virtual(id: u32) -> StringId {
+    pub fn new_virtual(id: u64) -> StringId {
         assert!(id <= MAX_USER_VIRTUAL_STRING_ID);
         StringId(id)
     }
@@ -96,7 +96,7 @@ impl StringId {
     }

     #[inline]
-    pub fn as_u32(self) -> u32 {
+    pub fn as_u64(self) -> u64 {
         self.0
     }

@@ -115,18 +115,18 @@ impl StringId {
 // See module-level documentation for more information on the encoding.
 pub const TERMINATOR: u8 = 0xFF;
 pub const STRING_REF_TAG: u8 = 0xFE;
-pub const STRING_REF_ENCODED_SIZE: usize = 5;
+pub const STRING_REF_ENCODED_SIZE: usize = 9;

 /// The maximum id value a virtual string may be.
-const MAX_USER_VIRTUAL_STRING_ID: u32 = 100_000_000;
+const MAX_USER_VIRTUAL_STRING_ID: u64 = 100_000_000;

 /// The id of the profile metadata string entry.
-pub const METADATA_STRING_ID: u32 = MAX_USER_VIRTUAL_STRING_ID + 1;
+pub const METADATA_STRING_ID: u64 = MAX_USER_VIRTUAL_STRING_ID + 1;

 /// Some random string ID that we make sure cannot be generated or assigned to.
-const INVALID_STRING_ID: u32 = METADATA_STRING_ID + 1;
+const INVALID_STRING_ID: u64 = METADATA_STRING_ID + 1;

-pub const FIRST_REGULAR_STRING_ID: u32 = INVALID_STRING_ID + 1;
+pub const FIRST_REGULAR_STRING_ID: u64 = INVALID_STRING_ID + 1;

 /// Write-only version of the string table
 pub struct StringTableBuilder {
@@ -180,14 +180,14 @@ impl<'s> StringComponent<'s> {
                 &mut bytes[s.len()..]
             }
             StringComponent::Ref(string_id) => {
-                // The code below assumes we use a 5-byte encoding for string
+                // The code below assumes we use a 9-byte encoding for string
                 // refs, where the first byte is STRING_REF_TAG and the
-                // following 4 bytes are a little-endian u32 string ID value.
-                assert!(STRING_REF_ENCODED_SIZE == 5);
+                // following 8 bytes are a little-endian u64 string ID value.
+                assert!(STRING_REF_ENCODED_SIZE == 9);

                 bytes[0] = STRING_REF_TAG;
-                bytes[1..5].copy_from_slice(&string_id.0.to_le_bytes());
-                &mut bytes[5..]
+                bytes[1..9].copy_from_slice(&string_id.0.to_le_bytes());
+                &mut bytes[9..]
             }
         }
     }
@@ -248,9 +248,9 @@ impl_serializable_string_for_fixed_size!(15);
 impl_serializable_string_for_fixed_size!(16);

 fn serialize_index_entry(sink: &SerializationSink, id: StringId, addr: Addr) {
-    sink.write_atomic(8, |bytes| {
-        bytes[0..4].copy_from_slice(&id.0.to_le_bytes());
-        bytes[4..8].copy_from_slice(&addr.0.to_le_bytes());
+    sink.write_atomic(16, |bytes| {
+        bytes[0..8].copy_from_slice(&id.0.to_le_bytes());
+        bytes[8..16].copy_from_slice(&addr.0.to_le_bytes());
     });
 }

@@ -289,8 +289,8 @@ impl StringTableBuilder {
         // multiple StringIds to the same addr, so we don't have to repeat
         // the `concrete_id` over and over.

-        type MappingEntry = [u32; 2];
-        assert!(std::mem::size_of::<MappingEntry>() == 8);
+        type MappingEntry = [u64; 2];
+        assert!(std::mem::size_of::<MappingEntry>() == 16);

         let to_addr_le = concrete_id.to_addr().0.to_le();
diff --git a/mmedit/Cargo.toml b/mmedit/Cargo.toml
index ee3f794..ca0bc35 100644
--- a/mmedit/Cargo.toml
+++ b/mmedit/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "mmedit"
-version = "10.1.2"
+version = "11.0.0"
 edition = "2018"

 [dependencies]
diff --git a/mmview/Cargo.toml b/mmview/Cargo.toml
index c11dfff..5a720c7 100644
--- a/mmview/Cargo.toml
+++ b/mmview/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "mmview"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Wesley Wiser ", "Michael Woerister "]
 edition = "2018"
 license = "MIT OR Apache-2.0"
diff --git a/stack_collapse/Cargo.toml b/stack_collapse/Cargo.toml
index 539d9d9..e536097 100644
--- a/stack_collapse/Cargo.toml
+++ b/stack_collapse/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "stack_collapse"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Wesley Wiser ", "Michael Woerister "]
 edition = "2018"
 license = "MIT OR Apache-2.0"
diff --git a/summarize/Cargo.toml b/summarize/Cargo.toml
index ebe5d4c..5d8f871 100644
--- a/summarize/Cargo.toml
+++ b/summarize/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "summarize"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Wesley Wiser ", "Michael Woerister "]
 edition = "2018"
 license = "MIT OR Apache-2.0"
diff --git a/version_checker/Cargo.toml b/version_checker/Cargo.toml
index c9667c6..75bccd2 100644
--- a/version_checker/Cargo.toml
+++ b/version_checker/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "version_checker"
-version = "10.1.2"
+version = "11.0.0"
 authors = ["Michael Woerister "]
 edition = "2018"
 license = "MIT OR Apache-2.0"
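
The widening above changes the on-disk string-table index entry from 8 bytes (a little-endian u32 StringId followed by a u32 Addr) to 16 bytes (u64 + u64), which is why serialize_index_entry and deserialize_index_entry now slice at 0..8 and 8..16, and why STRING_REF_ENCODED_SIZE grows from 5 to 9 (one tag byte plus a full u64 ID). As a rough standalone sketch of the new index-entry layout only — this is not code from the patch; the StringId and Addr structs below are local stand-ins for the measureme types, and the function names merely mirror the ones touched above:

use std::convert::TryInto;

// Local stand-ins for measureme's StringId and Addr, which both wrap a u64 in v9.
#[derive(Debug, PartialEq)]
struct StringId(u64);
#[derive(Debug, PartialEq)]
struct Addr(u64);

// One v9 string-table index entry: a little-endian u64 string ID followed by a
// little-endian u64 address (the v8 format packed two u32s into 8 bytes).
const INDEX_ENTRY_SIZE: usize = 16;

fn serialize_index_entry(id: &StringId, addr: &Addr) -> [u8; INDEX_ENTRY_SIZE] {
    let mut bytes = [0u8; INDEX_ENTRY_SIZE];
    bytes[0..8].copy_from_slice(&id.0.to_le_bytes());
    bytes[8..16].copy_from_slice(&addr.0.to_le_bytes());
    bytes
}

fn deserialize_index_entry(bytes: &[u8]) -> (StringId, Addr) {
    (
        StringId(u64::from_le_bytes(bytes[0..8].try_into().unwrap())),
        Addr(u64::from_le_bytes(bytes[8..16].try_into().unwrap())),
    )
}

fn main() {
    // Round-trip a single 16-byte entry to show the layout is self-consistent.
    let encoded = serialize_index_entry(&StringId(100_000_001), &Addr(4096));
    assert_eq!(encoded.len(), INDEX_ENTRY_SIZE);
    assert_eq!(
        deserialize_index_entry(&encoded),
        (StringId(100_000_001), Addr(4096))
    );
}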