@@ -16,13 +16,14 @@
 // FIXME: Iteration should probably be considered separately

 use collections::{Collection, MutableSeq};
+use io::{IoError, IoResult, Reader};
+use io;
 use iter::Iterator;
+use num::Int;
 use option::{Option, Some, None};
+use ptr::RawPtr;
 use result::{Ok, Err};
-use io;
-use io::{IoError, IoResult, Reader};
 use slice::{ImmutableSlice, Slice};
-use ptr::RawPtr;

 /// An iterator that reads a single byte on each iteration,
 /// until `.read_byte()` returns `EndOfFile`.
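Besides reordering the imports, this hunk pulls in `num::Int`, the trait that supplies the `to_le()`/`to_be()` methods used below in place of the old `mem::to_le16`/`to_be16`-style free functions. As a minimal illustration of their semantics, here is the same idea in today's Rust (the pre-1.0 `Int` methods behaved identically):

    // to_be() byte-swaps on a little-endian host and is a no-op on a
    // big-endian one; to_le() is the mirror image. Reading the converted
    // value's bytes in memory order therefore yields the requested
    // encoding on any host.
    fn main() {
        let n: u16 = 0x0102;
        assert_eq!(n.to_be().to_ne_bytes(), [0x01, 0x02]); // big-endian bytes
        assert_eq!(n.to_le().to_ne_bytes(), [0x02, 0x01]); // little-endian bytes
    }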
@@ -76,16 +77,15 @@ impl<'r, R: Reader> Iterator<IoResult<u8>> for Bytes<'r, R> {
 ///
 /// This function returns the value returned by the callback, for convenience.
 pub fn u64_to_le_bytes<T>(n: u64, size: uint, f: |v: &[u8]| -> T) -> T {
-    use mem::{to_le16, to_le32, to_le64};
     use mem::transmute;

     // LLVM fails to properly optimize this when using shifts instead of the to_le* intrinsics
     assert!(size <= 8u);
     match size {
         1u => f(&[n as u8]),
-        2u => f(unsafe { transmute::<_, [u8, ..2]>(to_le16(n as u16)) }),
-        4u => f(unsafe { transmute::<_, [u8, ..4]>(to_le32(n as u32)) }),
-        8u => f(unsafe { transmute::<_, [u8, ..8]>(to_le64(n)) }),
+        2u => f(unsafe { transmute::<_, [u8, ..2]>((n as u16).to_le()) }),
+        4u => f(unsafe { transmute::<_, [u8, ..4]>((n as u32).to_le()) }),
+        8u => f(unsafe { transmute::<_, [u8, ..8]>(n.to_le()) }),
         _ => {

            let mut bytes = vec!();
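Since `u64_to_le_bytes` returns whatever the callback returns, a caller can thread an `IoResult` straight through it. A hypothetical sketch in the pre-1.0 Rust of this commit (`writer`, `len`, and the 2-byte width are assumptions, not part of the patch):

    // Hypothetical caller: emit `len` as a 2-byte little-endian field.
    // The IoResult from write() is passed back out of u64_to_le_bytes.
    let res = u64_to_le_bytes(len, 2u, |v| writer.write(v));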
@@ -116,16 +116,15 @@ pub fn u64_to_le_bytes<T>(n: u64, size: uint, f: |v: &[u8]| -> T) -> T {
 ///
 /// This function returns the value returned by the callback, for convenience.
 pub fn u64_to_be_bytes<T>(n: u64, size: uint, f: |v: &[u8]| -> T) -> T {
-    use mem::{to_be16, to_be32, to_be64};
     use mem::transmute;

     // LLVM fails to properly optimize this when using shifts instead of the to_be* intrinsics
     assert!(size <= 8u);
     match size {
         1u => f(&[n as u8]),
-        2u => f(unsafe { transmute::<_, [u8, ..2]>(to_be16(n as u16)) }),
-        4u => f(unsafe { transmute::<_, [u8, ..4]>(to_be32(n as u32)) }),
-        8u => f(unsafe { transmute::<_, [u8, ..8]>(to_be64(n)) }),
+        2u => f(unsafe { transmute::<_, [u8, ..2]>((n as u16).to_be()) }),
+        4u => f(unsafe { transmute::<_, [u8, ..4]>((n as u32).to_be()) }),
+        8u => f(unsafe { transmute::<_, [u8, ..8]>(n.to_be()) }),
         _ => {
            let mut bytes = vec!();
            let mut i = size;
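The big-endian variant is symmetric: the callback sees the bytes in network order. A small illustrative check in the era's syntax (the values are chosen for illustration):

    // 0x0102 encoded big-endian over two bytes is [0x01, 0x02].
    u64_to_be_bytes(0x0102u64, 2u, |v| {
        assert!(v[0] == 0x01u8 && v[1] == 0x02u8);
    });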
@@ -152,7 +151,6 @@ pub fn u64_to_be_bytes<T>(n: u64, size: uint, f: |v: &[u8]| -> T) -> T {
 /// 32-bit value is parsed.
 pub fn u64_from_be_bytes(data: &[u8], start: uint, size: uint) -> u64 {
     use ptr::{copy_nonoverlapping_memory};
-    use mem::from_be64;
     use slice::MutableSlice;

     assert!(size <= 8u);
@@ -166,7 +164,7 @@ pub fn u64_from_be_bytes(data: &[u8], start: uint, size: uint) -> u64 {
166
164
let ptr = data. as_ptr ( ) . offset ( start as int ) ;
167
165
let out = buf. as_mut_ptr ( ) ;
168
166
copy_nonoverlapping_memory ( out. offset ( ( 8 - size) as int ) , ptr, size) ;
169
- from_be64 ( * ( out as * const u64 ) )
167
+ ( * ( out as * const u64 ) ) . to_be ( )
170
168
}
171
169
}
172
170
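For readers unfamiliar with the pointer dance in that last hunk, here is an equivalent safe sketch in today's Rust (the helper name is ours, not the library's): the `size` source bytes land in the low-order end of an 8-byte buffer, which is then decoded as a big-endian `u64`, exactly what copying to `out.offset((8 - size) as int)` followed by the byte-swap achieves.

    // Safe modern-Rust equivalent of the unsafe block above (helper name
    // is hypothetical): copy the `size` big-endian source bytes into the
    // last `size` slots of an 8-byte buffer, then decode the whole buffer
    // as a big-endian u64.
    fn u64_from_be_bytes_sketch(data: &[u8], start: usize, size: usize) -> u64 {
        assert!(size <= 8);
        let mut buf = [0u8; 8];
        buf[8 - size..].copy_from_slice(&data[start..start + size]);
        u64::from_be_bytes(buf)
    }

For example, `u64_from_be_bytes_sketch(&[0x01, 0x02], 0, 2)` returns `0x0102`.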