./libstd/unstable/intrinsics.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*! rustc compiler intrinsics.

The corresponding definitions are in librustc/middle/trans/foreign.rs.

# Atomics

The atomic intrinsics provide common atomic operations on machine
words, with multiple possible memory orderings. They obey the same
semantics as C++11. See the LLVM documentation on [atomics].

[atomics]: http://llvm.org/docs/Atomics.html

A quick refresher on memory ordering:

* Acquire - a barrier for acquiring a lock. Subsequent reads and writes
  take place after the barrier.
* Release - a barrier for releasing a lock. Preceding reads and writes
  take place before the barrier.
* Sequentially consistent - sequentially consistent operations are
  guaranteed to happen in order. This is the standard mode for working
  with atomic types and is equivalent to Java's `volatile`.

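As an illustration only (a sketch, not an API defined in this module),
a spin lock can pair an acquire-ordered compare-and-exchange with a
release-ordered store:

    use std::unstable::intrinsics::{atomic_cxchg_acq, atomic_store_rel};

    static mut LOCK: int = 0;

    unsafe fn lock() {
        // Try to swing the flag from 0 to 1; acquire ordering on success.
        while atomic_cxchg_acq(&mut LOCK, 0, 1) != 0 {}
    }

    unsafe fn unlock() {
        // Release ordering publishes the critical section's writes.
        atomic_store_rel(&mut LOCK, 0);
    }
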
*/

// This is needed to prevent duplicate lang item definitions.
#[cfg(test)]
pub use realstd::unstable::intrinsics::{TyDesc, Opaque, TyVisitor};

pub type GlueFn = extern "Rust" fn(*i8);

// NB: this has to be kept in sync with `type_desc` in `rt`
#[lang="ty_desc"]
#[cfg(not(test))]
pub struct TyDesc {
    // sizeof(T)
    size: uint,

    // alignof(T)
    align: uint,

    // Called on a copy of a value of type `T` *after* memcpy
    take_glue: GlueFn,

    // Called when a value of type `T` is no longer needed
    drop_glue: GlueFn,

    // Called by drop glue when a value of type `T` can be freed
    free_glue: GlueFn,

    // Called by reflection visitor to visit a value of type `T`
    visit_glue: GlueFn,

    // If T represents a box pointer (`@U` or `~U`), then
    // `borrow_offset` is the amount that the pointer must be adjusted
    // to find the payload. This is always derivable from the type
    // `U`, but in the case of `@Trait` or `~Trait` objects, the type
    // `U` is unknown.
    borrow_offset: uint,

    // Name corresponding to the type
    name: &'static str
}

#[lang="opaque"]
#[cfg(not(test))]
pub enum Opaque { }

#[lang="ty_visitor"]
#[cfg(not(test))]
pub trait TyVisitor {
    fn visit_bot(&mut self) -> bool;
    fn visit_nil(&mut self) -> bool;
    fn visit_bool(&mut self) -> bool;

    fn visit_int(&mut self) -> bool;
    fn visit_i8(&mut self) -> bool;
    fn visit_i16(&mut self) -> bool;
    fn visit_i32(&mut self) -> bool;
    fn visit_i64(&mut self) -> bool;

    fn visit_uint(&mut self) -> bool;
    fn visit_u8(&mut self) -> bool;
    fn visit_u16(&mut self) -> bool;
    fn visit_u32(&mut self) -> bool;
    fn visit_u64(&mut self) -> bool;

    fn visit_f32(&mut self) -> bool;
    fn visit_f64(&mut self) -> bool;

    fn visit_char(&mut self) -> bool;

    fn visit_estr_box(&mut self) -> bool;
    fn visit_estr_uniq(&mut self) -> bool;
    fn visit_estr_slice(&mut self) -> bool;
    fn visit_estr_fixed(&mut self, n: uint, sz: uint, align: uint) -> bool;

    fn visit_box(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_uniq(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_uniq_managed(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_ptr(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_rptr(&mut self, mtbl: uint, inner: *TyDesc) -> bool;

    fn visit_vec(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_unboxed_vec(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_box(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_uniq(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_uniq_managed(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_slice(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
                        mtbl: uint, inner: *TyDesc) -> bool;

    fn visit_enter_rec(&mut self, n_fields: uint,
                       sz: uint, align: uint) -> bool;
    fn visit_rec_field(&mut self, i: uint, name: &str,
                       mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_leave_rec(&mut self, n_fields: uint,
                       sz: uint, align: uint) -> bool;

    fn visit_enter_class(&mut self, name: &str, named_fields: bool, n_fields: uint,
                         sz: uint, align: uint) -> bool;
    fn visit_class_field(&mut self, i: uint, name: &str, named: bool,
                         mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_leave_class(&mut self, name: &str, named_fields: bool, n_fields: uint,
                         sz: uint, align: uint) -> bool;

    fn visit_enter_tup(&mut self, n_fields: uint,
                       sz: uint, align: uint) -> bool;
    fn visit_tup_field(&mut self, i: uint, inner: *TyDesc) -> bool;
    fn visit_leave_tup(&mut self, n_fields: uint,
                       sz: uint, align: uint) -> bool;

    fn visit_enter_enum(&mut self, n_variants: uint,
                        get_disr: extern unsafe fn(ptr: *Opaque) -> int,
                        sz: uint, align: uint) -> bool;
    fn visit_enter_enum_variant(&mut self, variant: uint,
                                disr_val: int,
                                n_fields: uint,
                                name: &str) -> bool;
    fn visit_enum_variant_field(&mut self, i: uint, offset: uint, inner: *TyDesc) -> bool;
    fn visit_leave_enum_variant(&mut self, variant: uint,
                                disr_val: int,
                                n_fields: uint,
                                name: &str) -> bool;
    fn visit_leave_enum(&mut self, n_variants: uint,
                        get_disr: extern unsafe fn(ptr: *Opaque) -> int,
                        sz: uint, align: uint) -> bool;

    fn visit_enter_fn(&mut self, purity: uint, proto: uint,
                      n_inputs: uint, retstyle: uint) -> bool;
    fn visit_fn_input(&mut self, i: uint, mode: uint, inner: *TyDesc) -> bool;
    fn visit_fn_output(&mut self, retstyle: uint, inner: *TyDesc) -> bool;
    fn visit_leave_fn(&mut self, purity: uint, proto: uint,
                      n_inputs: uint, retstyle: uint) -> bool;

    fn visit_trait(&mut self, name: &str) -> bool;
    fn visit_param(&mut self, i: uint) -> bool;
    fn visit_self(&mut self) -> bool;
    fn visit_type(&mut self) -> bool;
    fn visit_opaque_box(&mut self) -> bool;
    fn visit_closure_ptr(&mut self, ck: uint) -> bool;
}

extern "rust-intrinsic" {
    /// Abort the execution of the process.
    pub fn abort() -> !;

    /// Atomic compare and exchange, sequentially consistent.
    pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, acquire ordering.
    pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, release ordering.
    pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, acquire-release ordering.
    pub fn atomic_cxchg_acqrel(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, relaxed ordering.
    pub fn atomic_cxchg_relaxed(dst: &mut int, old: int, src: int) -> int;

    /// Atomic load, sequentially consistent.
    pub fn atomic_load(src: &int) -> int;
    /// Atomic load, acquire ordering.
    pub fn atomic_load_acq(src: &int) -> int;
    /// Atomic load, relaxed ordering.
    pub fn atomic_load_relaxed(src: &int) -> int;

    /// Atomic store, sequentially consistent.
    pub fn atomic_store(dst: &mut int, val: int);
    /// Atomic store, release ordering.
    pub fn atomic_store_rel(dst: &mut int, val: int);
    /// Atomic store, relaxed ordering.
    pub fn atomic_store_relaxed(dst: &mut int, val: int);

    /// Atomic exchange, sequentially consistent.
    pub fn atomic_xchg(dst: &mut int, src: int) -> int;
    /// Atomic exchange, acquire ordering.
    pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
    /// Atomic exchange, release ordering.
    pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
    /// Atomic exchange, acquire-release ordering.
    pub fn atomic_xchg_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic exchange, relaxed ordering.
    pub fn atomic_xchg_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic addition, sequentially consistent.
    pub fn atomic_xadd(dst: &mut int, src: int) -> int;
    /// Atomic addition, acquire ordering.
    pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
    /// Atomic addition, release ordering.
    pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
    /// Atomic addition, acquire-release ordering.
    pub fn atomic_xadd_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic addition, relaxed ordering.
    pub fn atomic_xadd_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic subtraction, sequentially consistent.
    pub fn atomic_xsub(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, acquire ordering.
    pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, release ordering.
    pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, acquire-release ordering.
    pub fn atomic_xsub_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, relaxed ordering.
    pub fn atomic_xsub_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise and, sequentially consistent.
    pub fn atomic_and(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, acquire ordering.
    pub fn atomic_and_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, release ordering.
    pub fn atomic_and_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, acquire-release ordering.
    pub fn atomic_and_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, relaxed ordering.
    pub fn atomic_and_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise nand, sequentially consistent.
    pub fn atomic_nand(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, acquire ordering.
    pub fn atomic_nand_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, release ordering.
    pub fn atomic_nand_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, acquire-release ordering.
    pub fn atomic_nand_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, relaxed ordering.
    pub fn atomic_nand_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise or, sequentially consistent.
    pub fn atomic_or(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, acquire ordering.
    pub fn atomic_or_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, release ordering.
    pub fn atomic_or_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, acquire-release ordering.
    pub fn atomic_or_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, relaxed ordering.
    pub fn atomic_or_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise xor, sequentially consistent.
    pub fn atomic_xor(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, acquire ordering.
    pub fn atomic_xor_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, release ordering.
    pub fn atomic_xor_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, acquire-release ordering.
    pub fn atomic_xor_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, relaxed ordering.
    pub fn atomic_xor_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic signed maximum, sequentially consistent.
    pub fn atomic_max(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, acquire ordering.
    pub fn atomic_max_acq(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, release ordering.
    pub fn atomic_max_rel(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, acquire-release ordering.
    pub fn atomic_max_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, relaxed ordering.
    pub fn atomic_max_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic signed minimum, sequentially consistent.
    pub fn atomic_min(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, acquire ordering.
    pub fn atomic_min_acq(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, release ordering.
    pub fn atomic_min_rel(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, acquire-release ordering.
    pub fn atomic_min_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, relaxed ordering.
    pub fn atomic_min_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic unsigned minimum, sequentially consistent.
    pub fn atomic_umin(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, acquire ordering.
    pub fn atomic_umin_acq(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, release ordering.
    pub fn atomic_umin_rel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, acquire-release ordering.
    pub fn atomic_umin_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, relaxed ordering.
    pub fn atomic_umin_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic unsigned maximum, sequentially consistent.
    pub fn atomic_umax(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, acquire ordering.
    pub fn atomic_umax_acq(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, release ordering.
    pub fn atomic_umax_rel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, acquire-release ordering.
    pub fn atomic_umax_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, relaxed ordering.
    pub fn atomic_umax_relaxed(dst: &mut int, src: int) -> int;

    /// An atomic fence, sequentially consistent.
    pub fn atomic_fence();
    /// An atomic fence, acquire ordering.
    pub fn atomic_fence_acq();
    /// An atomic fence, release ordering.
    pub fn atomic_fence_rel();
    /// An atomic fence, acquire-release ordering.
    pub fn atomic_fence_acqrel();

    /// The size of a type in bytes.
    ///
    /// This is the exact number of bytes in memory taken up by a
    /// value of the given type. In other words, a memset of this size
    /// would *exactly* overwrite a value. When laid out in vectors
    /// and structures there may be additional padding between
    /// elements.
    pub fn size_of<T>() -> uint;
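    // For illustration only (a hedged sketch; exact sizes of
    // pointer-sized types vary by target):
    //
    //     assert_eq!(unsafe { size_of::<u32>() }, 4);
    //     // A (u8, u16) tuple is padded out to the alignment of u16:
    //     assert_eq!(unsafe { size_of::<(u8, u16)>() }, 4);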

    /// Move a value to a memory location containing a value.
    ///
    /// Drop glue is run on the destination, which must contain a
    /// valid Rust value.
    pub fn move_val<T>(dst: &mut T, src: T);

    /// Move a value to an uninitialized memory location.
    ///
    /// Drop glue is not run on the destination.
    pub fn move_val_init<T>(dst: &mut T, src: T);
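    // For illustration only (a hedged sketch): paired with `uninit`,
    // `move_val_init` places a value without dropping whatever garbage
    // the destination previously held.
    //
    //     unsafe {
    //         let mut slot: int = uninit();
    //         move_val_init(&mut slot, 42);
    //     }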

    /// The required minimum alignment of a type, in bytes.
    pub fn min_align_of<T>() -> uint;
    /// The preferred alignment of a type, in bytes.
    pub fn pref_align_of<T>() -> uint;

    /// Get a static pointer to a type descriptor.
    pub fn get_tydesc<T>() -> *TyDesc;

    /// Create a value initialized to zero.
    ///
    /// `init` is unsafe because it returns a zeroed-out datum,
    /// which is unsafe unless T is POD. We don't have a POD
    /// kind yet. (See #4074).
    pub fn init<T>() -> T;

    /// Create an uninitialized value.
    pub fn uninit<T>() -> T;

    /// Move a value out of scope without running drop glue.
    ///
    /// `forget` is unsafe because the caller is responsible for
    /// ensuring the argument is deallocated already.
    pub fn forget<T>(_: T) -> ();

    /// Unsafely transmute a value of one type into a value of another
    /// type of the same size, reinterpreting its bits.
    pub fn transmute<T,U>(e: T) -> U;
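    // For illustration only (a hedged sketch): reinterpret the bits of
    // an f32 as a u32.
    //
    //     let bits: u32 = unsafe { transmute(1.0f32) };
    //     assert_eq!(bits, 0x3f800000u32);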

    /// Returns `true` if a type requires drop glue.
    pub fn needs_drop<T>() -> bool;

    /// Returns `true` if a type is managed (will be allocated on the local heap).
    pub fn contains_managed<T>() -> bool;

    /// Walk the value described by `td`, calling back into the given
    /// `TyVisitor` for each of its components.
    pub fn visit_tydesc(td: *TyDesc, tv: &mut TyVisitor);
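    // For illustration only (a hedged sketch, after the pattern used by
    // `std::repr`), where `visitor` is some value implementing `TyVisitor`:
    //
    //     unsafe {
    //         let td = get_tydesc::<int>();
    //         visit_tydesc(td, &mut visitor as &mut TyVisitor);
    //     }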

    /// Invoke `f` with the address of the current function's stack frame.
    #[cfg(not(stage0))]
    pub fn frame_address(f: &fn(*u8));

    /// Get the address of the `__morestack` stack growth function.
    pub fn morestack_addr() -> *();

    /// Calculates the offset from a pointer. The offset *must* be in-bounds of
    /// the object, or one-byte-past-the-end. An arithmetic overflow is also
    /// undefined behaviour.
    ///
    /// This is implemented as an intrinsic to avoid converting to and from an
    /// integer, since the conversion would throw away aliasing information.
    pub fn offset<T>(dst: *T, offset: int) -> *T;
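    // For illustration only (a hedged sketch): step a raw pointer to the
    // second element of a fixed-size vector.
    //
    //     let xs = [1, 2, 3];
    //     let p = &xs[0] as *int;
    //     let second = unsafe { *offset(p, 1) }; // == 2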

    /// Equivalent to the `llvm.memcpy.p0i8.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`.
    pub fn memcpy32<T>(dst: *mut T, src: *T, count: u32);
    /// Equivalent to the `llvm.memcpy.p0i8.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`.
    pub fn memcpy64<T>(dst: *mut T, src: *T, count: u64);

    /// Equivalent to the `llvm.memmove.p0i8.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`.
    pub fn memmove32<T>(dst: *mut T, src: *T, count: u32);
    /// Equivalent to the `llvm.memmove.p0i8.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`.
    pub fn memmove64<T>(dst: *mut T, src: *T, count: u64);

    /// Equivalent to the `llvm.memset.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`.
    pub fn memset32<T>(dst: *mut T, val: u8, count: u32);
    /// Equivalent to the `llvm.memset.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`.
    pub fn memset64<T>(dst: *mut T, val: u8, count: u64);
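    // For illustration only (a hedged sketch): copy three elements
    // between non-overlapping buffers.
    //
    //     let src = [1, 2, 3];
    //     let mut dst = [0, 0, 0];
    //     unsafe {
    //         memcpy32(&mut dst[0] as *mut int, &src[0] as *int, 3);
    //     }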

    pub fn sqrtf32(x: f32) -> f32;
    pub fn sqrtf64(x: f64) -> f64;

    pub fn powif32(a: f32, x: i32) -> f32;
    pub fn powif64(a: f64, x: i32) -> f64;

    // The following kill the stack canary without
    // `fixed_stack_segment`. This possibly only affects the f64
    // variants, but it's hard to be sure since it seems to only
    // occur with fairly specific arguments.
    #[fixed_stack_segment]
    pub fn sinf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn sinf64(x: f64) -> f64;

    #[fixed_stack_segment]
    pub fn cosf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn cosf64(x: f64) -> f64;

    #[fixed_stack_segment]
    pub fn powf32(a: f32, x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn powf64(a: f64, x: f64) -> f64;

    #[fixed_stack_segment]
    pub fn expf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn expf64(x: f64) -> f64;

    pub fn exp2f32(x: f32) -> f32;
    pub fn exp2f64(x: f64) -> f64;

    pub fn logf32(x: f32) -> f32;
    pub fn logf64(x: f64) -> f64;

    pub fn log10f32(x: f32) -> f32;
    pub fn log10f64(x: f64) -> f64;

    pub fn log2f32(x: f32) -> f32;
    pub fn log2f64(x: f64) -> f64;

    pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
    pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;

    pub fn fabsf32(x: f32) -> f32;
    pub fn fabsf64(x: f64) -> f64;

    pub fn floorf32(x: f32) -> f32;
    pub fn floorf64(x: f64) -> f64;

    pub fn ceilf32(x: f32) -> f32;
    pub fn ceilf64(x: f64) -> f64;

    pub fn truncf32(x: f32) -> f32;
    pub fn truncf64(x: f64) -> f64;

    /// Counts the number of one bits in `x` (population count).
    pub fn ctpop8(x: i8) -> i8;
    pub fn ctpop16(x: i16) -> i16;
    pub fn ctpop32(x: i32) -> i32;
    pub fn ctpop64(x: i64) -> i64;

    /// Counts the number of leading zero bits in `x`.
    pub fn ctlz8(x: i8) -> i8;
    pub fn ctlz16(x: i16) -> i16;
    pub fn ctlz32(x: i32) -> i32;
    pub fn ctlz64(x: i64) -> i64;

    /// Counts the number of trailing zero bits in `x`.
    pub fn cttz8(x: i8) -> i8;
    pub fn cttz16(x: i16) -> i16;
    pub fn cttz32(x: i32) -> i32;
    pub fn cttz64(x: i64) -> i64;

    /// Reverses the bytes of `x`.
    pub fn bswap16(x: i16) -> i16;
    pub fn bswap32(x: i32) -> i32;
    pub fn bswap64(x: i64) -> i64;

    // Checked arithmetic: each intrinsic returns the wrapped result
    // together with a flag that is true if the operation overflowed.
    pub fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
    pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
    pub fn i32_add_with_overflow(x: i32, y: i32) -> (i32, bool);
    pub fn i64_add_with_overflow(x: i64, y: i64) -> (i64, bool);

    pub fn u8_add_with_overflow(x: u8, y: u8) -> (u8, bool);
    pub fn u16_add_with_overflow(x: u16, y: u16) -> (u16, bool);
    pub fn u32_add_with_overflow(x: u32, y: u32) -> (u32, bool);
    pub fn u64_add_with_overflow(x: u64, y: u64) -> (u64, bool);

    pub fn i8_sub_with_overflow(x: i8, y: i8) -> (i8, bool);
    pub fn i16_sub_with_overflow(x: i16, y: i16) -> (i16, bool);
    pub fn i32_sub_with_overflow(x: i32, y: i32) -> (i32, bool);
    pub fn i64_sub_with_overflow(x: i64, y: i64) -> (i64, bool);

    pub fn u8_sub_with_overflow(x: u8, y: u8) -> (u8, bool);
    pub fn u16_sub_with_overflow(x: u16, y: u16) -> (u16, bool);
    pub fn u32_sub_with_overflow(x: u32, y: u32) -> (u32, bool);
    pub fn u64_sub_with_overflow(x: u64, y: u64) -> (u64, bool);

    pub fn i8_mul_with_overflow(x: i8, y: i8) -> (i8, bool);
    pub fn i16_mul_with_overflow(x: i16, y: i16) -> (i16, bool);
    pub fn i32_mul_with_overflow(x: i32, y: i32) -> (i32, bool);
    pub fn i64_mul_with_overflow(x: i64, y: i64) -> (i64, bool);

    pub fn u8_mul_with_overflow(x: u8, y: u8) -> (u8, bool);
    pub fn u16_mul_with_overflow(x: u16, y: u16) -> (u16, bool);
    pub fn u32_mul_with_overflow(x: u32, y: u32) -> (u32, bool);
    pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);
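    // For illustration only (a hedged sketch): detect wraparound in
    // unsigned addition.
    //
    //     let (sum, overflowed) = unsafe { u8_add_with_overflow(255u8, 1u8) };
    //     assert!(overflowed && sum == 0);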
}

// Host-to-little-endian and host-to-big-endian conversions: each is a
// byte swap on targets of the opposite endianness and a no-op otherwise.
#[cfg(target_endian = "little")] pub fn to_le16(x: i16) -> i16 { x }
#[cfg(target_endian = "big")] pub fn to_le16(x: i16) -> i16 { unsafe { bswap16(x) } }
#[cfg(target_endian = "little")] pub fn to_le32(x: i32) -> i32 { x }
#[cfg(target_endian = "big")] pub fn to_le32(x: i32) -> i32 { unsafe { bswap32(x) } }
#[cfg(target_endian = "little")] pub fn to_le64(x: i64) -> i64 { x }
#[cfg(target_endian = "big")] pub fn to_le64(x: i64) -> i64 { unsafe { bswap64(x) } }

#[cfg(target_endian = "little")] pub fn to_be16(x: i16) -> i16 { unsafe { bswap16(x) } }
#[cfg(target_endian = "big")] pub fn to_be16(x: i16) -> i16 { x }
#[cfg(target_endian = "little")] pub fn to_be32(x: i32) -> i32 { unsafe { bswap32(x) } }
#[cfg(target_endian = "big")] pub fn to_be32(x: i32) -> i32 { x }
#[cfg(target_endian = "little")] pub fn to_be64(x: i64) -> i64 { unsafe { bswap64(x) } }
#[cfg(target_endian = "big")] pub fn to_be64(x: i64) -> i64 { x }
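
// For illustration only (a hedged sketch): on a little-endian target,
//
//     assert_eq!(to_be32(0x11223344), 0x44332211);
//
// while on a big-endian target `to_be32` is the identity.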