// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_uppercase_pattern_statics)]

use arena::TypedArena;
use lib::llvm::{SequentiallyConsistent, Acquire, Release, Xchg};
use lib::llvm::{ValueRef, Pointer, Array, Struct};
use lib;
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::common::*;
use middle::trans::datum::*;
use middle::trans::glue;
use middle::trans::type_of::*;
use middle::trans::type_of;
use middle::trans::machine;
use middle::trans::machine::llsize_of;
use middle::trans::type_::Type;
use middle::ty;
use syntax::ast;
use syntax::ast_map;
use syntax::parse::token;
use util::ppaux::ty_to_str;

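// Maps a "simple" Rust intrinsic (one whose signature matches an LLVM
// intrinsic exactly) to the corresponding LLVM intrinsic, so the call can be
// lowered as a plain call with no custom translation. Returns `None` for
// intrinsics that need special handling in `trans_intrinsic` below. On the
// Rust side these are declared in libstd/unstable/intrinsics.rs, roughly as
// (illustrative sketch, not the exact declaration):
//
//     extern "rust-intrinsic" {
//         fn sqrtf32(x: f32) -> f32;
//     }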
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

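// Translates the body of one of the remaining (non-simple) intrinsics:
// `decl` is the already-declared LLVM function for the foreign item, and each
// case below emits its IR directly, ending every path with a return.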
pub fn trans_intrinsic(ccx: &CrateContext,
                       decl: ValueRef,
                       item: &ast::ForeignItem,
                       substs: &param_substs,
                       ref_id: Option<ast::NodeId>) {
    debug!("trans_intrinsic(item.ident={})", token::get_ident(item.ident));

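    // Calls an `llvm.*.with.overflow` intrinsic, which yields an `{iN, i1}`
    // pair, and repackages it as the Rust-level `(T, bool)` result.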
    fn with_overflow_intrinsic(bcx: &Block, name: &'static str, t: ty::t) {
        let first_real_arg = bcx.fcx.arg_pos(0u);
        let a = get_param(bcx.fcx.llfn, first_real_arg);
        let b = get_param(bcx.fcx.llfn, first_real_arg + 1);
        let llfn = bcx.ccx().get_intrinsic(&name);

        // convert `i1` to a `bool`, and write to the out parameter
        let val = Call(bcx, llfn, [a, b], []);
        let result = ExtractValue(bcx, val, 0);
        let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
        let ret = C_undef(type_of::type_of(bcx.ccx(), t));
        let ret = InsertValue(bcx, ret, result, 0);
        let ret = InsertValue(bcx, ret, overflow, 1);

        if type_is_immediate(bcx.ccx(), t) {
            Ret(bcx, ret);
        } else {
            let retptr = get_param(bcx.fcx.llfn, bcx.fcx.out_arg_pos());
            Store(bcx, ret, retptr);
            RetVoid(bcx);
        }
    }

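    // Loads through the pointer argument with a volatile load and returns the
    // loaded value.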
    fn volatile_load_intrinsic(bcx: &Block) {
        let first_real_arg = bcx.fcx.arg_pos(0u);
        let src = get_param(bcx.fcx.llfn, first_real_arg);

        let val = VolatileLoad(bcx, src);
        Ret(bcx, val);
    }

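    // Stores the second argument through the destination pointer with a
    // volatile store.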
    fn volatile_store_intrinsic(bcx: &Block) {
        let first_real_arg = bcx.fcx.arg_pos(0u);
        let dst = get_param(bcx.fcx.llfn, first_real_arg);
        let val = get_param(bcx.fcx.llfn, first_real_arg + 1);

        VolatileStore(bcx, val, dst);
        RetVoid(bcx);
    }

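    // Lowers the `copy_*_memory` intrinsics to `llvm.memmove`/`llvm.memcpy`
    // (memmove when overlap is allowed). The element count is scaled by the
    // element size, and the i32/i64 variant of the LLVM intrinsic is chosen
    // from the target's integer width.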
    fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool, tp_ty: ty::t) {
        let ccx = bcx.ccx();
        let lltp_ty = type_of::type_of(ccx, tp_ty);
        let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
        let size = machine::llsize_of(ccx, lltp_ty);
        let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
        let name = if allow_overlap {
            if int_size == 32 {
                "llvm.memmove.p0i8.p0i8.i32"
            } else {
                "llvm.memmove.p0i8.p0i8.i64"
            }
        } else {
            if int_size == 32 {
                "llvm.memcpy.p0i8.p0i8.i32"
            } else {
                "llvm.memcpy.p0i8.p0i8.i64"
            }
        };

        let decl = bcx.fcx.llfn;
        let first_real_arg = bcx.fcx.arg_pos(0u);
        let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
        let src_ptr = PointerCast(bcx, get_param(decl, first_real_arg + 1), Type::i8p(ccx));
        let count = get_param(decl, first_real_arg + 2);
        let llfn = ccx.get_intrinsic(&name);
        Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
        RetVoid(bcx);
    }

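    // Lowers `set_memory`/`volatile_set_memory` to `llvm.memset`, again
    // scaling the element count by the element size.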
    fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t) {
        let ccx = bcx.ccx();
        let lltp_ty = type_of::type_of(ccx, tp_ty);
        let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
        let size = machine::llsize_of(ccx, lltp_ty);
        let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
            "llvm.memset.p0i8.i32"
        } else {
            "llvm.memset.p0i8.i64"
        };

        let decl = bcx.fcx.llfn;
        let first_real_arg = bcx.fcx.arg_pos(0u);
        let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
        let val = get_param(decl, first_real_arg + 1);
        let count = get_param(decl, first_real_arg + 2);
        let llfn = ccx.get_intrinsic(&name);
        Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
        RetVoid(bcx);
    }

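    // Calls `llvm.ctlz`/`llvm.cttz`; the constant `false` second argument
    // asks for a defined result (the bit width) when the input is zero.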
    fn count_zeros_intrinsic(bcx: &Block, name: &'static str) {
        let x = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(0u));
        let y = C_i1(bcx.ccx(), false);
        let llfn = bcx.ccx().get_intrinsic(&name);
        let llcall = Call(bcx, llfn, [x, y], []);
        Ret(bcx, llcall);
    }

    let output_type = ty::ty_fn_ret(ty::node_id_to_type(ccx.tcx(), item.id));

    let arena = TypedArena::new();
    let fcx = new_fn_ctxt(ccx, decl, item.id, false, output_type,
                          Some(&*substs), Some(item.span), &arena);
    init_function(&fcx, true, output_type);

    set_always_inline(fcx.llfn);

    let mut bcx = fcx.entry_bcx.borrow().clone().unwrap();
    let first_real_arg = fcx.arg_pos(0u);

    let name = token::get_ident(item.ident);

    // This requires that atomic intrinsics follow a specific naming pattern:
    // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst.
    if name.get().starts_with("atomic_") {
        let split: Vec<&str> = name.get().split('_').collect();
        assert!(split.len() >= 2, "Atomic intrinsic not in correct format");
        let order = if split.len() == 2 {
            lib::llvm::SequentiallyConsistent
        } else {
            match *split.get(2) {
                "relaxed" => lib::llvm::Monotonic,
                "acq" => lib::llvm::Acquire,
                "rel" => lib::llvm::Release,
                "acqrel" => lib::llvm::AcquireRelease,
                _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
            }
        };

        match *split.get(1) {
            "cxchg" => {
                // See include/llvm/IR/Instructions.h for LLVM's own choice of
                // failure ordering for a given success ordering; I assume that
                // it's good enough for us to use for now.
                let strongest_failure_ordering = match order {
                    lib::llvm::NotAtomic | lib::llvm::Unordered =>
                        ccx.sess().fatal("cmpxchg must be atomic"),
                    lib::llvm::Monotonic | lib::llvm::Release =>
                        lib::llvm::Monotonic,
                    lib::llvm::Acquire | lib::llvm::AcquireRelease =>
                        lib::llvm::Acquire,
                    lib::llvm::SequentiallyConsistent =>
                        lib::llvm::SequentiallyConsistent,
                };
                let old = AtomicCmpXchg(bcx, get_param(decl, first_real_arg),
                                        get_param(decl, first_real_arg + 1u),
                                        get_param(decl, first_real_arg + 2u),
                                        order, strongest_failure_ordering);
                Ret(bcx, old);
            }
            "load" => {
                let old = AtomicLoad(bcx, get_param(decl, first_real_arg),
                                     order);
                Ret(bcx, old);
            }
            "store" => {
                AtomicStore(bcx, get_param(decl, first_real_arg + 1u),
                            get_param(decl, first_real_arg),
                            order);
                RetVoid(bcx);
            }
            "fence" => {
                AtomicFence(bcx, order);
                RetVoid(bcx);
            }
            op => {
                // These are all AtomicRMW ops
                let atom_op = match op {
                    "xchg" => lib::llvm::Xchg,
                    "xadd" => lib::llvm::Add,
                    "xsub" => lib::llvm::Sub,
                    "and" => lib::llvm::And,
                    "nand" => lib::llvm::Nand,
                    "or" => lib::llvm::Or,
                    "xor" => lib::llvm::Xor,
                    "max" => lib::llvm::Max,
                    "min" => lib::llvm::Min,
                    "umax" => lib::llvm::UMax,
                    "umin" => lib::llvm::UMin,
                    _ => ccx.sess().fatal("unknown atomic operation")
                };

                let old = AtomicRMW(bcx, atom_op, get_param(decl, first_real_arg),
                                    get_param(decl, first_real_arg + 1u),
                                    order);
                Ret(bcx, old);
            }
        }

        fcx.cleanup();
        return;
    }

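    // Everything else: each arm below emits the complete body of one
    // intrinsic and must end the function with Ret/RetVoid (or Unreachable,
    // for `abort`).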
    match name.get() {
        "abort" => {
            let llfn = bcx.ccx().get_intrinsic(&("llvm.trap"));
            Call(bcx, llfn, [], []);
            Unreachable(bcx);
        }
        "breakpoint" => {
            let llfn = bcx.ccx().get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, [], []);
            RetVoid(bcx);
        }
        "size_of" => {
            let tp_ty = *substs.tys.get(0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            Ret(bcx, C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty) as uint));
        }
        "move_val_init" => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.tys.get(0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {val: get_param(decl, first_real_arg + 1u),
                             ty: tp_ty,
                             kind: Rvalue(mode)};
            bcx = src.store_to(bcx, get_param(decl, first_real_arg));
            RetVoid(bcx);
        }
        "min_align_of" => {
            let tp_ty = *substs.tys.get(0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            Ret(bcx, C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty) as uint));
        }
        "pref_align_of" => {
            let tp_ty = *substs.tys.get(0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            Ret(bcx, C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty) as uint));
        }
        "get_tydesc" => {
            let tp_ty = *substs.tys.get(0);
            let static_ti = get_tydesc(ccx, tp_ty);
            glue::lazily_emit_visit_glue(ccx, &*static_ti);

            // FIXME (#3730): ideally this shouldn't need a cast,
            // but there's a circularity between translating rust types to llvm
            // types and having a tydesc type available. So I can't directly access
            // the llvm type of the intrinsic::TyDesc struct.
            let userland_tydesc_ty = type_of::type_of(ccx, output_type);
            let td = PointerCast(bcx, static_ti.tydesc, userland_tydesc_ty);
            Ret(bcx, td);
        }
        "type_id" => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.tys.get(0),
                &ccx.link_meta.crate_hash);
            // NB: This needs to be kept in lockstep with the TypeId struct in
            // libstd/unstable/intrinsics.rs
            let val = C_named_struct(type_of::type_of(ccx, output_type),
                                     [C_u64(ccx, hash)]);
            match bcx.fcx.llretptr.get() {
                Some(ptr) => {
                    Store(bcx, val, ptr);
                    RetVoid(bcx);
                },
                None => Ret(bcx, val)
            }
        }
356 "init" => {
357 let tp_ty = *substs.tys.get(0);
358 let lltp_ty = type_of::type_of(ccx, tp_ty);
359 match bcx.fcx.llretptr.get() {
360 Some(ptr) => { Store(bcx, C_null(lltp_ty), ptr); RetVoid(bcx); }
361 None if ty::type_is_nil(tp_ty) => RetVoid(bcx),
362 None => Ret(bcx, C_null(lltp_ty)),
363 }
364 }
365 "uninit" => {
366 // Do nothing, this is effectively a no-op
367 let retty = *substs.tys.get(0);
368 if type_is_immediate(ccx, retty) && !return_type_is_void(ccx, retty) {
369 unsafe {
370 Ret(bcx, lib::llvm::llvm::LLVMGetUndef(type_of(ccx, retty).to_ref()));
371 }
372 } else {
373 RetVoid(bcx)
374 }
375 }
376 "forget" => {
377 RetVoid(bcx);
378 }
379 "transmute" => {
380 let (in_type, out_type) = (*substs.tys.get(0), *substs.tys.get(1));
381 let llintype = type_of::type_of(ccx, in_type);
382 let llouttype = type_of::type_of(ccx, out_type);
383
384 let in_type_size = machine::llbitsize_of_real(ccx, llintype);
385 let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
386 if in_type_size != out_type_size {
387 let sp = match ccx.tcx.map.get(ref_id.unwrap()) {
388 ast_map::NodeExpr(e) => e.span,
389 _ => fail!("transmute has non-expr arg"),
390 };
391 ccx.sess().span_fatal(sp,
392 format!("transmute called on types with different sizes: \
393 {intype} ({insize, plural, =1{# bit} other{# bits}}) to \
394 {outtype} ({outsize, plural, =1{# bit} other{# bits}})",
395 intype = ty_to_str(ccx.tcx(), in_type),
396 insize = in_type_size as uint,
397 outtype = ty_to_str(ccx.tcx(), out_type),
398 outsize = out_type_size as uint));
399 }
400
401 if !return_type_is_void(ccx, out_type) {
402 let llsrcval = get_param(decl, first_real_arg);
403 if type_is_immediate(ccx, in_type) {
404 match fcx.llretptr.get() {
405 Some(llretptr) => {
406 Store(bcx, llsrcval, PointerCast(bcx, llretptr, llintype.ptr_to()));
407 RetVoid(bcx);
408 }
409 None => match (llintype.kind(), llouttype.kind()) {
410 (Pointer, other) | (other, Pointer) if other != Pointer => {
411 let tmp = Alloca(bcx, llouttype, "");
412 Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
413 Ret(bcx, Load(bcx, tmp));
414 }
415 (Array, _) | (_, Array) | (Struct, _) | (_, Struct) => {
416 let tmp = Alloca(bcx, llouttype, "");
417 Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
418 Ret(bcx, Load(bcx, tmp));
419 }
420 _ => {
421 let llbitcast = BitCast(bcx, llsrcval, llouttype);
422 Ret(bcx, llbitcast)
423 }
424 }
425 }
426 } else if type_is_immediate(ccx, out_type) {
427 let llsrcptr = PointerCast(bcx, llsrcval, llouttype.ptr_to());
428 let ll_load = Load(bcx, llsrcptr);
429 Ret(bcx, ll_load);
430 } else {
431 // NB: Do not use a Load and Store here. This causes massive
432 // code bloat when `transmute` is used on large structural
433 // types.
434 let lldestptr = fcx.llretptr.get().unwrap();
435 let lldestptr = PointerCast(bcx, lldestptr, Type::i8p(ccx));
436 let llsrcptr = PointerCast(bcx, llsrcval, Type::i8p(ccx));
437
438 let llsize = llsize_of(ccx, llintype);
439 call_memcpy(bcx, lldestptr, llsrcptr, llsize, 1);
440 RetVoid(bcx);
441 };
442 } else {
443 RetVoid(bcx);
444 }
445 }
446 "needs_drop" => {
447 let tp_ty = *substs.tys.get(0);
448 Ret(bcx, C_bool(ccx, ty::type_needs_drop(ccx.tcx(), tp_ty)));
449 }
450 "owns_managed" => {
451 let tp_ty = *substs.tys.get(0);
452 Ret(bcx, C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed()));
453 }
454 "visit_tydesc" => {
455 let td = get_param(decl, first_real_arg);
456 let visitor = get_param(decl, first_real_arg + 1u);
457 let td = PointerCast(bcx, td, ccx.tydesc_type().ptr_to());
458 glue::call_visit_glue(bcx, visitor, td, None);
459 RetVoid(bcx);
460 }
461 "offset" => {
462 let ptr = get_param(decl, first_real_arg);
463 let offset = get_param(decl, first_real_arg + 1);
464 let lladdr = InBoundsGEP(bcx, ptr, [offset]);
465 Ret(bcx, lladdr);
466 }
467 "copy_nonoverlapping_memory" => copy_intrinsic(bcx, false, false, *substs.tys.get(0)),
468 "copy_memory" => copy_intrinsic(bcx, true, false, *substs.tys.get(0)),
469 "set_memory" => memset_intrinsic(bcx, false, *substs.tys.get(0)),
470
471 "volatile_copy_nonoverlapping_memory" =>
472 copy_intrinsic(bcx, false, true, *substs.tys.get(0)),
473 "volatile_copy_memory" => copy_intrinsic(bcx, true, true, *substs.tys.get(0)),
474 "volatile_set_memory" => memset_intrinsic(bcx, true, *substs.tys.get(0)),
475
476 "ctlz8" => count_zeros_intrinsic(bcx, "llvm.ctlz.i8"),
477 "ctlz16" => count_zeros_intrinsic(bcx, "llvm.ctlz.i16"),
478 "ctlz32" => count_zeros_intrinsic(bcx, "llvm.ctlz.i32"),
479 "ctlz64" => count_zeros_intrinsic(bcx, "llvm.ctlz.i64"),
480 "cttz8" => count_zeros_intrinsic(bcx, "llvm.cttz.i8"),
481 "cttz16" => count_zeros_intrinsic(bcx, "llvm.cttz.i16"),
482 "cttz32" => count_zeros_intrinsic(bcx, "llvm.cttz.i32"),
483 "cttz64" => count_zeros_intrinsic(bcx, "llvm.cttz.i64"),
484
485 "volatile_load" => volatile_load_intrinsic(bcx),
486 "volatile_store" => volatile_store_intrinsic(bcx),
487
488 "i8_add_with_overflow" =>
489 with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i8", output_type),
490 "i16_add_with_overflow" =>
491 with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i16", output_type),
492 "i32_add_with_overflow" =>
493 with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i32", output_type),
494 "i64_add_with_overflow" =>
495 with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i64", output_type),
496
497 "u8_add_with_overflow" =>
498 with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i8", output_type),
499 "u16_add_with_overflow" =>
500 with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i16", output_type),
501 "u32_add_with_overflow" =>
502 with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i32", output_type),
503 "u64_add_with_overflow" =>
504 with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i64", output_type),
505
506 "i8_sub_with_overflow" =>
507 with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i8", output_type),
508 "i16_sub_with_overflow" =>
509 with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i16", output_type),
510 "i32_sub_with_overflow" =>
511 with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i32", output_type),
512 "i64_sub_with_overflow" =>
513 with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i64", output_type),
514
515 "u8_sub_with_overflow" =>
516 with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i8", output_type),
517 "u16_sub_with_overflow" =>
518 with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i16", output_type),
519 "u32_sub_with_overflow" =>
520 with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i32", output_type),
521 "u64_sub_with_overflow" =>
522 with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i64", output_type),
523
524 "i8_mul_with_overflow" =>
525 with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i8", output_type),
526 "i16_mul_with_overflow" =>
527 with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i16", output_type),
528 "i32_mul_with_overflow" =>
529 with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i32", output_type),
530 "i64_mul_with_overflow" =>
531 with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i64", output_type),
532
533 "u8_mul_with_overflow" =>
534 with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i8", output_type),
535 "u16_mul_with_overflow" =>
536 with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i16", output_type),
537 "u32_mul_with_overflow" =>
538 with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i32", output_type),
539 "u64_mul_with_overflow" =>
540 with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i64", output_type),
541
        _ => {
            // Could we make this an enum rather than a string? Does it get
            // checked earlier?
            ccx.sess().span_bug(item.span, "unknown intrinsic");
        }
    }
    fcx.cleanup();
}