|
| 1 | +//! Implementation for Linux / Android using `asm!`-based syscalls. |
| 2 | +use crate::{Error, MaybeUninit}; |
| 3 | + |
| 4 | +pub use crate::util::{inner_u32, inner_u64}; |
| 5 | + |
// Hard compile-time guard: this backend issues raw Linux syscalls, so it is
// only meaningful when targeting a Linux or Android kernel.
#[cfg(not(any(target_os = "android", target_os = "linux")))]
compile_error!("`linux_raw` backend can be enabled only for Linux/Android targets!");
| 8 | + |
/// Invokes the Linux `getrandom` syscall directly via inline assembly,
/// bypassing libc.
///
/// Returns the raw signed syscall result following the standard Linux
/// convention: the number of bytes written on success, or a negated `errno`
/// value on failure.
///
/// Each `cfg_if!` branch hard-codes the arch-specific syscall number
/// (`__NR_getrandom`) and that arch's syscall calling convention: which
/// register carries the syscall number, which registers carry the three
/// arguments, which register carries the result, and the trap instruction.
///
/// # Safety
///
/// `buf` must be valid for writes of `buflen` bytes for the duration of the
/// call.
// `__NR_getrandom` follows the kernel's lower-case naming, hence the allow.
#[allow(non_upper_case_globals)]
unsafe fn getrandom_syscall(buf: *mut u8, buflen: usize, flags: u32) -> isize {
    let r0;

    // Based on `rustix` and `linux-raw-sys` code.
    cfg_if! {
        if #[cfg(target_arch = "arm")] {
            const __NR_getrandom: u32 = 384;
            // In thumb-mode, r7 is the frame pointer and is not permitted to be used in
            // an inline asm operand, so we have to use a different register and copy it
            // into r7 inside the inline asm.
            // Theoretically, we could detect thumb mode in the build script, but several
            // register moves are cheap enough compared to the syscall cost, so we do not
            // bother with it.
            core::arch::asm!(
                "mov {tmp}, r7",
                "mov r7, {nr}",
                "svc 0",
                "mov r7, {tmp}",
                nr = const __NR_getrandom,
                tmp = out(reg) _,
                inlateout("r0") buf => r0,
                in("r1") buflen,
                in("r2") flags,
                options(nostack, preserves_flags)
            );
        } else if #[cfg(target_arch = "aarch64")] {
            const __NR_getrandom: u32 = 278;
            core::arch::asm!(
                "svc 0",
                in("x8") __NR_getrandom,
                inlateout("x0") buf => r0,
                in("x1") buflen,
                in("x2") flags,
                options(nostack, preserves_flags)
            );
        } else if #[cfg(target_arch = "loongarch64")] {
            const __NR_getrandom: u32 = 278;
            core::arch::asm!(
                "syscall 0",
                in("$a7") __NR_getrandom,
                inlateout("$a0") buf => r0,
                in("$a1") buflen,
                in("$a2") flags,
                options(nostack, preserves_flags)
            );
        } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] {
            const __NR_getrandom: u32 = 278;
            core::arch::asm!(
                "ecall",
                in("a7") __NR_getrandom,
                inlateout("a0") buf => r0,
                in("a1") buflen,
                in("a2") flags,
                options(nostack, preserves_flags)
            );
        } else if #[cfg(target_arch = "s390x")] {
            const __NR_getrandom: u32 = 349;
            core::arch::asm!(
                "svc 0",
                in("r1") __NR_getrandom,
                inlateout("r2") buf => r0,
                in("r3") buflen,
                in("r4") flags,
                options(nostack, preserves_flags)
            );
        } else if #[cfg(target_arch = "x86")] {
            const __NR_getrandom: u32 = 355;
            // `int 0x80` is famously slow, but implementing vDSO is too complex
            // and `sysenter`/`syscall` have their own portability issues,
            // so we use the simple "legacy" way of doing syscalls.
            core::arch::asm!(
                "int $$0x80",
                in("eax") __NR_getrandom,
                in("ebx") buf,
                in("ecx") buflen,
                in("edx") flags,
                lateout("eax") r0,
                options(nostack, preserves_flags)
            );
        } else if #[cfg(target_arch = "x86_64")] {
            // On the x32 ABI (64-bit ISA, 32-bit pointers) syscall numbers are
            // offset by `__X32_SYSCALL_BIT` (1 << 30).
            #[cfg(target_pointer_width = "64")]
            const __NR_getrandom: u32 = 318;
            #[cfg(target_pointer_width = "32")]
            const __NR_getrandom: u32 = (1 << 30) + 318;

            // The `syscall` instruction itself clobbers rcx (return RIP) and
            // r11 (saved RFLAGS), hence the extra `lateout` clobber operands.
            core::arch::asm!(
                "syscall",
                in("rax") __NR_getrandom,
                in("rdi") buf,
                in("rsi") buflen,
                in("rdx") flags,
                lateout("rax") r0,
                lateout("rcx") _,
                lateout("r11") _,
                options(nostack, preserves_flags)
            );
        } else {
            compile_error!("`linux_raw` backend does not support this target arch");
        }
    }

    r0
}
| 113 | + |
| 114 | +#[inline] |
| 115 | +pub fn fill_inner(mut dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> { |
| 116 | + // Value of this error code is stable across all target arches. |
| 117 | + const EINTR: isize = -4; |
| 118 | + |
| 119 | + loop { |
| 120 | + let ret = unsafe { getrandom_syscall(dest.as_mut_ptr().cast(), dest.len(), 0) }; |
| 121 | + match usize::try_from(ret) { |
| 122 | + Ok(0) => return Err(Error::UNEXPECTED), |
| 123 | + Ok(len) => { |
| 124 | + dest = dest.get_mut(len..).ok_or(Error::UNEXPECTED)?; |
| 125 | + if dest.is_empty() { |
| 126 | + return Ok(()); |
| 127 | + } |
| 128 | + } |
| 129 | + Err(_) if ret == EINTR => continue, |
| 130 | + Err(_) => { |
| 131 | + let code: u32 = ret |
| 132 | + .wrapping_neg() |
| 133 | + .try_into() |
| 134 | + .map_err(|_| Error::UNEXPECTED)?; |
| 135 | + return Err(Error::from_os_error(code)); |
| 136 | + } |
| 137 | + } |
| 138 | + } |
| 139 | +} |
0 commit comments