retour/arch/x86/patcher.rs

1use super::thunk;
2use crate::error::{Error, Result};
3use crate::{pic, util};
4use std::{mem, slice};
5
/// An inline patcher that redirects an x86 function to a detour by
/// overwriting its prolog (or the hot-patch padding directly above it).
pub struct Patcher {
  /// The executable bytes that are overwritten when toggling the hook.
  patch_area: &'static mut [u8],
  /// The bytes originally found in `patch_area`; restored on disable.
  original_prolog: Vec<u8>,
  /// The emitted redirect bytes (jump to detour); written on enable.
  detour_prolog: Vec<u8>,
}
11
12impl Patcher {
13  /// Creates a new detour patcher for an address.
14  ///
15  /// # Arguments
16  ///
17  /// * `target` - An address that should be hooked.
18  /// * `detour` - An address that the target should be redirected to.
19  /// * `prolog_size` - The available inline space for the hook.
20  pub unsafe fn new(target: *const (), detour: *const (), prolog_size: usize) -> Result<Patcher> {
21    // Calculate the patch area (i.e if a short or long jump should be used)
22    let patch_area = Self::patch_area(target, prolog_size)?;
23    let emitter = Self::hook_template(detour, patch_area);
24
25    let patch_address = patch_area.as_ptr() as *const ();
26    let original_prolog = patch_area.to_vec();
27
28    Ok(Patcher {
29      detour_prolog: emitter.emit(patch_address),
30      original_prolog,
31      patch_area,
32    })
33  }
34
35  /// Returns the target's patch area.
36  pub fn area(&self) -> &[u8] {
37    self.patch_area
38  }
39
40  /// Either patches or unpatches the function.
41  pub unsafe fn toggle(&mut self, enable: bool) {
42    // Copy either the detour or the original bytes of the function
43    self.patch_area.copy_from_slice(if enable {
44      &self.detour_prolog
45    } else {
46      &self.original_prolog
47    });
48  }
49
50  /// Returns the patch area for a function, consisting of a long jump and
51  /// possibly a short jump.
52  unsafe fn patch_area(target: *const (), prolog_size: usize) -> Result<&'static mut [u8]> {
53    let jump_rel08_size = mem::size_of::<thunk::x86::JumpShort>();
54    let jump_rel32_size = mem::size_of::<thunk::x86::JumpRel>();
55
56    // Check if there isn't enough space for a relative long jump
57    if !Self::is_patchable(target, prolog_size, jump_rel32_size) {
58      // ... check if a relative small jump fits instead
59      if Self::is_patchable(target, prolog_size, jump_rel08_size) {
60        // A small jump relies on there being a hot patch area above the
61        // function, that consists of at least 5 bytes (a rel32 jump).
62        let hot_patch = target as usize - jump_rel32_size;
63        let hot_patch_area = slice::from_raw_parts(hot_patch as *const u8, jump_rel32_size);
64
65        // Ensure that the hot patch area only contains padding and is executable
66        if !Self::is_code_padding(hot_patch_area)
67          || !util::is_executable_address(hot_patch_area.as_ptr() as *const _)?
68        {
69          Err(Error::NoPatchArea)?;
70        }
71
72        // The range is from the start of the hot patch to the end of the jump
73        let patch_size = jump_rel32_size + jump_rel08_size;
74        Ok(slice::from_raw_parts_mut(hot_patch as *mut u8, patch_size))
75      } else {
76        Err(Error::NoPatchArea)
77      }
78    } else {
79      // The range is from the start of the function to the end of the jump
80      Ok(slice::from_raw_parts_mut(
81        target as *mut u8,
82        jump_rel32_size,
83      ))
84    }
85  }
86
87  /// Creates a redirect code template for the targetted patch area.
88  fn hook_template(detour: *const (), patch_area: &[u8]) -> pic::CodeEmitter {
89    let mut emitter = pic::CodeEmitter::new();
90
91    // Both hot patch and normal detours use a relative long jump
92    emitter.add_thunk(thunk::x86::jmp_rel32(detour as usize));
93
94    // The hot patch relies on a small jump to get to the long jump
95    let jump_rel32_size = mem::size_of::<thunk::x86::JumpRel>();
96    let uses_hot_patch = patch_area.len() > jump_rel32_size;
97
98    if uses_hot_patch {
99      let displacement = -(jump_rel32_size as i8);
100      emitter.add_thunk(thunk::x86::jmp_rel8(displacement));
101    }
102
103    // Pad leftover bytes with nops
104    while emitter.len() < patch_area.len() {
105      emitter.add_thunk(thunk::x86::nop());
106    }
107
108    emitter
109  }
110
111  /// Returns whether an address can be inline patched or not.
112  unsafe fn is_patchable(target: *const (), prolog_size: usize, patch_size: usize) -> bool {
113    if prolog_size >= patch_size {
114      // If the whole patch fits it's good to go!
115      return true;
116    }
117
118    // Otherwise the inline patch relies on padding after the prolog
119    let slice = slice::from_raw_parts(
120      (target as usize + prolog_size) as *const u8,
121      patch_size - prolog_size,
122    );
123
124    Self::is_code_padding(slice)
125  }
126
127  /// Returns true if the slice only contains code padding.
128  fn is_code_padding(buffer: &[u8]) -> bool {
129    const PADDING: [u8; 3] = [0x00, 0x90, 0xCC];
130    buffer.iter().all(|code| PADDING.contains(code))
131  }
132}