    // Bounds check for a fat-pointer access: the borrow flag (second output) of
    // `offset - length` is set iff offset < length, i.e. the offset lies
    // strictly inside the pointed-to slice.
    let (_, offset_is_strictly_in_slice) = offset.overflowing_sub(cs, length);
    let offset_is_beyond_the_slice = offset_is_strictly_in_slice.negated(cs);
    // A well-formed fat-pointer access that starts at or past the end of its
    // slice does not touch memory at all (it is skipped rather than panicking).
    let skip_if_legitimate_fat_ptr =
        Boolean::multi_and(cs, &[offset_is_beyond_the_slice, is_fat_ptr]);
    ......
    // Memory is left untouched if the opcode already panicked earlier, if this
    // is a legitimate fat-pointer read past the slice end, or if the computed
    // address is outside the addressable heap range.
    let skip_memory_access = Boolean::multi_or(
        cs,
        &[
            already_panicked,
            skip_if_legitimate_fat_ptr,
            is_non_addressable,
        ],
    );
    // When the access is skipped there is nothing to clean up, so force the
    // count to zero. `uf` does the same — presumably an underflow flag from the
    // preceding bounds arithmetic; TODO confirm against the surrounding code.
    bytes_out_of_bound = bytes_out_of_bound.mask_negated(cs, skip_memory_access);
    bytes_out_of_bound = bytes_out_of_bound.mask_negated(cs, uf);

    // Keep only the remainder mod 32: the number of out-of-bound bytes within
    // the last touched 32-byte word (the quotient is discarded).
    let (_, bytes_out_of_bound) = bytes_out_of_bound.div_by_constant(cs, 32);
    // remainder fits into 8 bits too
    // SAFETY: the value is a remainder of division by 32, so it is < 32 and
    // always fits into a u8 without range-checking.
    let bytes_to_cleanup_out_of_bounds =
        unsafe { UInt8::from_variable_unchecked(bytes_out_of_bound.get_variable()) };

    // Bundle the derived access parameters. `heap_deref_out_of_bounds` and
    // `skip_memory_access` let the caller suppress the memory access and/or
    // raise a panic flag downstream; `bytes_to_cleanup_out_of_bounds` drives
    // the trailing-byte zeroing for fat-pointer reads.
    let new = Self {
        absolute_address,
        page_candidate: page,
        incremented_offset,
        heap_deref_out_of_bounds: is_non_addressable,
        // field-init shorthand (was the redundant `skip_memory_access: skip_memory_access`)
        skip_memory_access,
        should_set_panic,
        bytes_to_cleanup_out_of_bounds,
    };
// dst0 is written back only when the opcode actually applies without a panic,
// and only for read accesses or for write accesses that also increment the
// pointer.
let apply_any = Boolean::multi_and(cs, &[should_apply, no_panic]);
let update_dst0 = Boolean::multi_or(cs, &[is_read_access, is_write_access_and_increment]);
let should_update_dst0 = Boolean::multi_and(cs, &[apply_any, update_dst0]);
diffs_accumulator
    .dst_0_values
    .push((can_write_into_memory, should_update_dst0, dst0_value));
// implement shift register
// Lay the two adjacent 32-byte memory words out big-endian into one 64-byte
// buffer, so that any (possibly unaligned) 32-byte span can be selected from
// it below.
let zero_u8 = UInt8::zero(cs);
let mut bytes_array = [zero_u8; 64];

let memory_value_a_bytes = memory_value_a.value.to_be_bytes(cs);
bytes_array[..32].copy_from_slice(&memory_value_a_bytes);

let memory_value_b_bytes = memory_value_b.value.to_be_bytes(cs);
bytes_array[32..].copy_from_slice(&memory_value_b_bytes);
// now mask-shift
let mut selected_word = [zero_u8; 32];

// idx 0 is unalignment of 0 (aligned), idx 31 is unalignment of 31
// `unalignment_bit_mask` is presumably one-hot (exactly one bit set for the
// actual unalignment — TODO confirm at the producer): for the active index,
// copy the 32-byte window starting at byte `idx` of the 64-byte buffer into
// `selected_word`, four bytes per parallel select.
for (idx, mask_bit) in unalignment_bit_mask.iter().enumerate() {
    let src = &bytes_array[idx..(idx + 32)]; // source
    debug_assert_eq!(src.len(), selected_word.len());

    for (dst, src) in selected_word
        .array_chunks_mut::<4>()
        .zip(src.array_chunks::<4>())
    {
        // take `src` when this unalignment candidate is active, else keep `dst`
        *dst = UInt8::parallel_select(cs, *mask_bit, src, &*dst);
    }

use crate::tables::uma_ptr_read_cleanup::UMAPtrReadCleanupTable;

// For a fat-pointer read whose span ends past the slice, the trailing
// `bytes_to_cleanup_out_of_bounds` bytes of the result must be zeroed. The
// lookup table maps that byte count to a "bitspread" value, which is then
// expanded into a per-byte 32-bit mask.
let table_id = cs
    .get_table_id_for_marker::<UMAPtrReadCleanupTable>()
    .expect("table must exist");
let bytes_to_cleanup_out_of_bound = quasi_fat_ptr.bytes_to_cleanup_out_of_bounds;
// only a UMA fat-pointer read needs cleanup; for anything else the count is
// masked to zero, which must map to the all-keep mask in the table
let bytes_to_cleanup_out_of_bound_if_ptr_read =
    bytes_to_cleanup_out_of_bound.mask(cs, is_uma_fat_ptr_read);
let [uma_cleanup_bitspread, _] = cs.perform_lookup::<1, 2>(
    table_id,
    &[bytes_to_cleanup_out_of_bound_if_ptr_read.get_variable()],
);
let uma_ptr_read_cleanup_mask =
    Num::from_variable(uma_cleanup_bitspread).spread_into_bits::<_, 32>(cs);

// Zero the trailing out-of-bound bytes. The mask is iterated in reverse so it
// lines up with the tail of the word — presumably bit 0 of the spread
// corresponds to the last byte; TODO confirm the table's bit order.
for (dst, masking_bit) in selected_word
    .iter_mut()
    .zip(uma_ptr_read_cleanup_mask.iter().rev())
{
    *dst = dst.mask(cs, *masking_bit);
}

.......

// The value written back to dst0: for a write access with pointer increment it
// is the incremented source pointer, otherwise the value just read from memory.
let dst0_value = VMRegister::conditionally_select(
    cs,
    is_write_access_and_increment,
    &incremented_src0_register,
    &read_value_as_register,
);
// Cell `a` is read whenever the opcode applies and the access is not skipped;
// cell `b` only for an unaligned read (presumably one straddling two 32-byte
// words — TODO confirm where `is_unaligned_read` is derived).
let should_read_a_cell = Boolean::multi_and(cs, &[should_apply, do_not_skip_memory_access]);
let should_read_b_cell = is_unaligned_read;
// NOTE(review): the lookup below is byte-for-byte identical to the
// UMAPtrReadCleanupTable query performed earlier in this file; if both
// fragments live in the same scope, the first result could be reused instead
// of paying for a second lookup — verify against the full function.
let table_id = cs
    .get_table_id_for_marker::<UMAPtrReadCleanupTable>()
    .expect("table must exist");
let bytes_to_cleanup_out_of_bound = quasi_fat_ptr.bytes_to_cleanup_out_of_bounds;
// only a UMA fat-pointer read needs trailing-byte cleanup
let bytes_to_cleanup_out_of_bound_if_ptr_read =
    bytes_to_cleanup_out_of_bound.mask(cs, is_uma_fat_ptr_read);
let [uma_cleanup_bitspread, _] = cs.perform_lookup::<1, 2>(
    table_id,
    &[bytes_to_cleanup_out_of_bound_if_ptr_read.get_variable()],
);
let uma_ptr_read_cleanup_mask =
    Num::from_variable(uma_cleanup_bitspread).spread_into_bits::<_, 32>(cs);
