diff --git a/circuits/src/builtins/poseidon/poseidon_chunk_stark.rs b/circuits/src/builtins/poseidon/poseidon_chunk_stark.rs index 7dc44447..f87b88a9 100644 --- a/circuits/src/builtins/poseidon/poseidon_chunk_stark.rs +++ b/circuits/src/builtins/poseidon/poseidon_chunk_stark.rs @@ -122,7 +122,7 @@ impl, const D: usize> Stark for PoseidonChunk yield_constr.constraint( lv[COL_POSEIDON_CHUNK_IS_EXT_LINE] * (P::ONES - lv[COL_POSEIDON_CHUNK_IS_EXT_LINE]), ); - // in ext line, tx_idx, env_idx, clk, opcode, op1, dst donnot change. + // in ext line, tx_idx, env_idx, clk, opcode, op1, dst do not change. yield_constr.constraint( nv[COL_POSEIDON_CHUNK_IS_EXT_LINE] * (nv[COL_POSEIDON_CHUNK_TX_IDX] - lv[COL_POSEIDON_CHUNK_TX_IDX]), diff --git a/circuits/src/builtins/storage/storage_access_stark.rs b/circuits/src/builtins/storage/storage_access_stark.rs index 61180628..ecd99468 100644 --- a/circuits/src/builtins/storage/storage_access_stark.rs +++ b/circuits/src/builtins/storage/storage_access_stark.rs @@ -131,7 +131,7 @@ impl, const D: usize> Stark for StorageAccess yield_constr.constraint_transition( (nv_is_padding - lv_is_padding) * (nv_is_padding - lv_is_padding - P::ONES), ); - // st_access_idx: from 1, donnot change or increase by 1 + // st_access_idx: from 1, do not change or increase by 1 yield_constr.constraint_first_row((P::ONES - lv_is_padding) * (lv_st_access_idx - P::ONES)); yield_constr.constraint_transition( (P::ONES - nv_is_padding) diff --git a/circuits/src/memory/memory_stark.rs b/circuits/src/memory/memory_stark.rs index 653d69f0..67c37b55 100644 --- a/circuits/src/memory/memory_stark.rs +++ b/circuits/src/memory/memory_stark.rs @@ -46,7 +46,7 @@ pub fn ctl_data() -> Vec> { } pub fn ctl_filter() -> Column { - // poseidon data is different, prophet write donnot lookup + // poseidon data is different, prophet write do not lookup Column::sum([ COL_MEM_S_MLOAD, COL_MEM_S_MSTORE, diff --git a/core/src/merkle_tree/tree.rs b/core/src/merkle_tree/tree.rs index 
60a7cbf6..3259e11b 100644 --- a/core/src/merkle_tree/tree.rs +++ b/core/src/merkle_tree/tree.rs @@ -252,7 +252,7 @@ impl AccountTree { }) } None => Err(TreeError::EmptyPatch(String::from( - "Empty matadata in apply_update_batch", + "Empty metadata in apply_update_batch", ))), } }) diff --git a/plonky2/field/src/arch/x86_64/avx2_goldilocks_field.rs b/plonky2/field/src/arch/x86_64/avx2_goldilocks_field.rs index b2da3515..e504a24d 100644 --- a/plonky2/field/src/arch/x86_64/avx2_goldilocks_field.rs +++ b/plonky2/field/src/arch/x86_64/avx2_goldilocks_field.rs @@ -437,7 +437,7 @@ unsafe fn add_small_64s_64_s(x_s: __m256i, y: __m256i) -> __m256i { // the addition of the low 32 bits generated a carry. This can never occur if y // <= 0xffffffff00000000: if y >> 32 = 0xffffffff, then no carry can occur. let mask = _mm256_cmpgt_epi32(x_s, res_wrapped_s); // -1 if overflowed else 0. - // The mask contains 0xffffffff in the high 32 bits if wraparound occured and 0 + // The mask contains 0xffffffff in the high 32 bits if wraparound occurred and 0 // otherwise. let wrapback_amt = _mm256_srli_epi64::<32>(mask); // -FIELD_ORDER if overflowed else 0. let res_s = _mm256_add_epi64(res_wrapped_s, wrapback_amt); @@ -458,7 +458,7 @@ unsafe fn sub_small_64s_64_s(x_s: __m256i, y: __m256i) -> __m256i { // if y <= 0xffffffff00000000: if y >> 32 = 0xffffffff, then no borrow can // occur. let mask = _mm256_cmpgt_epi32(res_wrapped_s, x_s); // -1 if underflowed else 0. - // The mask contains 0xffffffff in the high 32 bits if wraparound occured and 0 + // The mask contains 0xffffffff in the high 32 bits if wraparound occurred and 0 // otherwise. let wrapback_amt = _mm256_srli_epi64::<32>(mask); // -FIELD_ORDER if underflowed else 0. 
let res_s = _mm256_sub_epi64(res_wrapped_s, wrapback_amt); diff --git a/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs b/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs index 18466d90..34ab7b76 100644 --- a/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs +++ b/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs @@ -92,7 +92,7 @@ unsafe fn add_with_wraparound(a: u64, b: u64) -> u64 { adj = lateout(reg) adj, options(pure, nomem, nostack), ); - res + adj // adj is EPSILON if wraparound occured and 0 otherwise + res + adj // adj is EPSILON if wraparound occurred and 0 otherwise } /// Subtraction of a and (b >> 32) modulo ORDER accounting for wraparound. diff --git a/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs b/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs index b40b4277..143be847 100644 --- a/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs +++ b/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs @@ -183,10 +183,10 @@ unsafe fn const_layer( // occur if all round constants are < 0xffffffff00000001 = ORDER: if the high bits are // 0xffffffff, then the low bits are 0, so the carry bit cannot occur. So this trick is valid // as long as all the round constants are in canonical form. - // The mask contains 0xffffffff in the high doubleword if wraparound occured and 0 otherwise. + // The mask contains 0xffffffff in the high doubleword if wraparound occurred and 0 otherwise. // We will ignore the low doubleword. let wraparound_mask = map3!(_mm256_cmpgt_epi32, state_s, res_maybe_wrapped_s); - // wraparound_adjustment contains 0xffffffff = EPSILON if wraparound occured and 0 otherwise. + // wraparound_adjustment contains 0xffffffff = EPSILON if wraparound occurred and 0 otherwise. let wraparound_adjustment = map3!(_mm256_srli_epi64::<32>, wraparound_mask); // XOR commutes with the addition below. 
Placing it here helps mask latency. let res_maybe_wrapped = map3!(_mm256_xor_si256, res_maybe_wrapped_s, rep sign_bit);