From 651f16cb251014d61dcebef50bc13b024bc94d98 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Fri, 28 Jun 2019 11:16:50 -0700 Subject: [PATCH 1/4] kernel: Add new uniform time unit conversion API Zephyr has always had an ad hoc collection of time unit macros and conversion routines in a selection of different units, precisions, rounding modes and naming conventions. This adds a single optimized generator to produce any such conversion, and enumerates it to produce a collection of 48 utilities in all useful combinations as a single supported kernel API going forward. Signed-off-by: Andy Ross --- include/sys_clock.h | 24 +- include/time_units.h | 1032 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1034 insertions(+), 22 deletions(-) create mode 100644 include/time_units.h diff --git a/include/sys_clock.h b/include/sys_clock.h index fa95409afb45b..ababc85cfea26 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -22,6 +22,8 @@ #include #include +#include + #ifdef __cplusplus extern "C" { #endif @@ -31,26 +33,6 @@ extern int _sys_clock_always_on; extern void z_enable_sys_clock(void); #endif -#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) -__syscall int z_clock_hw_cycles_per_sec_runtime_get(void); - -static inline int z_impl_z_clock_hw_cycles_per_sec_runtime_get(void) -{ - extern int z_clock_hw_cycles_per_sec; - - return z_clock_hw_cycles_per_sec; -} -#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */ - -static inline int sys_clock_hw_cycles_per_sec(void) -{ -#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) - return z_clock_hw_cycles_per_sec_runtime_get(); -#else - return CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; -#endif -} - /* Note that some systems with comparatively slow cycle counters * experience precision loss when doing math like this. In the * general case it is not correct that "cycles" are much faster than @@ -243,6 +225,4 @@ struct _timeout { } #endif -#include - #endif /* ZEPHYR_INCLUDE_SYS_CLOCK_H_ */ diff --git a/include/time_units.h b/include/time_units.h new file mode 100644 index 0000000000000..372ced9072a3f --- /dev/null +++ b/include/time_units.h @@ -0,0 +1,1032 @@ +/* + * Copyright (c) 2019 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_TIME_UNITS_H_ +#define ZEPHYR_INCLUDE_TIME_UNITS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Exhaustively enumerated, highly optimized time unit conversion API */ + +#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) +__syscall int z_clock_hw_cycles_per_sec_runtime_get(void); + +static inline int z_impl_z_clock_hw_cycles_per_sec_runtime_get(void) +{ + extern int z_clock_hw_cycles_per_sec; + + return z_clock_hw_cycles_per_sec; +} +#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */ + +static inline int sys_clock_hw_cycles_per_sec(void) +{ +#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) + return z_clock_hw_cycles_per_sec_runtime_get(); +#else + return CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; +#endif +} + +/* Time converter generator gadget. Selects from one of three + * conversion algorithms: ones that take advantage when the + * frequencies are an integer ratio (in either direction), or a full + * precision conversion. Clever use of extra arguments causes all the + * selection logic to be optimized out, and the generated code even + * reduces to 32 bit only if a ratio conversion is available and the + * result is 32 bits. + * + * This isn't intended to be used directly, instead being wrapped + * appropriately in a user-facing API. 
The boolean arguments are: + * + * const_hz - The hz arguments are known to be compile-time + * constants (because otherwise the modulus test would + * have to be done at runtime) + * result32 - The result will be truncated to 32 bits on use + * round_up - Return the ceiling of the resulting fraction + * round_off - Return the nearest value to the resulting fraction + * (pass both round_up/off as false to get "round_down") + */ +static ALWAYS_INLINE u64_t z_tmcvt(u64_t t, u32_t from_hz, u32_t to_hz, + bool const_hz, bool result32, + bool round_up, bool round_off) +{ + bool mul_ratio = const_hz && + (to_hz > from_hz) && ((to_hz % from_hz) == 0); + bool div_ratio = const_hz && + (from_hz > to_hz) && ((from_hz % to_hz) == 0); + + if (from_hz == to_hz) { + return result32 ? ((u32_t)t) : t; + } + + u64_t off = 0; + + if (!mul_ratio) { + u32_t rdivisor = div_ratio ? (from_hz / to_hz) : from_hz; + + if (round_up) { + off = rdivisor - 1; + } else if (round_off) { + off = rdivisor / 2; + } + } + + /* Select (at build time!) between three different expressions for + * the same mathematical relationship, each expressed with and + * without truncation to 32 bits (I couldn't find a way to make + * the compiler correctly guess at the 32 bit result otherwise). + */ + if (div_ratio) { + t += off; + if (result32) { + return ((u32_t)t) / (from_hz / to_hz); + } else { + return t / (from_hz / to_hz); + } + } else if (mul_ratio) { + if (result32) { + return ((u32_t)t) * (to_hz / from_hz); + } else { + return t * (to_hz / from_hz); + } + } else { + if (result32) { + return (u32_t)((t * to_hz + off) / from_hz); + } else { + return (t * to_hz + off) / from_hz; + } + } +} + +/* The following code is programmatically generated using this perl + * code, which enumerates all possible combinations of units, rounding + * modes and precision. Do not edit directly. + * + * Note that no "microsecond" conversions are defined with anything + * but 64 bit precision. This unit was added to Zephyr after the + * introduction of 64 bit timeout support, so there is no backward + * compatibility requirement. And doing 32 bit math with units that + * small microseconds has precision traps that we probably don't want + * to support in an official API. + * + * #!/usr/bin/perl -w + * use strict; + * + * my %human = ("ms" => "milliseconds", + * "us" => "microseconds", + * "ns" => "nanoseconds", + * "cyc" => "hardware cycles", + * "ticks" => "ticks"); + * + * sub big { return $_[0] eq "us" || $_[0] eq "ns"; } + * sub prefix { return $_[0] eq "ms" || $_[0] eq "us" || $_[0] eq "ns"; } + * + * for my $from_unit ("ms", "us", "ns", "cyc", "ticks") { + * for my $to_unit ("ms", "us", "ns", "cyc", "ticks") { + * next if $from_unit eq $to_unit; + * next if prefix($from_unit) && prefix($to_unit); + * for my $round ("floor", "near", "ceil") { + * for(my $big=0; $big <= 1; $big++) { + * next if !$big && (big($from_unit) || big($to_unit)); + * my $sz = $big ? 64 : 32; + * my $sym = "k_${from_unit}_to_${to_unit}_$round$sz"; + * my $type = "u${sz}_t"; + * my $const_hz = ($from_unit eq "cyc" || $to_unit eq "cyc") + * ? "Z_CCYC" : "true"; + * my $ret32 = $big ? "false" : "true"; + * my $rup = $round eq "ceil" ? "true" : "false"; + * my $roff = $round eq "near" ? 
"true" : "false"; + * + * my $hfrom = $human{$from_unit}; + * my $hto = $human{$to_unit}; + * print "/", "** \@brief Convert $hfrom to $hto\n"; + * print " *\n"; + * print " * Converts time values in $hfrom to $hto.\n"; + * print " * Computes result in $sz bit precision.\n"; + * if ($round eq "ceil") { + * print " * Rounds up to the next highest output unit.\n"; + * } elsif ($round eq "near") { + * print " * Rounds to the nearest output unit.\n"; + * } else { + * print " * Truncates to the next lowest output unit.\n"; + * } + * print " *\n"; + * print " * \@return The converted time value\n"; + * print " *", "/\n"; + * + * print "static inline $type $sym($type t)\n{\n\t"; + * print "/", "* Generated. Do not edit. See above. *", "/\n\t"; + * print "return z_tmcvt(t, Z_HZ_$from_unit, Z_HZ_$to_unit,"; + * print " $const_hz, $ret32, $rup, $roff);\n"; + * print "}\n\n"; + * } + * } + * } + * } + */ + +/* Some more concise declarations to simplify the generator script and + * save bytes below + */ +#define Z_HZ_ms 1000 +#define Z_HZ_us 1000000 +#define Z_HZ_ns 1000000000 +#define Z_HZ_cyc sys_clock_hw_cycles_per_sec() +#define Z_HZ_ticks CONFIG_SYS_CLOCK_TICKS_PER_SEC +#define Z_CCYC (!IS_ENABLED(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)) + +/** @brief Convert milliseconds to hardware cycles + * + * Converts time values in milliseconds to hardware cycles. + * Computes result in 32 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ms_to_cyc_floor32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_cyc, Z_CCYC, true, false, false); +} + +/** @brief Convert milliseconds to hardware cycles + * + * Converts time values in milliseconds to hardware cycles. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ms_to_cyc_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_cyc, Z_CCYC, false, false, false); +} + +/** @brief Convert milliseconds to hardware cycles + * + * Converts time values in milliseconds to hardware cycles. + * Computes result in 32 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ms_to_cyc_near32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_cyc, Z_CCYC, true, false, true); +} + +/** @brief Convert milliseconds to hardware cycles + * + * Converts time values in milliseconds to hardware cycles. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ms_to_cyc_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_cyc, Z_CCYC, false, false, true); +} + +/** @brief Convert milliseconds to hardware cycles + * + * Converts time values in milliseconds to hardware cycles. + * Computes result in 32 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ms_to_cyc_ceil32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_cyc, Z_CCYC, true, true, false); +} + +/** @brief Convert milliseconds to hardware cycles + * + * Converts time values in milliseconds to hardware cycles. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. 
+ * + * @return The converted time value + */ +static inline u64_t k_ms_to_cyc_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_cyc, Z_CCYC, false, true, false); +} + +/** @brief Convert milliseconds to ticks + * + * Converts time values in milliseconds to ticks. + * Computes result in 32 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ms_to_ticks_floor32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_ticks, true, true, false, false); +} + +/** @brief Convert milliseconds to ticks + * + * Converts time values in milliseconds to ticks. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ms_to_ticks_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_ticks, true, false, false, false); +} + +/** @brief Convert milliseconds to ticks + * + * Converts time values in milliseconds to ticks. + * Computes result in 32 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ms_to_ticks_near32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_ticks, true, true, false, true); +} + +/** @brief Convert milliseconds to ticks + * + * Converts time values in milliseconds to ticks. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ms_to_ticks_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_ticks, true, false, false, true); +} + +/** @brief Convert milliseconds to ticks + * + * Converts time values in milliseconds to ticks. + * Computes result in 32 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ms_to_ticks_ceil32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_ticks, true, true, true, false); +} + +/** @brief Convert milliseconds to ticks + * + * Converts time values in milliseconds to ticks. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ms_to_ticks_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ms, Z_HZ_ticks, true, false, true, false); +} + +/** @brief Convert microseconds to hardware cycles + * + * Converts time values in microseconds to hardware cycles. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_us_to_cyc_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_us, Z_HZ_cyc, Z_CCYC, false, false, false); +} + +/** @brief Convert microseconds to hardware cycles + * + * Converts time values in microseconds to hardware cycles. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_us_to_cyc_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_us, Z_HZ_cyc, Z_CCYC, false, false, true); +} + +/** @brief Convert microseconds to hardware cycles + * + * Converts time values in microseconds to hardware cycles. 
+ * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_us_to_cyc_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_us, Z_HZ_cyc, Z_CCYC, false, true, false); +} + +/** @brief Convert microseconds to ticks + * + * Converts time values in microseconds to ticks. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_us_to_ticks_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_us, Z_HZ_ticks, true, false, false, false); +} + +/** @brief Convert microseconds to ticks + * + * Converts time values in microseconds to ticks. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_us_to_ticks_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_us, Z_HZ_ticks, true, false, false, true); +} + +/** @brief Convert microseconds to ticks + * + * Converts time values in microseconds to ticks. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_us_to_ticks_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_us, Z_HZ_ticks, true, false, true, false); +} + +/** @brief Convert nanoseconds to hardware cycles + * + * Converts time values in nanoseconds to hardware cycles. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ns_to_cyc_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ns, Z_HZ_cyc, Z_CCYC, false, false, false); +} + +/** @brief Convert nanoseconds to hardware cycles + * + * Converts time values in nanoseconds to hardware cycles. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ns_to_cyc_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ns, Z_HZ_cyc, Z_CCYC, false, false, true); +} + +/** @brief Convert nanoseconds to hardware cycles + * + * Converts time values in nanoseconds to hardware cycles. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ns_to_cyc_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ns, Z_HZ_cyc, Z_CCYC, false, true, false); +} + +/** @brief Convert nanoseconds to ticks + * + * Converts time values in nanoseconds to ticks. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ns_to_ticks_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ns, Z_HZ_ticks, true, false, false, false); +} + +/** @brief Convert nanoseconds to ticks + * + * Converts time values in nanoseconds to ticks. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ns_to_ticks_near64(u64_t t) +{ + /* Generated. Do not edit. See above. 
*/ + return z_tmcvt(t, Z_HZ_ns, Z_HZ_ticks, true, false, false, true); +} + +/** @brief Convert nanoseconds to ticks + * + * Converts time values in nanoseconds to ticks. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ns_to_ticks_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ns, Z_HZ_ticks, true, false, true, false); +} + +/** @brief Convert hardware cycles to milliseconds + * + * Converts time values in hardware cycles to milliseconds. + * Computes result in 32 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u32_t k_cyc_to_ms_floor32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ms, Z_CCYC, true, false, false); +} + +/** @brief Convert hardware cycles to milliseconds + * + * Converts time values in hardware cycles to milliseconds. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ms_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ms, Z_CCYC, false, false, false); +} + +/** @brief Convert hardware cycles to milliseconds + * + * Converts time values in hardware cycles to milliseconds. + * Computes result in 32 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u32_t k_cyc_to_ms_near32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ms, Z_CCYC, true, false, true); +} + +/** @brief Convert hardware cycles to milliseconds + * + * Converts time values in hardware cycles to milliseconds. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ms_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ms, Z_CCYC, false, false, true); +} + +/** @brief Convert hardware cycles to milliseconds + * + * Converts time values in hardware cycles to milliseconds. + * Computes result in 32 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u32_t k_cyc_to_ms_ceil32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ms, Z_CCYC, true, true, false); +} + +/** @brief Convert hardware cycles to milliseconds + * + * Converts time values in hardware cycles to milliseconds. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ms_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ms, Z_CCYC, false, true, false); +} + +/** @brief Convert hardware cycles to microseconds + * + * Converts time values in hardware cycles to microseconds. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_us_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_us, Z_CCYC, false, false, false); +} + +/** @brief Convert hardware cycles to microseconds + * + * Converts time values in hardware cycles to microseconds. + * Computes result in 64 bit precision. 
+ * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_us_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_us, Z_CCYC, false, false, true); +} + +/** @brief Convert hardware cycles to microseconds + * + * Converts time values in hardware cycles to microseconds. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_us_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_us, Z_CCYC, false, true, false); +} + +/** @brief Convert hardware cycles to nanoseconds + * + * Converts time values in hardware cycles to nanoseconds. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ns_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ns, Z_CCYC, false, false, false); +} + +/** @brief Convert hardware cycles to nanoseconds + * + * Converts time values in hardware cycles to nanoseconds. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ns_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ns, Z_CCYC, false, false, true); +} + +/** @brief Convert hardware cycles to nanoseconds + * + * Converts time values in hardware cycles to nanoseconds. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ns_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ns, Z_CCYC, false, true, false); +} + +/** @brief Convert hardware cycles to ticks + * + * Converts time values in hardware cycles to ticks. + * Computes result in 32 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u32_t k_cyc_to_ticks_floor32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ticks, Z_CCYC, true, false, false); +} + +/** @brief Convert hardware cycles to ticks + * + * Converts time values in hardware cycles to ticks. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ticks_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ticks, Z_CCYC, false, false, false); +} + +/** @brief Convert hardware cycles to ticks + * + * Converts time values in hardware cycles to ticks. + * Computes result in 32 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u32_t k_cyc_to_ticks_near32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ticks, Z_CCYC, true, false, true); +} + +/** @brief Convert hardware cycles to ticks + * + * Converts time values in hardware cycles to ticks. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ticks_near64(u64_t t) +{ + /* Generated. Do not edit. See above. 
*/ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ticks, Z_CCYC, false, false, true); +} + +/** @brief Convert hardware cycles to ticks + * + * Converts time values in hardware cycles to ticks. + * Computes result in 32 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u32_t k_cyc_to_ticks_ceil32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ticks, Z_CCYC, true, true, false); +} + +/** @brief Convert hardware cycles to ticks + * + * Converts time values in hardware cycles to ticks. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_cyc_to_ticks_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_cyc, Z_HZ_ticks, Z_CCYC, false, true, false); +} + +/** @brief Convert ticks to milliseconds + * + * Converts time values in ticks to milliseconds. + * Computes result in 32 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ticks_to_ms_floor32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ms, true, true, false, false); +} + +/** @brief Convert ticks to milliseconds + * + * Converts time values in ticks to milliseconds. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_ms_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ms, true, false, false, false); +} + +/** @brief Convert ticks to milliseconds + * + * Converts time values in ticks to milliseconds. + * Computes result in 32 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ticks_to_ms_near32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ms, true, true, false, true); +} + +/** @brief Convert ticks to milliseconds + * + * Converts time values in ticks to milliseconds. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_ms_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ms, true, false, false, true); +} + +/** @brief Convert ticks to milliseconds + * + * Converts time values in ticks to milliseconds. + * Computes result in 32 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ticks_to_ms_ceil32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ms, true, true, true, false); +} + +/** @brief Convert ticks to milliseconds + * + * Converts time values in ticks to milliseconds. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_ms_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ms, true, false, true, false); +} + +/** @brief Convert ticks to microseconds + * + * Converts time values in ticks to microseconds. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. 
+ * + * @return The converted time value + */ +static inline u64_t k_ticks_to_us_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_us, true, false, false, false); +} + +/** @brief Convert ticks to microseconds + * + * Converts time values in ticks to microseconds. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_us_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_us, true, false, false, true); +} + +/** @brief Convert ticks to microseconds + * + * Converts time values in ticks to microseconds. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_us_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_us, true, false, true, false); +} + +/** @brief Convert ticks to nanoseconds + * + * Converts time values in ticks to nanoseconds. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_ns_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ns, true, false, false, false); +} + +/** @brief Convert ticks to nanoseconds + * + * Converts time values in ticks to nanoseconds. + * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_ns_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ns, true, false, false, true); +} + +/** @brief Convert ticks to nanoseconds + * + * Converts time values in ticks to nanoseconds. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_ns_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_ns, true, false, true, false); +} + +/** @brief Convert ticks to hardware cycles + * + * Converts time values in ticks to hardware cycles. + * Computes result in 32 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ticks_to_cyc_floor32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_cyc, Z_CCYC, true, false, false); +} + +/** @brief Convert ticks to hardware cycles + * + * Converts time values in ticks to hardware cycles. + * Computes result in 64 bit precision. + * Truncates to the next lowest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_cyc_floor64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_cyc, Z_CCYC, false, false, false); +} + +/** @brief Convert ticks to hardware cycles + * + * Converts time values in ticks to hardware cycles. + * Computes result in 32 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ticks_to_cyc_near32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_cyc, Z_CCYC, true, false, true); +} + +/** @brief Convert ticks to hardware cycles + * + * Converts time values in ticks to hardware cycles. 
+ * Computes result in 64 bit precision. + * Rounds to the nearest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_cyc_near64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_cyc, Z_CCYC, false, false, true); +} + +/** @brief Convert ticks to hardware cycles + * + * Converts time values in ticks to hardware cycles. + * Computes result in 32 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u32_t k_ticks_to_cyc_ceil32(u32_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_cyc, Z_CCYC, true, true, false); +} + +/** @brief Convert ticks to hardware cycles + * + * Converts time values in ticks to hardware cycles. + * Computes result in 64 bit precision. + * Rounds up to the next highest output unit. + * + * @return The converted time value + */ +static inline u64_t k_ticks_to_cyc_ceil64(u64_t t) +{ + /* Generated. Do not edit. See above. */ + return z_tmcvt(t, Z_HZ_ticks, Z_HZ_cyc, Z_CCYC, false, true, false); +} + +#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) +#include +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* ZEPHYR_INCLUDE_TIME_UNITS_H_ */ From afc00cd5a5cbfed992fd590ef428be4ab6b3d6cb Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Fri, 26 Jul 2019 13:25:08 -0700 Subject: [PATCH 2/4] tests/kernel/timer/timer_api: Add tests for the new conversion routines The new conversion API has a ton of generated utilities. Test it via enumerating each one of them and throwing a selection of both hand-picked and random numbers at it. Works by using slightly different math to compute the expected result and assuming that we don't have symmetric bugs in both. Signed-off-by: Andy Ross --- include/time_units.h | 11 +- tests/kernel/timer/timer_api/prj.conf | 1 + .../kernel/timer/timer_api/prj_tickless.conf | 1 + tests/kernel/timer/timer_api/src/main.c | 3 + .../timer/timer_api/src/timer_convert.c | 179 ++++++++++++++++++ 5 files changed, 189 insertions(+), 6 deletions(-) create mode 100644 tests/kernel/timer/timer_api/src/timer_convert.c diff --git a/include/time_units.h b/include/time_units.h index 372ced9072a3f..ec48f2e0c8f86 100644 --- a/include/time_units.h +++ b/include/time_units.h @@ -108,12 +108,11 @@ static ALWAYS_INLINE u64_t z_tmcvt(u64_t t, u32_t from_hz, u32_t to_hz, * code, which enumerates all possible combinations of units, rounding * modes and precision. Do not edit directly. * - * Note that no "microsecond" conversions are defined with anything - * but 64 bit precision. This unit was added to Zephyr after the - * introduction of 64 bit timeout support, so there is no backward - * compatibility requirement. And doing 32 bit math with units that - * small microseconds has precision traps that we probably don't want - * to support in an official API. + * Note that nano/microsecond conversions are only defined with 64 bit + * precision. These units conversions were not available in 32 bit + * variants historically, and doing 32 bit math with units that small + * has precision traps that we probably don't want to support in an + * official API. 
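+ *
+ * (Illustrative arithmetic only: a u32_t counting nanoseconds wraps
+ * after about 4.29 seconds, and one counting microseconds after
+ * roughly 71 minutes, so 32 bit variants of these conversions would
+ * have very little useful range.)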
* * #!/usr/bin/perl -w * use strict; diff --git a/tests/kernel/timer/timer_api/prj.conf b/tests/kernel/timer/timer_api/prj.conf index 45f8b25f05b60..a6dafd41f324b 100644 --- a/tests/kernel/timer/timer_api/prj.conf +++ b/tests/kernel/timer/timer_api/prj.conf @@ -3,3 +3,4 @@ CONFIG_QEMU_TICKLESS_WORKAROUND=y CONFIG_TEST_USERSPACE=y CONFIG_MP_NUM_CPUS=1 +CONFIG_TEST_RANDOM_GENERATOR=y diff --git a/tests/kernel/timer/timer_api/prj_tickless.conf b/tests/kernel/timer/timer_api/prj_tickless.conf index e1f4414b42d8c..05a3837ea2663 100644 --- a/tests/kernel/timer/timer_api/prj_tickless.conf +++ b/tests/kernel/timer/timer_api/prj_tickless.conf @@ -2,3 +2,4 @@ CONFIG_ZTEST=y CONFIG_SYS_POWER_MANAGEMENT=y CONFIG_TICKLESS_KERNEL=y CONFIG_MP_NUM_CPUS=1 +CONFIG_TEST_RANDOM_GENERATOR=y diff --git a/tests/kernel/timer/timer_api/src/main.c b/tests/kernel/timer/timer_api/src/main.c index 477419a4f38b8..2ffb81aad270c 100644 --- a/tests/kernel/timer/timer_api/src/main.c +++ b/tests/kernel/timer/timer_api/src/main.c @@ -37,6 +37,8 @@ static struct k_timer remain_timer; static ZTEST_BMEM struct timer_data tdata; +extern void test_time_conversions(void); + #define TIMER_ASSERT(exp, tmr) \ do { \ if (!(exp)) { \ @@ -556,6 +558,7 @@ void test_main(void) &timer2, &timer3, &timer4); ztest_test_suite(timer_api, + ztest_unit_test(test_time_conversions), ztest_user_unit_test(test_timer_duration_period), ztest_user_unit_test(test_timer_period_0), ztest_user_unit_test(test_timer_expirefn_null), diff --git a/tests/kernel/timer/timer_api/src/timer_convert.c b/tests/kernel/timer/timer_api/src/timer_convert.c new file mode 100644 index 0000000000000..8aa4c5c0814e0 --- /dev/null +++ b/tests/kernel/timer/timer_api/src/timer_convert.c @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2019 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include + +#define NUM_RANDOM 100 + +enum units { UNIT_ticks, UNIT_cyc, UNIT_ms, UNIT_us, UNIT_ns }; + +enum round { ROUND_floor, ROUND_ceil, ROUND_near }; + +struct test_rec { + enum units src; + enum units dst; + int precision; /* 32 or 64 */ + enum round round; + void *func; +}; + +#define TESTREC(src, dst, round, prec) { \ + UNIT_##src, UNIT_##dst, prec, ROUND_##round, \ + (void *)k_##src##_to_##dst##_##round##prec \ + } \ + +static struct test_rec tests[] = { + TESTREC(ms, cyc, floor, 32), + TESTREC(ms, cyc, floor, 64), + TESTREC(ms, cyc, near, 32), + TESTREC(ms, cyc, near, 64), + TESTREC(ms, cyc, ceil, 32), + TESTREC(ms, cyc, ceil, 64), + TESTREC(ms, ticks, floor, 32), + TESTREC(ms, ticks, floor, 64), + TESTREC(ms, ticks, near, 32), + TESTREC(ms, ticks, near, 64), + TESTREC(ms, ticks, ceil, 32), + TESTREC(ms, ticks, ceil, 64), + TESTREC(us, cyc, floor, 64), + TESTREC(us, cyc, near, 64), + TESTREC(us, cyc, ceil, 64), + TESTREC(us, ticks, floor, 64), + TESTREC(us, ticks, near, 64), + TESTREC(us, ticks, ceil, 64), + TESTREC(cyc, ms, floor, 32), + TESTREC(cyc, ms, floor, 64), + TESTREC(cyc, ms, near, 32), + TESTREC(cyc, ms, near, 64), + TESTREC(cyc, ms, ceil, 32), + TESTREC(cyc, ms, ceil, 64), + TESTREC(cyc, us, floor, 64), + TESTREC(cyc, us, near, 64), + TESTREC(cyc, us, ceil, 64), + TESTREC(cyc, ticks, floor, 32), + TESTREC(cyc, ticks, floor, 64), + TESTREC(cyc, ticks, near, 32), + TESTREC(cyc, ticks, near, 64), + TESTREC(cyc, ticks, ceil, 32), + TESTREC(cyc, ticks, ceil, 64), + TESTREC(ticks, ms, floor, 32), + TESTREC(ticks, ms, floor, 64), + TESTREC(ticks, ms, near, 32), + TESTREC(ticks, ms, near, 64), + TESTREC(ticks, ms, ceil, 32), + TESTREC(ticks, ms, ceil, 
64), + TESTREC(ticks, us, floor, 64), + TESTREC(ticks, us, near, 64), + TESTREC(ticks, us, ceil, 64), + TESTREC(ticks, cyc, floor, 32), + TESTREC(ticks, cyc, floor, 64), + TESTREC(ticks, cyc, near, 32), + TESTREC(ticks, cyc, near, 64), + TESTREC(ticks, cyc, ceil, 32), + TESTREC(ticks, cyc, ceil, 64), + TESTREC(ns, cyc, floor, 64), + TESTREC(ns, cyc, near, 64), + TESTREC(ns, cyc, ceil, 64), + TESTREC(ns, ticks, floor, 64), + TESTREC(ns, ticks, near, 64), + TESTREC(ns, ticks, ceil, 64), + TESTREC(cyc, ns, floor, 64), + TESTREC(cyc, ns, near, 64), + TESTREC(cyc, ns, ceil, 64), + TESTREC(ticks, ns, floor, 64), + TESTREC(ticks, ns, near, 64), + TESTREC(ticks, ns, ceil, 64), + }; + +u32_t get_hz(enum units u) +{ + if (u == UNIT_ticks) { + return CONFIG_SYS_CLOCK_TICKS_PER_SEC; + } else if (u == UNIT_cyc) { + return sys_clock_hw_cycles_per_sec(); + } else if (u == UNIT_ms) { + return 1000; + } else if (u == UNIT_us) { + return 1000000; + } else if (u == UNIT_ns) { + return 1000000000; + } + __ASSERT(0, ""); + return 0; +} + +void test_conversion(struct test_rec *t, u64_t val) +{ + u32_t from_hz = get_hz(t->src), to_hz = get_hz(t->dst); + u64_t result; + + if (t->precision == 32) { + u32_t (*convert)(u32_t) = (u32_t (*)(u32_t)) t->func; + + result = convert((u32_t) val); + + /* If the input value legitimately overflows, then + * there is nothing to test + */ + if ((val * to_hz) >= ((((u64_t)from_hz) << 32))) { + return; + } + } else { + u64_t (*convert)(u64_t) = (u64_t (*)(u64_t)) t->func; + + result = convert(val); + } + + /* We expect the ideal result to be equal to "val * to_hz / + * from_hz", but that division is the source of precision + * issues. So reexpress our equation as: + * + * val * to_hz ==? result * from_hz + * 0 ==? val * to_hz - result * from_hz + * + * The difference is allowed to be in the range [0:from_hz) if + * we are rounding down, from (-from_hz:0] if we are rounding + * up, or [-from_hz/2:from_hz/2] if we are rounding to the + * nearest. + */ + s64_t diff = (s64_t)(val * to_hz - result * from_hz); + s64_t maxdiff, mindiff; + + if (t->round == ROUND_floor) { + maxdiff = from_hz - 1; + mindiff = 0; + } else if (t->round == ROUND_ceil) { + maxdiff = 0; + mindiff = -(s64_t)(from_hz-1); + } else { + maxdiff = from_hz/2; + mindiff = -(s64_t)(from_hz/2); + } + + zassert_true(diff <= maxdiff && diff >= mindiff, + "Convert %lld from %lldhz to %lldhz (= %lld) failed. " + "diff %lld should be in [%lld:%lld]", + val, from_hz, to_hz, result, diff, mindiff, maxdiff); +} + +void test_time_conversions(void) +{ + for (int i = 0; i < ARRAY_SIZE(tests); i++) { + test_conversion(&tests[i], 0); + test_conversion(&tests[i], 1); + test_conversion(&tests[i], 0x7fffffff); + test_conversion(&tests[i], 0x80000000); + if (tests[i].precision == 64) { + test_conversion(&tests[i], 0xffffffff); + test_conversion(&tests[i], 0x100000000ULL); + } + + for (int j = 0; j < NUM_RANDOM; j++) { + test_conversion(&tests[i], sys_rand32_get()); + } + } +} From 768ca98a3df5747f6b09a39f5fe59dec66e96b94 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Thu, 3 Oct 2019 11:19:24 -0700 Subject: [PATCH 3/4] kernel: Express legacy time conversions using new API Remove the older time conversion utilities and use the new ones exclusively, with preprocessor macros to provide the older symbols for compatibility. 
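
For illustration only (a sketch, not part of the functional change),
a legacy call site such as

    u64_t ms = __ticks_to_ms(ticks);

keeps compiling because the old name is now a macro expanding to the
equivalent new helper,

    u64_t ms = k_ticks_to_ms_floor64(ticks);

which spells out the rounding mode and precision in its name.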
Signed-off-by: Andy Ross --- include/sys_clock.h | 101 +++----------------------------------------- 1 file changed, 7 insertions(+), 94 deletions(-) diff --git a/include/sys_clock.h b/include/sys_clock.h index ababc85cfea26..6396e47826791 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -33,20 +33,6 @@ extern int _sys_clock_always_on; extern void z_enable_sys_clock(void); #endif -/* Note that some systems with comparatively slow cycle counters - * experience precision loss when doing math like this. In the - * general case it is not correct that "cycles" are much faster than - * "ticks". - */ -static inline int sys_clock_hw_cycles_per_tick(void) -{ -#ifdef CONFIG_SYS_CLOCK_EXISTS - return sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC; -#else - return 1; /* Just to avoid a division by zero */ -#endif -} - #if defined(CONFIG_SYS_CLOCK_EXISTS) && \ (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 0) #error "SYS_CLOCK_HW_CYCLES_PER_SEC must be non-zero!" @@ -86,78 +72,17 @@ static inline int sys_clock_hw_cycles_per_tick(void) #endif -static ALWAYS_INLINE s32_t z_ms_to_ticks(s32_t ms) -{ -#ifdef CONFIG_SYS_CLOCK_EXISTS - -#ifdef _NEED_PRECISE_TICK_MS_CONVERSION - int cyc = sys_clock_hw_cycles_per_sec(); - - /* use 64-bit math to keep precision */ - return (s32_t)ceiling_fraction((s64_t)ms * cyc, - ((s64_t)MSEC_PER_SEC * cyc) / CONFIG_SYS_CLOCK_TICKS_PER_SEC); -#else - /* simple division keeps precision */ - s32_t ms_per_tick = MSEC_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC; - - return (s32_t)ceiling_fraction(ms, ms_per_tick); -#endif - -#else - __ASSERT(ms == 0, "ms not zero"); - return 0; -#endif -} - -static inline u64_t __ticks_to_ms(s64_t ticks) -{ -#ifdef CONFIG_SYS_CLOCK_EXISTS - return (u64_t)ticks * MSEC_PER_SEC / - (u64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC; -#else - __ASSERT(ticks == 0, "ticks not zero"); - return 0ULL; -#endif -} - -/* - * These are only currently used by k_usleep(), but they are - * defined here for parity with their ms analogs above. Note: - * we don't bother trying the 32-bit intermediate shortcuts - * possible with ms, because of the magnitudes involved. 
- */ - -static inline s32_t z_us_to_ticks(s32_t us) -{ -#ifdef CONFIG_SYS_CLOCK_EXISTS - return (s32_t) ceiling_fraction( - (s64_t)us * sys_clock_hw_cycles_per_sec(), - ((s64_t)USEC_PER_SEC * sys_clock_hw_cycles_per_sec()) / - CONFIG_SYS_CLOCK_TICKS_PER_SEC); -#else - __ASSERT(us == 0, "us not zero"); - return 0; -#endif -} - -static inline s32_t __ticks_to_us(s32_t ticks) -{ -#ifdef CONFIG_SYS_CLOCK_EXISTS - return (s32_t) ((s64_t)ticks * USEC_PER_SEC / - (s64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC); -#else - __ASSERT(ticks == 0, "ticks not zero"); - return 0; -#endif -} +#define __ticks_to_ms(t) k_ticks_to_ms_floor64(t) +#define z_ms_to_ticks(t) k_ms_to_ticks_ceil32(t) +#define __ticks_to_us(t) k_ticks_to_us_floor64(t) +#define z_us_to_ticks(t) k_us_to_ticks_ceil64(t) +#define sys_clock_hw_cycles_per_tick() k_ticks_to_cyc_floor32(1) +#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) (1000 * k_cyc_to_us_floor64(t)) +#define SYS_CLOCK_HW_CYCLES_TO_NS(t) ((u32_t)(1000 * k_cyc_to_us_floor64(t))) /* added tick needed to account for tick in progress */ #define _TICK_ALIGN 1 -/* SYS_CLOCK_HW_CYCLES_TO_NS64 converts CPU clock cycles to nanoseconds */ -#define SYS_CLOCK_HW_CYCLES_TO_NS64(X) \ - (((u64_t)(X) * NSEC_PER_SEC) / sys_clock_hw_cycles_per_sec()) - /* * SYS_CLOCK_HW_CYCLES_TO_NS_AVG converts CPU clock cycles to nanoseconds * and calculates the average cycle time @@ -171,18 +96,6 @@ static inline s32_t __ticks_to_us(s32_t ticks) * @{ */ -/** - * @brief Compute nanoseconds from hardware clock cycles. - * - * This macro converts a time duration expressed in hardware clock cycles - * to the equivalent duration expressed in nanoseconds. - * - * @param X Duration in hardware clock cycles. - * - * @return Duration in nanoseconds. - */ -#define SYS_CLOCK_HW_CYCLES_TO_NS(X) (u32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X)) - /** * @} end defgroup clock_apis */ From cde2d431beb06da720fd8ce13e68ca7abb9c4be6 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Thu, 3 Oct 2019 11:43:10 -0700 Subject: [PATCH 4/4] kernel/sys_clock.h: Deprecate and convert uses of old conversions Mark the old time conversion APIs deprecated, leave compatibility macros in place, and replace all usage with the new API. 
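
For example, a typical call site changes from the deprecated macro to
the explicitly named helper:

    -	delta = SYS_CLOCK_HW_CYCLES_TO_NS(delta);
    +	delta = (u32_t)k_cyc_to_ns_floor64(delta);

so each use states its rounding mode and precision directly.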
Signed-off-by: Andy Ross --- arch/arc/core/timestamp.c | 2 +- doc/reference/kernel/timing/clocks.rst | 2 +- drivers/timer/altera_avalon_timer_hal.c | 8 +++---- drivers/timer/litex_timer.c | 6 ++--- drivers/timer/loapic_timer.c | 2 +- drivers/timer/xlnx_psttc_timer.c | 2 +- include/kernel.h | 4 ++-- include/sys_clock.h | 23 ++++++++++++------- kernel/poll.c | 2 +- kernel/sched.c | 12 +++++----- kernel/thread.c | 2 +- kernel/timeout.c | 2 +- kernel/timer.c | 4 ++-- kernel/work_q.c | 2 +- lib/cmsis_rtos_v1/cmsis_signal.c | 2 +- lib/cmsis_rtos_v2/event_flags.c | 4 ++-- lib/cmsis_rtos_v2/kernel.c | 4 ++-- lib/cmsis_rtos_v2/mempool.c | 2 +- lib/cmsis_rtos_v2/msgq.c | 4 ++-- lib/cmsis_rtos_v2/mutex.c | 2 +- lib/cmsis_rtos_v2/semaphore.c | 2 +- lib/cmsis_rtos_v2/thread_flags.c | 4 ++-- lib/cmsis_rtos_v2/timer.c | 2 +- samples/smp/pi/src/main.c | 2 +- subsys/bluetooth/shell/gatt.c | 2 +- subsys/bluetooth/shell/l2cap.c | 2 +- subsys/debug/tracing/cpu_stats.c | 6 ++--- subsys/net/ip/net_shell.c | 8 +++---- subsys/net/ip/net_stats.h | 4 ++-- subsys/net/l2/canbus/6locan.c | 16 ++++++------- subsys/net/l2/ppp/ppp_l2.c | 2 +- subsys/testsuite/include/timestamp.h | 2 +- .../latency_measure/src/int_to_thread.c | 2 +- .../latency_measure/src/int_to_thread_evt.c | 2 +- tests/benchmarks/latency_measure/src/utils.h | 2 +- tests/benchmarks/mbedtls/src/benchmark.c | 2 +- .../benchmarks/timing_info/src/timing_info.h | 2 +- tests/kernel/common/src/boot_delay.c | 3 +-- tests/kernel/common/src/clock.c | 4 ++-- tests/kernel/context/src/main.c | 4 ++-- tests/kernel/early_sleep/src/main.c | 8 +++---- tests/kernel/fifo/fifo_timeout/src/main.c | 4 ++-- tests/kernel/lifo/lifo_usage/src/main.c | 2 +- .../src/test_sched_timeslice_reset.c | 10 ++++---- .../schedule_api/src/test_slice_scheduling.c | 4 ++-- tests/kernel/sleep/src/main.c | 2 +- .../tickless/tickless_concept/src/main.c | 8 +++---- tests/kernel/timer/timer_api/src/main.c | 6 ++--- tests/kernel/timer/timer_monotonic/src/main.c | 4 ++-- tests/kernel/workq/work_queue/src/main.c | 2 +- tests/kernel/workq/work_queue_api/src/main.c | 6 ++--- .../cmsis_rtos_v1/src/kernel_apis.c | 2 +- .../cmsis_rtos_v2/src/thread_apis.c | 14 +++++------ 53 files changed, 122 insertions(+), 114 deletions(-) diff --git a/arch/arc/core/timestamp.c b/arch/arc/core/timestamp.c index 81e3ea7914e89..1eee914a4b054 100644 --- a/arch/arc/core/timestamp.c +++ b/arch/arc/core/timestamp.c @@ -33,7 +33,7 @@ u64_t z_tsc_read(void) t = (u64_t)z_tick_get(); count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); irq_unlock(key); - t *= (u64_t)sys_clock_hw_cycles_per_tick(); + t *= k_ticks_to_cyc_floor64(1); t += (u64_t)count; return t; } diff --git a/doc/reference/kernel/timing/clocks.rst b/doc/reference/kernel/timing/clocks.rst index 70f355be96caf..afeb335ee0eb8 100644 --- a/doc/reference/kernel/timing/clocks.rst +++ b/doc/reference/kernel/timing/clocks.rst @@ -129,7 +129,7 @@ between two points in time. 
/* compute how long the work took (assumes no counter rollover) */ cycles_spent = stop_time - start_time; - nanoseconds_spent = SYS_CLOCK_HW_CYCLES_TO_NS(cycles_spent); + nanoseconds_spent = (u32_t)k_cyc_to_ns_floor64(cycles_spent); Suggested Uses ************** diff --git a/drivers/timer/altera_avalon_timer_hal.c b/drivers/timer/altera_avalon_timer_hal.c index 5a97b7efaccfc..d8cd8a99bf5ff 100644 --- a/drivers/timer/altera_avalon_timer_hal.c +++ b/drivers/timer/altera_avalon_timer_hal.c @@ -28,7 +28,7 @@ static void timer_irq_handler(void *unused) read_timer_start_of_tick_handler(); #endif - accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); + accumulated_cycle_count += k_ticks_to_cyc_floor32(1); /* Clear the interrupt */ alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ); @@ -46,15 +46,15 @@ int z_clock_driver_init(struct device *device) ARG_UNUSED(device); IOWR_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE, - sys_clock_hw_cycles_per_tick() & 0xFFFF); + k_ticks_to_cyc_floor32(1) & 0xFFFF); IOWR_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE, - (sys_clock_hw_cycles_per_tick() >> 16) & 0xFFFF); + (k_ticks_to_cyc_floor32(1) >> 16) & 0xFFFF); IRQ_CONNECT(TIMER_0_IRQ, 0, timer_irq_handler, NULL, 0); irq_enable(TIMER_0_IRQ); alt_avalon_timer_sc_init((void *)TIMER_0_BASE, 0, - TIMER_0_IRQ, sys_clock_hw_cycles_per_tick()); + TIMER_0_IRQ, k_ticks_to_cyc_floor32(1)); return 0; } diff --git a/drivers/timer/litex_timer.c b/drivers/timer/litex_timer.c index d1bc201ce5311..48d0aece89b03 100644 --- a/drivers/timer/litex_timer.c +++ b/drivers/timer/litex_timer.c @@ -30,7 +30,7 @@ static void litex_timer_irq_handler(void *device) int key = irq_lock(); sys_write8(TIMER_EV, TIMER_EV_PENDING_ADDR); - accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); + accumulated_cycle_count += k_ticks_to_cyc_floor32(1); z_clock_announce(1); irq_unlock(key); @@ -57,9 +57,9 @@ int z_clock_driver_init(struct device *device) sys_write8(TIMER_DISABLE, TIMER_EN_ADDR); for (int i = 0; i < 4; i++) { - sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8), + sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8), TIMER_RELOAD_ADDR + i * 0x4); - sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8), + sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8), TIMER_LOAD_ADDR + i * 0x4); } diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c index a53a568ae0179..f91e369f4423f 100644 --- a/drivers/timer/loapic_timer.c +++ b/drivers/timer/loapic_timer.c @@ -574,7 +574,7 @@ int z_clock_driver_init(struct device *device) /* determine the timer counter value (in timer clock cycles/system tick) */ - cycles_per_tick = sys_clock_hw_cycles_per_tick(); + cycles_per_tick = k_ticks_to_cyc_floor32(1); tickless_idle_init(); diff --git a/drivers/timer/xlnx_psttc_timer.c b/drivers/timer/xlnx_psttc_timer.c index 5ec46ab3e2da7..d1abeb45e9c36 100644 --- a/drivers/timer/xlnx_psttc_timer.c +++ b/drivers/timer/xlnx_psttc_timer.c @@ -111,7 +111,7 @@ void _timer_int_handler(void *unused) u32_t regval; regval = sys_read32(TIMER_BASEADDR + XTTCPS_ISR_OFFSET); - accumulated_cycles += sys_clock_hw_cycles_per_tick(); + accumulated_cycles += k_ticks_to_cyc_floor32(1); z_clock_announce(_sys_idle_elapsed_ticks); } diff --git a/include/kernel.h b/include/kernel.h index 959a05cb2327a..1256ab72bf473 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -1637,7 +1637,7 @@ __syscall u32_t k_timer_remaining_get(struct k_timer *timer); static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer) { const s32_t ticks = 
z_timeout_remaining(&timer->timeout); - return (ticks > 0) ? (u32_t)__ticks_to_ms(ticks) : 0U; + return (ticks > 0) ? (u32_t)k_ticks_to_ms_floor64(ticks) : 0U; } /** @@ -3077,7 +3077,7 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work, */ static inline s32_t k_delayed_work_remaining_get(struct k_delayed_work *work) { - return __ticks_to_ms(z_timeout_remaining(&work->timeout)); + return k_ticks_to_ms_floor64(z_timeout_remaining(&work->timeout)); } /** diff --git a/include/sys_clock.h b/include/sys_clock.h index 6396e47826791..08d87f764a974 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -72,13 +72,20 @@ extern void z_enable_sys_clock(void); #endif -#define __ticks_to_ms(t) k_ticks_to_ms_floor64(t) -#define z_ms_to_ticks(t) k_ms_to_ticks_ceil32(t) -#define __ticks_to_us(t) k_ticks_to_us_floor64(t) -#define z_us_to_ticks(t) k_us_to_ticks_ceil64(t) -#define sys_clock_hw_cycles_per_tick() k_ticks_to_cyc_floor32(1) -#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) (1000 * k_cyc_to_us_floor64(t)) -#define SYS_CLOCK_HW_CYCLES_TO_NS(t) ((u32_t)(1000 * k_cyc_to_us_floor64(t))) +#define __ticks_to_ms(t) __DEPRECATED_MACRO \ + k_ticks_to_ms_floor64(t) +#define z_ms_to_ticks(t) \ + k_ms_to_ticks_ceil32(t) +#define __ticks_to_us(t) __DEPRECATED_MACRO \ + k_ticks_to_us_floor64(t) +#define z_us_to_ticks(t) __DEPRECATED_MACRO \ + k_us_to_ticks_ceil64(t) +#define sys_clock_hw_cycles_per_tick() __DEPRECATED_MACRO \ + k_ticks_to_cyc_floor32(1) +#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) __DEPRECATED_MACRO \ + k_cyc_to_ns_floor64(t) +#define SYS_CLOCK_HW_CYCLES_TO_NS(t) __DEPRECATED_MACRO \ + ((u32_t)k_cyc_to_ns_floor64(t)) /* added tick needed to account for tick in progress */ #define _TICK_ALIGN 1 @@ -88,7 +95,7 @@ extern void z_enable_sys_clock(void); * and calculates the average cycle time */ #define SYS_CLOCK_HW_CYCLES_TO_NS_AVG(X, NCYCLES) \ - (u32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X) / NCYCLES) + (u32_t)(k_cyc_to_ns_floor64(X) / NCYCLES) /** * @defgroup clock_apis Kernel Clock APIs diff --git a/kernel/poll.c b/kernel/poll.c index 608887a3acea3..795198b755154 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -638,7 +638,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q, if (timeout != K_FOREVER) { z_add_timeout(&work->timeout, triggered_work_expiration_handler, - z_ms_to_ticks(timeout)); + k_ms_to_ticks_ceil32(timeout)); } /* From now, any event will result in submitted work. 
*/ diff --git a/kernel/sched.c b/kernel/sched.c index cadd391db41f8..5bbe86225042d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -257,7 +257,7 @@ void k_sched_time_slice_set(s32_t slice, int prio) { LOCKED(&sched_spinlock) { _current_cpu->slice_ticks = 0; - slice_time = z_ms_to_ticks(slice); + slice_time = k_ms_to_ticks_ceil32(slice); slice_max_prio = prio; z_reset_time_slice(); } @@ -368,7 +368,7 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout) } if (timeout != K_FOREVER) { - s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(timeout); + s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout); z_add_thread_timeout(thread, ticks); } @@ -975,9 +975,9 @@ s32_t z_impl_k_sleep(int ms) { s32_t ticks; - ticks = z_ms_to_ticks(ms); + ticks = k_ms_to_ticks_ceil32(ms); ticks = z_tick_sleep(ticks); - return __ticks_to_ms(ticks); + return k_ticks_to_ms_floor64(ticks); } #ifdef CONFIG_USERSPACE @@ -992,9 +992,9 @@ s32_t z_impl_k_usleep(int us) { s32_t ticks; - ticks = z_us_to_ticks(us); + ticks = k_us_to_ticks_ceil64(us); ticks = z_tick_sleep(ticks); - return __ticks_to_us(ticks); + return k_ticks_to_us_floor64(ticks); } #ifdef CONFIG_USERSPACE diff --git a/kernel/thread.c b/kernel/thread.c index 01266e2a93377..162acea6b1329 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -378,7 +378,7 @@ static void schedule_new_thread(struct k_thread *thread, s32_t delay) if (delay == 0) { k_thread_start(thread); } else { - s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(delay); + s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay); z_add_thread_timeout(thread, ticks); } diff --git a/kernel/timeout.c b/kernel/timeout.c index 9694653d69ae0..262366742d5d5 100644 --- a/kernel/timeout.c +++ b/kernel/timeout.c @@ -238,7 +238,7 @@ u32_t z_tick_get_32(void) s64_t z_impl_k_uptime_get(void) { - return __ticks_to_ms(z_tick_get()); + return k_ticks_to_ms_floor64(z_tick_get()); } #ifdef CONFIG_USERSPACE diff --git a/kernel/timer.c b/kernel/timer.c index 3c4c4caacf7e8..46d69dd3fc666 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -112,8 +112,8 @@ void z_impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period) volatile s32_t period_in_ticks, duration_in_ticks; - period_in_ticks = z_ms_to_ticks(period); - duration_in_ticks = z_ms_to_ticks(duration); + period_in_ticks = k_ms_to_ticks_ceil32(period); + duration_in_ticks = k_ms_to_ticks_ceil32(duration); (void)z_abort_timeout(&timer->timeout); timer->period = period_in_ticks; diff --git a/kernel/work_q.c b/kernel/work_q.c index f050a437ab818..1fb18f3ac8312 100644 --- a/kernel/work_q.c +++ b/kernel/work_q.c @@ -108,7 +108,7 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q, /* Add timeout */ z_add_timeout(&work->timeout, work_timeout, - _TICK_ALIGN + z_ms_to_ticks(delay)); + _TICK_ALIGN + k_ms_to_ticks_ceil32(delay)); done: k_spin_unlock(&lock, key); diff --git a/lib/cmsis_rtos_v1/cmsis_signal.c b/lib/cmsis_rtos_v1/cmsis_signal.c index 02a262d33e419..151e6d8cd3aa4 100644 --- a/lib/cmsis_rtos_v1/cmsis_signal.c +++ b/lib/cmsis_rtos_v1/cmsis_signal.c @@ -142,7 +142,7 @@ osEvent osSignalWait(int32_t signals, uint32_t millisec) */ hwclk_cycles_delta = (u64_t)k_cycle_get_32() - time_stamp_start; time_delta_ns = - (u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); + (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta); time_delta_ms = (u32_t)time_delta_ns/NSEC_PER_MSEC; if (timeout > time_delta_ms) { diff --git a/lib/cmsis_rtos_v2/event_flags.c b/lib/cmsis_rtos_v2/event_flags.c index 93e751ed6ccc9..20b99c810f637 100644 --- 
a/lib/cmsis_rtos_v2/event_flags.c +++ b/lib/cmsis_rtos_v2/event_flags.c @@ -109,7 +109,7 @@ uint32_t osEventFlagsWait(osEventFlagsId_t ef_id, uint32_t flags, struct cv2_event_flags *events = (struct cv2_event_flags *)ef_id; int retval, key; u32_t sig; - u32_t time_delta_ms, timeout_ms = __ticks_to_ms(timeout); + u32_t time_delta_ms, timeout_ms = k_ticks_to_ms_floor64(timeout); u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns; /* Can be called from ISRs only if timeout is set to 0 */ @@ -172,7 +172,7 @@ uint32_t osEventFlagsWait(osEventFlagsId_t ef_id, uint32_t flags, (u64_t)k_cycle_get_32() - time_stamp_start; time_delta_ns = - (u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); + (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta); time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC; diff --git a/lib/cmsis_rtos_v2/kernel.c b/lib/cmsis_rtos_v2/kernel.c index 06285911fc729..e58bef5e7ddf4 100644 --- a/lib/cmsis_rtos_v2/kernel.c +++ b/lib/cmsis_rtos_v2/kernel.c @@ -132,7 +132,7 @@ osStatus_t osDelay(uint32_t ticks) return osErrorISR; } - k_sleep(__ticks_to_ms(ticks)); + k_sleep(k_ticks_to_ms_floor64(ticks)); return osOK; } @@ -149,7 +149,7 @@ osStatus_t osDelayUntil(uint32_t ticks) } ticks_elapsed = osKernelGetTickCount(); - k_sleep(__ticks_to_ms(ticks - ticks_elapsed)); + k_sleep(k_ticks_to_ms_floor64(ticks - ticks_elapsed)); return osOK; } diff --git a/lib/cmsis_rtos_v2/mempool.c b/lib/cmsis_rtos_v2/mempool.c index 128b8068a6c9c..1ff49ed1159e8 100644 --- a/lib/cmsis_rtos_v2/mempool.c +++ b/lib/cmsis_rtos_v2/mempool.c @@ -109,7 +109,7 @@ void *osMemoryPoolAlloc(osMemoryPoolId_t mp_id, uint32_t timeout) } else { retval = k_mem_slab_alloc( (struct k_mem_slab *)(&mslab->z_mslab), - (void **)&ptr, __ticks_to_ms(timeout)); + (void **)&ptr, k_ticks_to_ms_floor64(timeout)); } if (retval == 0) { diff --git a/lib/cmsis_rtos_v2/msgq.c b/lib/cmsis_rtos_v2/msgq.c index d4f4de9f3e23a..930ec50e3dea3 100644 --- a/lib/cmsis_rtos_v2/msgq.c +++ b/lib/cmsis_rtos_v2/msgq.c @@ -104,7 +104,7 @@ osStatus_t osMessageQueuePut(osMessageQueueId_t msgq_id, const void *msg_ptr, retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr, K_FOREVER); } else { retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (retval == 0) { @@ -142,7 +142,7 @@ osStatus_t osMessageQueueGet(osMessageQueueId_t msgq_id, void *msg_ptr, retval = k_msgq_get(&msgq->z_msgq, msg_ptr, K_FOREVER); } else { retval = k_msgq_get(&msgq->z_msgq, msg_ptr, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (retval == 0) { diff --git a/lib/cmsis_rtos_v2/mutex.c b/lib/cmsis_rtos_v2/mutex.c index 27c8ae13086b3..8127941f5dc7e 100644 --- a/lib/cmsis_rtos_v2/mutex.c +++ b/lib/cmsis_rtos_v2/mutex.c @@ -94,7 +94,7 @@ osStatus_t osMutexAcquire(osMutexId_t mutex_id, uint32_t timeout) status = k_mutex_lock(&mutex->z_mutex, K_NO_WAIT); } else { status = k_mutex_lock(&mutex->z_mutex, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (status == -EBUSY) { diff --git a/lib/cmsis_rtos_v2/semaphore.c b/lib/cmsis_rtos_v2/semaphore.c index 938401fd4eccf..d77dea51e7785 100644 --- a/lib/cmsis_rtos_v2/semaphore.c +++ b/lib/cmsis_rtos_v2/semaphore.c @@ -77,7 +77,7 @@ osStatus_t osSemaphoreAcquire(osSemaphoreId_t semaphore_id, uint32_t timeout) status = k_sem_take(&semaphore->z_semaphore, K_NO_WAIT); } else { status = k_sem_take(&semaphore->z_semaphore, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (status == -EBUSY) { diff --git 
a/lib/cmsis_rtos_v2/thread_flags.c b/lib/cmsis_rtos_v2/thread_flags.c index d7f769391d0f0..ecb51caae4013 100644 --- a/lib/cmsis_rtos_v2/thread_flags.c +++ b/lib/cmsis_rtos_v2/thread_flags.c @@ -89,7 +89,7 @@ uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout) struct cv2_thread *tid; int retval, key; u32_t sig; - u32_t time_delta_ms, timeout_ms = __ticks_to_ms(timeout); + u32_t time_delta_ms, timeout_ms = k_ticks_to_ms_floor64(timeout); u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns; if (k_is_in_isr()) { @@ -155,7 +155,7 @@ uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout) (u64_t)k_cycle_get_32() - time_stamp_start; time_delta_ns = - (u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); + (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta); time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC; diff --git a/lib/cmsis_rtos_v2/timer.c b/lib/cmsis_rtos_v2/timer.c index 2508e3ec7c2e0..e202618894e92 100644 --- a/lib/cmsis_rtos_v2/timer.c +++ b/lib/cmsis_rtos_v2/timer.c @@ -80,7 +80,7 @@ osTimerId_t osTimerNew(osTimerFunc_t func, osTimerType_t type, osStatus_t osTimerStart(osTimerId_t timer_id, uint32_t ticks) { struct cv2_timer *timer = (struct cv2_timer *)timer_id; - u32_t millisec = __ticks_to_ms(ticks); + u32_t millisec = k_ticks_to_ms_floor64(ticks); if (timer == NULL) { return osErrorParameter; diff --git a/samples/smp/pi/src/main.c b/samples/smp/pi/src/main.c index f100ae7818afd..eb92d66ae50b5 100644 --- a/samples/smp/pi/src/main.c +++ b/samples/smp/pi/src/main.c @@ -102,7 +102,7 @@ void main(void) stop_time = k_cycle_get_32(); cycles_spent = stop_time - start_time; - nanoseconds_spent = SYS_CLOCK_HW_CYCLES_TO_NS(cycles_spent); + nanoseconds_spent = (u32_t)k_cyc_to_ns_floor64(cycles_spent); for (i = 0; i < THREADS_NUM; i++) printk("Pi value calculated by thread #%d: %s\n", i, buffer[i]); diff --git a/subsys/bluetooth/shell/gatt.c b/subsys/bluetooth/shell/gatt.c index 0ca75a6118205..f9682234bf321 100644 --- a/subsys/bluetooth/shell/gatt.c +++ b/subsys/bluetooth/shell/gatt.c @@ -860,7 +860,7 @@ static ssize_t write_met(struct bt_conn *conn, const struct bt_gatt_attr *attr, memcpy(value + offset, buf, len); delta = k_cycle_get_32() - cycle_stamp; - delta = SYS_CLOCK_HW_CYCLES_TO_NS(delta); + delta = (u32_t)k_cyc_to_ns_floor64(delta); /* if last data rx-ed was greater than 1 second in the past, * reset the metrics. diff --git a/subsys/bluetooth/shell/l2cap.c b/subsys/bluetooth/shell/l2cap.c index 0a7a47a9699ae..4132edbb36392 100644 --- a/subsys/bluetooth/shell/l2cap.c +++ b/subsys/bluetooth/shell/l2cap.c @@ -62,7 +62,7 @@ static int l2cap_recv_metrics(struct bt_l2cap_chan *chan, struct net_buf *buf) u32_t delta; delta = k_cycle_get_32() - cycle_stamp; - delta = SYS_CLOCK_HW_CYCLES_TO_NS(delta); + delta = (u32_t)k_cyc_to_ns_floor64(delta); /* if last data rx-ed was greater than 1 second in the past, * reset the metrics. 
diff --git a/subsys/debug/tracing/cpu_stats.c b/subsys/debug/tracing/cpu_stats.c index c5c0306cf0348..4ea14595da4de 100644 --- a/subsys/debug/tracing/cpu_stats.c +++ b/subsys/debug/tracing/cpu_stats.c @@ -62,10 +62,10 @@ void cpu_stats_get_ns(struct cpu_stats *cpu_stats_ns) int key = irq_lock(); cpu_stats_update_counters(); - cpu_stats_ns->idle = SYS_CLOCK_HW_CYCLES_TO_NS(stats_hw_tick.idle); - cpu_stats_ns->non_idle = SYS_CLOCK_HW_CYCLES_TO_NS( + cpu_stats_ns->idle = (u32_t)k_cyc_to_ns_floor64(stats_hw_tick.idle); + cpu_stats_ns->non_idle = (u32_t)k_cyc_to_ns_floor64( stats_hw_tick.non_idle); - cpu_stats_ns->sched = SYS_CLOCK_HW_CYCLES_TO_NS(stats_hw_tick.sched); + cpu_stats_ns->sched = (u32_t)k_cyc_to_ns_floor64(stats_hw_tick.sched); irq_unlock(key); } diff --git a/subsys/net/ip/net_shell.c b/subsys/net/ip/net_shell.c index b7781d37d550b..bc3a0657112a7 100644 --- a/subsys/net/ip/net_shell.c +++ b/subsys/net/ip/net_shell.c @@ -2887,9 +2887,9 @@ static enum net_verdict handle_ipv6_echo_reply(struct net_pkt *pkt, net_pkt_ieee802154_rssi(pkt), #endif #ifdef CONFIG_FLOAT - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000.f)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000.f)); #else - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000)); #endif k_sem_give(&ping_timeout); @@ -3011,9 +3011,9 @@ static enum net_verdict handle_ipv4_echo_reply(struct net_pkt *pkt, ntohs(icmp_echo->sequence), ip_hdr->ttl, #ifdef CONFIG_FLOAT - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000.f)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000.f)); #else - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000)); #endif k_sem_give(&ping_timeout); diff --git a/subsys/net/ip/net_stats.h b/subsys/net/ip/net_stats.h index 3684ec80049fb..aa54477a587a8 100644 --- a/subsys/net/ip/net_stats.h +++ b/subsys/net/ip/net_stats.h @@ -327,7 +327,7 @@ static inline void net_stats_update_tx_time(struct net_if *iface, u32_t diff = end_time - start_time; UPDATE_STAT(iface, stats.tx_time.sum += - SYS_CLOCK_HW_CYCLES_TO_NS64(diff) / NSEC_PER_USEC); + k_cyc_to_ns_floor64(diff) / 1000); UPDATE_STAT(iface, stats.tx_time.count += 1); } #else @@ -379,7 +379,7 @@ static inline void net_stats_update_tc_tx_time(struct net_if *iface, u32_t diff = end_time - start_time; UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.sum += - SYS_CLOCK_HW_CYCLES_TO_NS64(diff) / NSEC_PER_USEC); + k_cyc_to_ns_floor64(diff) / 1000); UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.count += 1); net_stats_update_tx_time(iface, start_time, end_time); diff --git a/subsys/net/l2/canbus/6locan.c b/subsys/net/l2/canbus/6locan.c index 4f4e424f695a0..f85dd8d25a7d5 100644 --- a/subsys/net/l2/canbus/6locan.c +++ b/subsys/net/l2/canbus/6locan.c @@ -160,7 +160,7 @@ static s32_t canbus_stmin_to_ticks(u8_t stmin) time_ms = stmin; } - return z_ms_to_ticks(time_ms); + return k_ms_to_ticks_ceil32(time_ms); } static u16_t canbus_get_lladdr(struct net_linkaddr *net_lladdr) @@ -533,7 +533,7 @@ static enum net_verdict canbus_process_cf(struct net_pkt *pkt) } } else { z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); if (NET_CAN_BS != 0 && !mcast) { rx_ctx->act_block_nr++; @@ -637,7 +637,7 @@ static enum net_verdict canbus_process_ff(struct net_pkt *pkt) /* At this point we expect to get Consecutive frames directly */ z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + 
k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); rx_ctx->state = NET_CAN_RX_STATE_CF; @@ -764,7 +764,7 @@ static void canbus_tx_work(struct net_pkt *pkt) ctx); ctx->state = NET_CAN_TX_STATE_WAIT_FC; z_add_timeout(&ctx->timeout, canbus_tx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); break; } else if (ctx->opts.stmin) { ctx->state = NET_CAN_TX_STATE_WAIT_ST; @@ -777,7 +777,7 @@ static void canbus_tx_work(struct net_pkt *pkt) case NET_CAN_TX_STATE_WAIT_ST: NET_DBG("SM wait ST. CTX: %p", ctx); z_add_timeout(&ctx->timeout, canbus_st_min_timeout, - z_ms_to_ticks(canbus_stmin_to_ticks(ctx->opts.stmin))); + k_ms_to_ticks_ceil32(canbus_stmin_to_ticks(ctx->opts.stmin))); ctx->state = NET_CAN_TX_STATE_SEND_CF; break; @@ -833,7 +833,7 @@ static enum net_verdict canbus_process_fc_data(struct canbus_isotp_tx_ctx *ctx, NET_DBG("Got WAIT frame. CTX: %p", ctx); z_abort_timeout(&ctx->timeout); z_add_timeout(&ctx->timeout, canbus_tx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); if (ctx->wft >= NET_CAN_WFTMAX) { NET_INFO("Got to many wait frames. CTX: %p", ctx); ctx->state = NET_CAN_TX_STATE_ERR; @@ -1023,12 +1023,12 @@ static int canbus_send_multiple_frames(struct net_pkt *pkt, size_t len, if (!mcast) { z_add_timeout(&tx_ctx->timeout, canbus_tx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); tx_ctx->state = NET_CAN_TX_STATE_WAIT_FC; } else { tx_ctx->state = NET_CAN_TX_STATE_SEND_CF; z_add_timeout(&tx_ctx->timeout, canbus_start_sending_cf, - z_ms_to_ticks(NET_CAN_FF_CF_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_FF_CF_TIME)); } return 0; diff --git a/subsys/net/l2/ppp/ppp_l2.c b/subsys/net/l2/ppp/ppp_l2.c index 4250db6f90b0e..1f10ea562c521 100644 --- a/subsys/net/l2/ppp/ppp_l2.c +++ b/subsys/net/l2/ppp/ppp_l2.c @@ -345,7 +345,7 @@ static void echo_reply_handler(void *user_data, size_t user_data_len) time_diff = abs(end_time - ctx->shell.echo_req_data); ctx->shell.echo_req_data = - SYS_CLOCK_HW_CYCLES_TO_NS64(time_diff) / 1000; + k_cyc_to_ns_floor64(time_diff) / 1000; k_sem_give(&ctx->shell.wait_echo_reply); } diff --git a/subsys/testsuite/include/timestamp.h b/subsys/testsuite/include/timestamp.h index d4e29a10be449..7dbfc9d1f6664 100644 --- a/subsys/testsuite/include/timestamp.h +++ b/subsys/testsuite/include/timestamp.h @@ -104,7 +104,7 @@ static inline int high_timer_overflow(void) /* Check if the time elapsed in msec is sufficient to trigger an * overflow of the high precision timer */ - if (tCheck >= (SYS_CLOCK_HW_CYCLES_TO_NS64(UINT_MAX) / + if (tCheck >= (k_cyc_to_ns_floor64(UINT_MAX) / (NSEC_PER_USEC * USEC_PER_MSEC))) { return -1; } diff --git a/tests/benchmarks/latency_measure/src/int_to_thread.c b/tests/benchmarks/latency_measure/src/int_to_thread.c index bad34a0bb8693..b58b0dc09ed3e 100644 --- a/tests/benchmarks/latency_measure/src/int_to_thread.c +++ b/tests/benchmarks/latency_measure/src/int_to_thread.c @@ -74,7 +74,7 @@ int int_to_thread(void) make_int(); if (flag_var == 1) { PRINT_FORMAT(" switching time is %u tcs = %u nsec", - timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); + timestamp, (u32_t)k_cyc_to_ns_floor64(timestamp)); } return 0; } diff --git a/tests/benchmarks/latency_measure/src/int_to_thread_evt.c b/tests/benchmarks/latency_measure/src/int_to_thread_evt.c index 1cc91c750b420..96e041c2f36ff 100644 --- a/tests/benchmarks/latency_measure/src/int_to_thread_evt.c +++ b/tests/benchmarks/latency_measure/src/int_to_thread_evt.c @@ -92,6 +92,6 @@ int int_to_thread_evt(void) 
k_sem_take(&WORKSEMA, K_FOREVER); PRINT_FORMAT(" switch time is %u tcs = %u nsec", - timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); + timestamp, (u32_t)k_cyc_to_ns_floor64(timestamp)); return 0; } diff --git a/tests/benchmarks/latency_measure/src/utils.h b/tests/benchmarks/latency_measure/src/utils.h index c3e08145970c2..42802db3e6657 100644 --- a/tests/benchmarks/latency_measure/src/utils.h +++ b/tests/benchmarks/latency_measure/src/utils.h @@ -65,7 +65,7 @@ static inline void print_dash_line(void) #define PRINT_TIME_BANNER() \ do { \ PRINT_FORMAT(" tcs = timer clock cycles: 1 tcs is %u nsec", \ - SYS_CLOCK_HW_CYCLES_TO_NS(1)); \ + (u32_t)k_cyc_to_ns_floor64(1)); \ print_dash_line(); \ } while (0) diff --git a/tests/benchmarks/mbedtls/src/benchmark.c b/tests/benchmarks/mbedtls/src/benchmark.c index 56a63bc7a38a8..360a95d248a86 100644 --- a/tests/benchmarks/mbedtls/src/benchmark.c +++ b/tests/benchmarks/mbedtls/src/benchmark.c @@ -174,7 +174,7 @@ do { \ } \ \ delta = k_cycle_get_32() - tsc; \ - delta = SYS_CLOCK_HW_CYCLES_TO_NS64(delta); \ + delta = k_cyc_to_ns_floor64(delta); \ \ mbedtls_printf("%9lu KiB/s, %9lu ns/byte\n", \ ii * BUFSIZE / 1024, \ diff --git a/tests/benchmarks/timing_info/src/timing_info.h b/tests/benchmarks/timing_info/src/timing_info.h index 1da40e08cf565..17eb0d09ada80 100644 --- a/tests/benchmarks/timing_info/src/timing_info.h +++ b/tests/benchmarks/timing_info/src/timing_info.h @@ -133,7 +133,7 @@ static inline void benchmark_timer_init(void) { } static inline void benchmark_timer_stop(void) { } static inline void benchmark_timer_start(void) { } -#define CYCLES_TO_NS(x) SYS_CLOCK_HW_CYCLES_TO_NS(x) +#define CYCLES_TO_NS(x) (u32_t)k_cyc_to_ns_floor64(x) /* Get Core Frequency in MHz */ static inline u32_t get_core_freq_MHz(void) diff --git a/tests/kernel/common/src/boot_delay.c b/tests/kernel/common/src/boot_delay.c index 2a62199186058..3e47ab6a09a49 100644 --- a/tests/kernel/common/src/boot_delay.c +++ b/tests/kernel/common/src/boot_delay.c @@ -16,14 +16,13 @@ /** * @brief This module verifies the delay specified during boot. 
- * @see k_cycle_get_32, #SYS_CLOCK_HW_CYCLES_TO_NS64(X) */ void test_verify_bootdelay(void) { u32_t current_cycles = k_cycle_get_32(); /* compare this with the boot delay specified */ - zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS64(current_cycles) >= + zassert_true(k_cyc_to_ns_floor64(current_cycles) >= (NSEC_PER_MSEC * CONFIG_BOOT_DELAY), "boot delay not executed"); } diff --git a/tests/kernel/common/src/clock.c b/tests/kernel/common/src/clock.c index 1101912dbaa00..6e7dac943acc2 100644 --- a/tests/kernel/common/src/clock.c +++ b/tests/kernel/common/src/clock.c @@ -95,7 +95,7 @@ void test_clock_cycle(void) c32 = k_cycle_get_32(); /*break if cycle counter wrap around*/ while (k_cycle_get_32() > c32 && - k_cycle_get_32() < (c32 + sys_clock_hw_cycles_per_tick())) { + k_cycle_get_32() < (c32 + k_ticks_to_cyc_floor32(1))) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); #endif @@ -119,7 +119,7 @@ void test_clock_cycle(void) (sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC), NULL); /* delta NS should be greater than 1 milli-second */ - zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS(c1 - c0) > + zassert_true((u32_t)k_cyc_to_ns_floor64(c1 - c0) > (NSEC_PER_SEC / MSEC_PER_SEC), NULL); } } diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c index b35802f00db0f..31f865122cfd4 100644 --- a/tests/kernel/context/src/main.c +++ b/tests/kernel/context/src/main.c @@ -260,7 +260,7 @@ static void _test_kernel_cpu_idle(int atomic) k_cpu_idle(); } /* calculating milliseconds per tick*/ - tms += __ticks_to_ms(1); + tms += k_ticks_to_ms_floor64(1); tms2 = k_uptime_get_32(); zassert_false(tms2 < tms, "Bad ms per tick value computed," "got %d which is less than %d\n", @@ -699,7 +699,7 @@ static void thread_sleep(void *delta, void *arg2, void *arg3) timestamp = k_uptime_get() - timestamp; TC_PRINT(" thread back from sleep\n"); - int slop = MAX(__ticks_to_ms(2), 1); + int slop = MAX(k_ticks_to_ms_floor64(2), 1); if (timestamp < timeout || timestamp > timeout + slop) { TC_ERROR("timestamp out of range, got %d\n", (int)timestamp); diff --git a/tests/kernel/early_sleep/src/main.c b/tests/kernel/early_sleep/src/main.c index 7db8befb773ac..7466259ee985e 100644 --- a/tests/kernel/early_sleep/src/main.c +++ b/tests/kernel/early_sleep/src/main.c @@ -58,10 +58,10 @@ static int ticks_to_sleep(int ticks) u32_t stop_time; start_time = k_cycle_get_32(); - k_sleep(__ticks_to_ms(ticks)); + k_sleep(k_ticks_to_ms_floor64(ticks)); stop_time = k_cycle_get_32(); - return (stop_time - start_time) / sys_clock_hw_cycles_per_tick(); + return (stop_time - start_time) / k_ticks_to_cyc_floor32(1); } @@ -103,8 +103,8 @@ static void test_early_sleep(void) k_thread_priority_set(k_current_get(), 0); TC_PRINT("msec per tick: %lld.%03lld, ticks to sleep: %d\n", - __ticks_to_ms(1000) / 1000U, - __ticks_to_ms(1000) % 1000, + k_ticks_to_ms_floor64(1000) / 1000U, + k_ticks_to_ms_floor64(1000) % 1000, TEST_TICKS_TO_SLEEP); /* Create a lower priority thread */ diff --git a/tests/kernel/fifo/fifo_timeout/src/main.c b/tests/kernel/fifo/fifo_timeout/src/main.c index a6baec355f62f..644c3f8df3606 100644 --- a/tests/kernel/fifo/fifo_timeout/src/main.c +++ b/tests/kernel/fifo/fifo_timeout/src/main.c @@ -101,7 +101,7 @@ static bool is_timeout_in_range(u32_t start_time, u32_t timeout) u32_t stop_time, diff; stop_time = k_cycle_get_32(); - diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - + diff = (u32_t)k_cyc_to_ns_floor64(stop_time - start_time) / NSEC_PER_USEC; diff = diff / USEC_PER_MSEC; return timeout <= diff; @@ -177,7 +177,7 @@ static int 
test_multiple_threads_pending(struct timeout_order_data *test_data, diff_ms = test_data[j].timeout - data->timeout; } - if (z_ms_to_ticks(diff_ms) == 1) { + if (k_ms_to_ticks_ceil32(diff_ms) == 1) { TC_PRINT( " thread (q order: %d, t/o: %d, fifo %p)\n", data->q_order, data->timeout, data->fifo); diff --git a/tests/kernel/lifo/lifo_usage/src/main.c b/tests/kernel/lifo/lifo_usage/src/main.c index a2c354dbfcc5b..c88b124ac754f 100644 --- a/tests/kernel/lifo/lifo_usage/src/main.c +++ b/tests/kernel/lifo/lifo_usage/src/main.c @@ -110,7 +110,7 @@ static bool is_timeout_in_range(u32_t start_time, u32_t timeout) u32_t stop_time, diff; stop_time = k_cycle_get_32(); - diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - + diff = (u32_t)k_cyc_to_ns_floor64(stop_time - start_time) / NSEC_PER_USEC; diff = diff / USEC_PER_MSEC; return timeout <= diff; diff --git a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c index bbfb8df78b700..101694c39d6f5 100644 --- a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c +++ b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c @@ -69,11 +69,13 @@ static void thread_time_slice(void *p1, void *p2, void *p3) * also expecting task switch below the switching tolerance. */ expected_slice_min = - (z_ms_to_ticks(SLICE_SIZE) - TASK_SWITCH_TOLERANCE) * - sys_clock_hw_cycles_per_tick(); + (k_ms_to_ticks_ceil32(SLICE_SIZE) + - TASK_SWITCH_TOLERANCE) + * k_ticks_to_cyc_floor32(1); expected_slice_max = - (z_ms_to_ticks(SLICE_SIZE) + TASK_SWITCH_TOLERANCE) * - sys_clock_hw_cycles_per_tick(); + (k_ms_to_ticks_ceil32(SLICE_SIZE) + + TASK_SWITCH_TOLERANCE) + * k_ticks_to_cyc_floor32(1); } #ifdef CONFIG_DEBUG diff --git a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c index 50429991159cd..0026e23bd4f14 100644 --- a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c +++ b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c @@ -41,8 +41,8 @@ static void thread_tslice(void *p1, void *p2, void *p3) int thread_parameter = (idx == (NUM_THREAD - 1)) ? '\n' : (idx + 'A'); - s64_t expected_slice_min = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE)); - s64_t expected_slice_max = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE) + 1); + s64_t expected_slice_min = k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(SLICE_SIZE)); + s64_t expected_slice_max = k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(SLICE_SIZE) + 1); /* Clumsy, but need to handle the precision loss with * submillisecond ticks. 
It's always possible to alias and diff --git a/tests/kernel/sleep/src/main.c b/tests/kernel/sleep/src/main.c index 09de55fa9caf5..ec52804921143 100644 --- a/tests/kernel/sleep/src/main.c +++ b/tests/kernel/sleep/src/main.c @@ -22,7 +22,7 @@ #define ONE_SECOND (MSEC_PER_SEC) #define ONE_SECOND_ALIGNED \ - (u32_t)(__ticks_to_ms(z_ms_to_ticks(ONE_SECOND) + _TICK_ALIGN)) + (u32_t)(k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(ONE_SECOND) + _TICK_ALIGN)) static struct k_sem test_thread_sem; static struct k_sem helper_thread_sem; diff --git a/tests/kernel/tickless/tickless_concept/src/main.c b/tests/kernel/tickless/tickless_concept/src/main.c index 0bef530cf3a58..c0137ea213a33 100644 --- a/tests/kernel/tickless/tickless_concept/src/main.c +++ b/tests/kernel/tickless/tickless_concept/src/main.c @@ -16,16 +16,16 @@ static struct k_thread tdata[NUM_THREAD]; #define CONFIG_TICKLESS_IDLE_THRESH 20 #endif /*sleep duration tickless*/ -#define SLEEP_TICKLESS __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH) +#define SLEEP_TICKLESS k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH) /*sleep duration with tick*/ -#define SLEEP_TICKFUL __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH - 1) +#define SLEEP_TICKFUL k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH - 1) /*slice size is set as half of the sleep duration*/ -#define SLICE_SIZE __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH >> 1) +#define SLICE_SIZE k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH >> 1) /*maximum slice duration accepted by the test*/ -#define SLICE_SIZE_LIMIT __ticks_to_ms((CONFIG_TICKLESS_IDLE_THRESH >> 1) + 1) +#define SLICE_SIZE_LIMIT k_ticks_to_ms_floor64((CONFIG_TICKLESS_IDLE_THRESH >> 1) + 1) /*align to millisecond boundary*/ #if defined(CONFIG_ARCH_POSIX) diff --git a/tests/kernel/timer/timer_api/src/main.c b/tests/kernel/timer/timer_api/src/main.c index 2ffb81aad270c..8f7f2ce393c87 100644 --- a/tests/kernel/timer/timer_api/src/main.c +++ b/tests/kernel/timer/timer_api/src/main.c @@ -269,10 +269,10 @@ void test_timer_periodicity(void) * Please note, that expected firing time is not the * one requested, as the kernel uses the ticks to manage * time. The actual perioid will be equal to [tick time] - * multiplied by z_ms_to_ticks(PERIOD). + * multiplied by k_ms_to_ticks_ceil32(PERIOD). */ TIMER_ASSERT(WITHIN_ERROR(delta, - __ticks_to_ms(z_ms_to_ticks(PERIOD)), 1), + k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(PERIOD)), 1), &periodicity_timer); } @@ -532,7 +532,7 @@ void test_timer_remaining_get(void) * the value obtained through k_timer_remaining_get() could be larger * than actual remaining time with maximum error equal to one tick. 
*/ - zassert_true(remaining <= (DURATION / 2) + __ticks_to_ms(1), NULL); + zassert_true(remaining <= (DURATION / 2) + k_ticks_to_ms_floor64(1), NULL); } static void timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, diff --git a/tests/kernel/timer/timer_monotonic/src/main.c b/tests/kernel/timer/timer_monotonic/src/main.c index 393724da28b76..d674face25453 100644 --- a/tests/kernel/timer/timer_monotonic/src/main.c +++ b/tests/kernel/timer/timer_monotonic/src/main.c @@ -54,8 +54,8 @@ void test_timer(void) errors = 0U; - TC_PRINT("sys_clock_hw_cycles_per_tick() = %d\n", - sys_clock_hw_cycles_per_tick()); + TC_PRINT("k_ticks_to_cyc_floor32(1) = %d\n", + k_ticks_to_cyc_floor32(1)); TC_PRINT("sys_clock_hw_cycles_per_sec() = %d\n", sys_clock_hw_cycles_per_sec()); diff --git a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c index 412956caf03ff..af2cf59cc7d12 100644 --- a/tests/kernel/workq/work_queue/src/main.c +++ b/tests/kernel/workq/work_queue/src/main.c @@ -17,7 +17,7 @@ /* In fact, each work item could take up to this value */ #define WORK_ITEM_WAIT_ALIGNED \ - __ticks_to_ms(z_ms_to_ticks(WORK_ITEM_WAIT) + _TICK_ALIGN) + k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(WORK_ITEM_WAIT) + _TICK_ALIGN) /* * Wait 50ms between work submissions, to ensure co-op and prempt diff --git a/tests/kernel/workq/work_queue_api/src/main.c b/tests/kernel/workq/work_queue_api/src/main.c index b9d905fb666e6..c28f6ab2c5469 100644 --- a/tests/kernel/workq/work_queue_api/src/main.c +++ b/tests/kernel/workq/work_queue_api/src/main.c @@ -141,10 +141,10 @@ static void tdelayed_work_submit(void *data) /**TESTPOINT: check remaining timeout after submit */ zassert_true( - time_remaining <= __ticks_to_ms(z_ms_to_ticks(TIMEOUT) + time_remaining <= k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(TIMEOUT) + _TICK_ALIGN) && - time_remaining >= __ticks_to_ms(z_ms_to_ticks(TIMEOUT) - - z_ms_to_ticks(15)), NULL); + time_remaining >= k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(TIMEOUT) - + k_ms_to_ticks_ceil32(15)), NULL); /**TESTPOINT: check pending after delayed work submit*/ zassert_true(k_work_pending((struct k_work *)&delayed_work[i]) == 0, NULL); diff --git a/tests/portability/cmsis_rtos_v1/src/kernel_apis.c b/tests/portability/cmsis_rtos_v1/src/kernel_apis.c index e4f77bc9e5613..56cf9e45e1d55 100644 --- a/tests/portability/cmsis_rtos_v1/src/kernel_apis.c +++ b/tests/portability/cmsis_rtos_v1/src/kernel_apis.c @@ -45,7 +45,7 @@ void test_kernel_systick(void) k_busy_wait(WAIT_TIME_US); stop_time = osKernelSysTick(); - diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - + diff = (u32_t)k_cyc_to_ns_floor64(stop_time - start_time) / NSEC_PER_USEC; /* Check that it's within 1%. 
On some Zephyr platforms diff --git a/tests/portability/cmsis_rtos_v2/src/thread_apis.c b/tests/portability/cmsis_rtos_v2/src/thread_apis.c index 5ed478847ef4e..2d7abd247bc26 100644 --- a/tests/portability/cmsis_rtos_v2/src/thread_apis.c +++ b/tests/portability/cmsis_rtos_v2/src/thread_apis.c @@ -252,7 +252,7 @@ void test_thread_prio(void) static void thread5(void *argument) { printk(" * Thread B started.\n"); - osDelay(z_ms_to_ticks(DELAY_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS)); printk(" * Thread B joining...\n"); } @@ -317,13 +317,13 @@ void test_thread_detached(void) thread = osThreadNew(thread5, NULL, NULL); /* osThreadDetached */ zassert_not_null(thread, "Failed to create thread with osThreadNew!"); - osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS)); status = osThreadJoin(thread); zassert_equal(status, osErrorResource, "Incorrect status returned from osThreadJoin!"); - osDelay(z_ms_to_ticks(DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELTA_MS)); } void thread6(void *argument) @@ -350,12 +350,12 @@ void test_thread_joinable_detach(void) tB = osThreadNew(thread6, tA, &attr); zassert_not_null(tB, "Failed to create thread with osThreadNew!"); - osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS)); status = osThreadDetach(tA); zassert_equal(status, osOK, "osThreadDetach failed."); - osDelay(z_ms_to_ticks(DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELTA_MS)); } void test_thread_joinable_terminate(void) @@ -372,10 +372,10 @@ void test_thread_joinable_terminate(void) tB = osThreadNew(thread6, tA, &attr); zassert_not_null(tB, "Failed to create thread with osThreadNew!"); - osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS)); status = osThreadTerminate(tA); zassert_equal(status, osOK, "osThreadTerminate failed."); - osDelay(z_ms_to_ticks(DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELTA_MS)); }
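
For reference, the substitutions above follow one convention throughout: timeouts are programmed with ceiling conversions (k_ms_to_ticks_ceil32(), k_us_to_ticks_ceil64()) so a wait is never shorter than requested, while elapsed time reported back to callers uses floor conversions (k_ticks_to_ms_floor64(), k_cyc_to_ns_floor64()) so it never overstates how much time has passed. Below is a minimal sketch of the same call patterns, illustrative only and not part of this patch; it assumes the 2.0-era header paths and legacy u32_t/u64_t types, and the demo function name and 100 us busy-wait are made up for the example.

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <zephyr.h>
#include <sys/printk.h>

void time_units_demo(void)
{
	/* Cycle-delta measurement, replacing SYS_CLOCK_HW_CYCLES_TO_NS(delta) */
	u32_t start = k_cycle_get_32();

	k_busy_wait(100);	/* stand-in workload: spin for roughly 100 us */

	u32_t delta_cyc = k_cycle_get_32() - start;
	u64_t delta_ns = k_cyc_to_ns_floor64(delta_cyc);

	printk("%u cycles ~= %llu ns\n", delta_cyc,
	       (unsigned long long)delta_ns);

	/* Programming a timeout, replacing z_ms_to_ticks(ms): round up so
	 * the caller never waits less than requested.
	 */
	u32_t ticks = k_ms_to_ticks_ceil32(50);

	/* Reporting back to the user, replacing __ticks_to_ms(t): round
	 * down so elapsed time is never overstated.
	 */
	u64_t ms = k_ticks_to_ms_floor64(ticks);

	printk("50 ms -> %u ticks -> %llu ms\n", ticks,
	       (unsigned long long)ms);
}

The suffix on each converter makes the contract visible at the call site: source and destination units, rounding mode, and result width, all of which the deprecated __ticks_to_ms()/SYS_CLOCK_HW_CYCLES_TO_NS*() macros left implicit.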