diff --git a/drivers/serial/uart_sam0.c b/drivers/serial/uart_sam0.c index 2e15e8642c4b6..42cde3d2a427b 100644 --- a/drivers/serial/uart_sam0.c +++ b/drivers/serial/uart_sam0.c @@ -343,7 +343,7 @@ static void uart_sam0_rx_timeout(struct k_work *work) return; } - u32_t now = k_uptime_get_32(); + u32_t now = (u32_t)k_uptime_get(); u32_t elapsed = now - dev_data->rx_timeout_start; if (elapsed >= dev_data->rx_timeout_time) { @@ -546,7 +546,7 @@ static void uart_sam0_isr(void *arg) */ if (dev_data->rx_timeout_time != K_FOREVER) { dev_data->rx_timeout_from_isr = true; - dev_data->rx_timeout_start = k_uptime_get_32(); + dev_data->rx_timeout_start = (u32_t)k_uptime_get(); k_delayed_work_submit(&dev_data->rx_timeout_work, dev_data->rx_timeout_chunk); } diff --git a/drivers/watchdog/wdt_iwdg_stm32.c b/drivers/watchdog/wdt_iwdg_stm32.c index c04c557531599..10acd317d299e 100644 --- a/drivers/watchdog/wdt_iwdg_stm32.c +++ b/drivers/watchdog/wdt_iwdg_stm32.c @@ -112,11 +112,11 @@ static int iwdg_stm32_install_timeout(struct device *dev, return -EINVAL; } - tickstart = k_uptime_get_32(); + tickstart = (u32_t)k_uptime_get(); while (LL_IWDG_IsReady(iwdg) == 0) { /* Wait until WVU, RVU, PVU are reset before updating */ - if ((k_uptime_get_32() - tickstart) > IWDG_DEFAULT_TIMEOUT) { + if (((u32_t)k_uptime_get() - tickstart) > IWDG_DEFAULT_TIMEOUT) { return -ENODEV; } } diff --git a/include/kernel.h b/include/kernel.h index a52f544d89294..999996fce9dc0 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -1729,13 +1729,16 @@ __deprecated static inline void k_disable_sys_clock_always_on(void) /** * @brief Get system uptime (32-bit version). * - * This routine returns the lower 32-bits of the elapsed time since the system - * booted, in milliseconds. + * This routine returns a value obtained by converting the lower + * 32-bits of the system clock into milliseconds. 
* - * This routine can be more efficient than k_uptime_get(), as it reduces the - * need for interrupt locking and 64-bit math. However, the 32-bit result - * cannot hold a system uptime time larger than approximately 50 days, so the - * caller must handle possible rollovers. + * This value is computed more efficiently than k_uptime_get(), as it + * reduces the need for interrupt locking and 64-bit math. However, as + * the system clock counter approaches 2^32 the value is no longer + * equal to the low 32-bits of the elapsed time in milliseconds since + * the system booted. Rollover calculation at the discontinuity is complex. + * + * @note This routine is deprecated; use k_uptime_get() instead. * * @note * @rst * @@ -1744,9 +1747,9 @@ __deprecated static inline void k_disable_sys_clock_always_on(void) * :option:`CONFIG_SYS_CLOCK_TICKS_PER_SEC` config option * @endrst * - * @return Current uptime in milliseconds. + * @return Current uptime in milliseconds, subject to the rollover caveat above. */ -__syscall u32_t k_uptime_get_32(void); +__deprecated __syscall u32_t k_uptime_get_32(void); /** * @brief Get elapsed time. 
diff --git a/kernel/queue.c b/kernel/queue.c index da03559695029..1028b8dea790b 100644 --- a/kernel/queue.c +++ b/kernel/queue.c @@ -287,7 +287,7 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout) K_POLL_MODE_NOTIFY_ONLY, queue); if (timeout != K_FOREVER) { - start = k_uptime_get_32(); + start = (u32_t)k_uptime_get(); } do { @@ -304,7 +304,7 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout) k_spin_unlock(&queue->lock, key); if ((val == NULL) && (timeout != K_FOREVER)) { - elapsed = k_uptime_get_32() - start; + elapsed = (u32_t)k_uptime_get() - start; done = elapsed > timeout; } } while (!val && !done); diff --git a/lib/gui/lvgl/lv_conf.h b/lib/gui/lvgl/lv_conf.h index 3a37b26128ac0..01fa54f41ca6e 100644 --- a/lib/gui/lvgl/lv_conf.h +++ b/lib/gui/lvgl/lv_conf.h @@ -175,7 +175,7 @@ typedef void *lv_img_decoder_user_data_t; #define LV_TICK_CUSTOM 1 #define LV_TICK_CUSTOM_INCLUDE "kernel.h" -#define LV_TICK_CUSTOM_SYS_TIME_EXPR (k_uptime_get_32()) +#define LV_TICK_CUSTOM_SYS_TIME_EXPR ((u32_t)k_uptime_get()) typedef void *lv_disp_drv_user_data_t; typedef void *lv_indev_drv_user_data_t; diff --git a/samples/boards/nrf52/mesh/onoff-app/src/main.c b/samples/boards/nrf52/mesh/onoff-app/src/main.c index 545759311406d..26c657dd9292b 100644 --- a/samples/boards/nrf52/mesh/onoff-app/src/main.c +++ b/samples/boards/nrf52/mesh/onoff-app/src/main.c @@ -437,7 +437,7 @@ static void button_pressed(struct device *dev, struct gpio_callback *cb, * More than one button press sends an off message */ - time = k_uptime_get_32(); + time = (u32_t)k_uptime_get(); /* debounce the switch */ if (time < last_time + BUTTON_DEBOUNCE_DELAY_MS) { @@ -604,7 +604,7 @@ void main(void) printk("Initializing...\n"); /* Initialize the button debouncer */ - last_time = k_uptime_get_32(); + last_time = (u32_t)k_uptime_get(); /* Initialize button worker task*/ k_work_init(&sw.button_work, button_pressed_worker); diff --git a/samples/boards/reel_board/mesh_badge/src/reel_board.c 
b/samples/boards/reel_board/mesh_badge/src/reel_board.c index 0962051f7ac58..9032075c1dbfa 100644 --- a/samples/boards/reel_board/mesh_badge/src/reel_board.c +++ b/samples/boards/reel_board/mesh_badge/src/reel_board.c @@ -480,7 +480,7 @@ static void button_interrupt(struct device *dev, struct gpio_callback *cb, return; case SCREEN_MAIN: if (pins & BIT(DT_ALIAS_SW0_GPIOS_PIN)) { - u32_t uptime = k_uptime_get_32(); + u32_t uptime = (u32_t)k_uptime_get(); static u32_t bad_count, press_ts; if (uptime - press_ts < 500) { diff --git a/samples/philosophers/src/main.c b/samples/philosophers/src/main.c index d6161cd23e5d9..c4b07ac3d6f9e 100644 --- a/samples/philosophers/src/main.c +++ b/samples/philosophers/src/main.c @@ -124,7 +124,7 @@ static s32_t get_random_delay(int id, int period_in_ms) * and the current uptime to create some pseudo-randomness. It produces * a value between 0 and 31. */ - s32_t delay = (k_uptime_get_32()/100 * (id + 1)) & 0x1f; + s32_t delay = ((u32_t)k_uptime_get()/100 * (id + 1)) & 0x1f; /* add 1 to not generate a delay of 0 */ s32_t ms = (delay + 1) * period_in_ms; diff --git a/samples/portability/cmsis_rtos_v1/philosophers/src/main.c b/samples/portability/cmsis_rtos_v1/philosophers/src/main.c index e5d83c1dcf001..0403db47e24ae 100644 --- a/samples/portability/cmsis_rtos_v1/philosophers/src/main.c +++ b/samples/portability/cmsis_rtos_v1/philosophers/src/main.c @@ -117,7 +117,7 @@ static s32_t get_random_delay(int id, int period_in_ms) * and the current uptime to create some pseudo-randomness. It produces * a value between 0 and 31. 
*/ - s32_t delay = (k_uptime_get_32() / 100 * (id + 1)) & 0x1f; + s32_t delay = ((u32_t)k_uptime_get() / 100 * (id + 1)) & 0x1f; /* add 1 to not generate a delay of 0 */ s32_t ms = (delay + 1) * period_in_ms; diff --git a/samples/portability/cmsis_rtos_v2/philosophers/src/main.c b/samples/portability/cmsis_rtos_v2/philosophers/src/main.c index 8c3031e2f12ee..a67bd3a5e4ee7 100644 --- a/samples/portability/cmsis_rtos_v2/philosophers/src/main.c +++ b/samples/portability/cmsis_rtos_v2/philosophers/src/main.c @@ -153,7 +153,7 @@ static s32_t get_random_delay(int id, int period_in_ms) * and the current uptime to create some pseudo-randomness. It produces * a value between 0 and 31. */ - s32_t delay = (k_uptime_get_32()/100 * (id + 1)) & 0x1f; + s32_t delay = ((u32_t)k_uptime_get()/100 * (id + 1)) & 0x1f; /* add 1 to not generate a delay of 0 */ s32_t ms = (delay + 1) * period_in_ms; diff --git a/soc/arm/st_stm32/common/stm32cube_hal.c b/soc/arm/st_stm32/common/stm32cube_hal.c index 4a7c643755d6a..5d8d45b0bea9a 100644 --- a/soc/arm/st_stm32/common/stm32cube_hal.c +++ b/soc/arm/st_stm32/common/stm32cube_hal.c @@ -22,7 +22,7 @@ */ uint32_t HAL_GetTick(void) { - return k_uptime_get_32(); + return (u32_t)k_uptime_get(); } /** diff --git a/subsys/bluetooth/controller/hci/hci_driver.c b/subsys/bluetooth/controller/hci/hci_driver.c index bcfd3b6022c43..027be8d90091a 100644 --- a/subsys/bluetooth/controller/hci/hci_driver.c +++ b/subsys/bluetooth/controller/hci/hci_driver.c @@ -126,10 +126,10 @@ static void prio_recv_thread(void *p1, void *p2, void *p3) BT_DBG("sem taken"); #if defined(CONFIG_INIT_STACKS) - if (k_uptime_get_32() - prio_ts > K_SECONDS(5)) { + if ((u32_t)k_uptime_get() - prio_ts > K_SECONDS(5)) { STACK_ANALYZE("prio recv thread stack", prio_recv_thread_stack); - prio_ts = k_uptime_get_32(); + prio_ts = (u32_t)k_uptime_get(); } #endif } @@ -370,9 +370,9 @@ static void recv_thread(void *p1, void *p2, void *p3) k_yield(); #if defined(CONFIG_INIT_STACKS) - if 
(k_uptime_get_32() - rx_ts > K_SECONDS(5)) { + if ((u32_t)k_uptime_get() - rx_ts > K_SECONDS(5)) { STACK_ANALYZE("recv thread stack", recv_thread_stack); - rx_ts = k_uptime_get_32(); + rx_ts = (u32_t)k_uptime_get(); } #endif } diff --git a/subsys/bluetooth/mesh/access.c b/subsys/bluetooth/mesh/access.c index 48c4621927776..940c6ad6dd1dd 100644 --- a/subsys/bluetooth/mesh/access.c +++ b/subsys/bluetooth/mesh/access.c @@ -115,7 +115,7 @@ static s32_t next_period(struct bt_mesh_model *mod) return 0; } - elapsed = k_uptime_get_32() - pub->period_start; + elapsed = (u32_t)k_uptime_get() - pub->period_start; BT_DBG("Publishing took %ums", elapsed); @@ -159,7 +159,7 @@ static void publish_start(u16_t duration, int err, void *user_data) /* Initialize the timestamp for the beginning of a new period */ if (pub->count == BT_MESH_PUB_TRANSMIT_COUNT(pub->retransmit)) { - pub->period_start = k_uptime_get_32(); + pub->period_start = (u32_t)k_uptime_get(); } } diff --git a/subsys/bluetooth/mesh/beacon.c b/subsys/bluetooth/mesh/beacon.c index fc0c6be690e26..9eced41e9305b 100644 --- a/subsys/bluetooth/mesh/beacon.c +++ b/subsys/bluetooth/mesh/beacon.c @@ -71,7 +71,7 @@ static void beacon_complete(int err, void *user_data) BT_DBG("err %d", err); - sub->beacon_sent = k_uptime_get_32(); + sub->beacon_sent = (u32_t)k_uptime_get(); } void bt_mesh_beacon_create(struct bt_mesh_subnet *sub, @@ -113,7 +113,7 @@ static int secure_beacon_send(void) static const struct bt_mesh_send_cb send_cb = { .end = beacon_complete, }; - u32_t now = k_uptime_get_32(); + u32_t now = (u32_t)k_uptime_get(); int i; BT_DBG(""); diff --git a/subsys/bluetooth/mesh/friend.c b/subsys/bluetooth/mesh/friend.c index 88a5991446ff7..4e65b22320939 100644 --- a/subsys/bluetooth/mesh/friend.c +++ b/subsys/bluetooth/mesh/friend.c @@ -649,7 +649,7 @@ static void clear_timeout(struct k_work *work) BT_DBG("LPN 0x%04x (old) Friend 0x%04x", frnd->lpn, frnd->clear.frnd); - duration = k_uptime_get_32() - frnd->clear.start; + 
duration = (u32_t)k_uptime_get() - frnd->clear.start; if (duration > 2 * frnd->poll_to) { BT_DBG("Clear Procedure timer expired"); frnd->clear.frnd = BT_MESH_ADDR_UNASSIGNED; @@ -663,7 +663,7 @@ static void clear_procedure_start(struct bt_mesh_friend *frnd) { BT_DBG("LPN 0x%04x (old) Friend 0x%04x", frnd->lpn, frnd->clear.frnd); - frnd->clear.start = k_uptime_get_32() + (2 * frnd->poll_to); + frnd->clear.start = (u32_t)k_uptime_get() + (2 * frnd->poll_to); frnd->clear.repeat_sec = 1U; send_friend_clear(frnd); diff --git a/subsys/bluetooth/mesh/proxy.c b/subsys/bluetooth/mesh/proxy.c index c8047efb8951d..1097fe879e888 100644 --- a/subsys/bluetooth/mesh/proxy.c +++ b/subsys/bluetooth/mesh/proxy.c @@ -370,7 +370,7 @@ void bt_mesh_proxy_beacon_send(struct bt_mesh_subnet *sub) void bt_mesh_proxy_identity_start(struct bt_mesh_subnet *sub) { sub->node_id = BT_MESH_NODE_IDENTITY_RUNNING; - sub->node_id_start = k_uptime_get_32(); + sub->node_id_start = (u32_t)k_uptime_get(); /* Prioritize the recently enabled subnet */ next_idx = sub - bt_mesh.sub; @@ -1138,7 +1138,7 @@ static s32_t gatt_proxy_advertise(struct bt_mesh_subnet *sub) } if (sub->node_id == BT_MESH_NODE_IDENTITY_RUNNING) { - u32_t active = k_uptime_get_32() - sub->node_id_start; + u32_t active = (u32_t)k_uptime_get() - sub->node_id_start; if (active < NODE_ID_TIMEOUT) { remaining = NODE_ID_TIMEOUT - active; diff --git a/subsys/bluetooth/mesh/transport.c b/subsys/bluetooth/mesh/transport.c index a859608d1f78d..ba6790ad62136 100644 --- a/subsys/bluetooth/mesh/transport.c +++ b/subsys/bluetooth/mesh/transport.c @@ -1071,7 +1071,7 @@ static void seg_ack(struct k_work *work) BT_DBG("rx %p", rx); - if (k_uptime_get_32() - rx->last > K_SECONDS(60)) { + if ((u32_t)k_uptime_get() - rx->last > K_SECONDS(60)) { BT_WARN("Incomplete timer expired"); seg_rx_reset(rx, false); @@ -1365,7 +1365,7 @@ static int trans_seg(struct net_buf_simple *buf, struct bt_mesh_net_rx *net_rx, } /* Reset the Incomplete Timer */ - rx->last = 
k_uptime_get_32(); + rx->last = (u32_t)k_uptime_get(); if (!k_delayed_work_remaining_get(&rx->ack) && !bt_mesh_lpn_established()) { diff --git a/subsys/disk/disk_access_sdhc.h b/subsys/disk/disk_access_sdhc.h index 9ec5e2d7c82f7..a536b7f7ba53e 100644 --- a/subsys/disk/disk_access_sdhc.h +++ b/subsys/disk/disk_access_sdhc.h @@ -551,7 +551,7 @@ static inline int sdhc_map_data_status(int status) static inline void sdhc_retry_init(struct sdhc_retry *retry, u32_t timeout, u16_t sleep) { - retry->end = k_uptime_get_32() + timeout; + retry->end = (u32_t)k_uptime_get() + timeout; retry->tries = 0; retry->sleep = sleep; } @@ -561,7 +561,7 @@ static inline void sdhc_retry_init(struct sdhc_retry *retry, u32_t timeout, */ static inline bool sdhc_retry_ok(struct sdhc_retry *retry) { - s32_t remain = retry->end - k_uptime_get_32(); + s32_t remain = retry->end - (u32_t)k_uptime_get(); if (retry->tries < SDHC_MIN_TRIES) { retry->tries++; diff --git a/subsys/net/buf.c b/subsys/net/buf.c index e37e90647d315..19ce9c236c2eb 100644 --- a/subsys/net/buf.c +++ b/subsys/net/buf.c @@ -234,7 +234,7 @@ struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size, s32_t timeout) #endif { - u32_t alloc_start = k_uptime_get_32(); + u32_t alloc_start = (u32_t)k_uptime_get(); struct net_buf *buf; unsigned int key; @@ -277,7 +277,7 @@ struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size, #if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN) if (timeout == K_FOREVER) { - u32_t ref = k_uptime_get_32(); + u32_t ref = (u32_t)k_uptime_get(); buf = k_lifo_get(&pool->free, K_NO_WAIT); while (!buf) { #if defined(CONFIG_NET_BUF_POOL_USAGE) @@ -291,11 +291,11 @@ struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size, #if defined(CONFIG_NET_BUF_POOL_USAGE) NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs", func, line, pool->name, - (k_uptime_get_32() - ref) / MSEC_PER_SEC); + ((u32_t)k_uptime_get() - ref) / MSEC_PER_SEC); #else 
NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs", func, line, pool, - (k_uptime_get_32() - ref) / MSEC_PER_SEC); + ((u32_t)k_uptime_get() - ref) / MSEC_PER_SEC); #endif } } else { @@ -314,7 +314,7 @@ struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size, if (size) { if (timeout != K_NO_WAIT && timeout != K_FOREVER) { - u32_t diff = k_uptime_get_32() - alloc_start; + u32_t diff = (u32_t)k_uptime_get() - alloc_start; timeout -= MIN(timeout, diff); } @@ -563,7 +563,7 @@ struct net_buf *net_buf_ref(struct net_buf *buf) struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout) { - u32_t alloc_start = k_uptime_get_32(); + u32_t alloc_start = (u32_t)k_uptime_get(); struct net_buf_pool *pool; struct net_buf *clone; @@ -588,7 +588,7 @@ struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout) size_t size = buf->size; if (timeout != K_NO_WAIT && timeout != K_FOREVER) { - u32_t diff = k_uptime_get_32() - alloc_start; + u32_t diff = (u32_t)k_uptime_get() - alloc_start; timeout -= MIN(timeout, diff); } diff --git a/subsys/net/ip/ipv6_nbr.c b/subsys/net/ip/ipv6_nbr.c index d662deb42b4d5..ffa172f6dae8e 100644 --- a/subsys/net/ip/ipv6_nbr.c +++ b/subsys/net/ip/ipv6_nbr.c @@ -2108,7 +2108,7 @@ static inline u32_t remaining_lifetime(struct net_if_addr *ifaddr) remaining = (u64_t)ifaddr->lifetime.timer_timeout + (u64_t)ifaddr->lifetime.wrap_counter * (u64_t)NET_TIMEOUT_MAX_VALUE - - (u64_t)time_diff(k_uptime_get_32(), + (u64_t)time_diff((u32_t)k_uptime_get(), ifaddr->lifetime.timer_start); return (u32_t)(remaining / K_MSEC(1000)); diff --git a/subsys/net/ip/net_if.c b/subsys/net/ip/net_if.c index 013c7d2347147..bc0365db777d0 100644 --- a/subsys/net/ip/net_if.c +++ b/subsys/net/ip/net_if.c @@ -543,7 +543,7 @@ static void iface_router_run_timer(u32_t current_time) static void iface_router_expired(struct k_work *work) { - u32_t current_time = k_uptime_get_32(); + u32_t current_time = (u32_t)k_uptime_get(); struct net_if_router *router, *next; 
ARG_UNUSED(work); @@ -587,7 +587,7 @@ static struct net_if_router *iface_router_add(struct net_if *iface, routers[i].is_default = true; routers[i].is_infinite = false; routers[i].lifetime = lifetime; - routers[i].life_start = k_uptime_get_32(); + routers[i].life_start = (u32_t)k_uptime_get(); sys_slist_append(&active_router_timers, &routers[i].node); @@ -645,7 +645,7 @@ static bool iface_router_rm(struct net_if_router *router) /* We recompute the timer if only the router was time limited */ if (sys_slist_find_and_remove(&active_router_timers, &router->node)) { - iface_router_run_timer(k_uptime_get_32()); + iface_router_run_timer((u32_t)k_uptime_get()); } router->is_used = false; @@ -818,7 +818,7 @@ static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr) static void dad_timeout(struct k_work *work) { - u32_t current_time = k_uptime_get_32(); + u32_t current_time = (u32_t)k_uptime_get(); struct net_if_addr *ifaddr, *next; ARG_UNUSED(work); @@ -887,7 +887,7 @@ static void net_if_ipv6_start_dad(struct net_if *iface, ifaddr->dad_count = 1U; if (!net_ipv6_start_dad(iface, ifaddr)) { - ifaddr->dad_start = k_uptime_get_32(); + ifaddr->dad_start = (u32_t)k_uptime_get(); sys_slist_append(&active_dad_timers, &ifaddr->dad_node); if (!k_delayed_work_remaining_get(&dad_timer)) { @@ -984,7 +984,7 @@ static inline void net_if_ipv6_start_dad(struct net_if *iface, static void rs_timeout(struct k_work *work) { - u32_t current_time = k_uptime_get_32(); + u32_t current_time = (u32_t)k_uptime_get(); struct net_if_ipv6 *ipv6, *next; ARG_UNUSED(work); @@ -1040,7 +1040,7 @@ void net_if_start_rs(struct net_if *iface) NET_DBG("Starting ND/RS for iface %p", iface); if (!net_ipv6_start_rs(iface)) { - ipv6->rs_start = k_uptime_get_32(); + ipv6->rs_start = (u32_t)k_uptime_get(); sys_slist_append(&active_rs_timers, &ipv6->rs_node); if (!k_delayed_work_remaining_get(&rs_timer)) { @@ -1200,7 +1200,7 @@ static bool address_manage_timeout(struct net_if_addr *ifaddr, } if 
(current_time == NET_TIMEOUT_MAX_VALUE) { - ifaddr->lifetime.timer_start = k_uptime_get_32(); + ifaddr->lifetime.timer_start = (u32_t)k_uptime_get(); ifaddr->lifetime.wrap_counter--; } @@ -1216,7 +1216,7 @@ static bool address_manage_timeout(struct net_if_addr *ifaddr, static void address_lifetime_timeout(struct k_work *work) { u64_t timeout_update = UINT64_MAX; - u32_t current_time = k_uptime_get_32(); + u32_t current_time = (u32_t)k_uptime_get(); bool found = false; struct net_if_addr *current, *next; @@ -1291,7 +1291,7 @@ static void address_start_timer(struct net_if_addr *ifaddr, u32_t vlifetime) sys_slist_append(&active_address_lifetime_timers, &ifaddr->lifetime.node); - ifaddr->lifetime.timer_start = k_uptime_get_32(); + ifaddr->lifetime.timer_start = (u32_t)k_uptime_get(); ifaddr->lifetime.wrap_counter = expire_timeout / (u64_t)NET_TIMEOUT_MAX_VALUE; ifaddr->lifetime.timer_timeout = expire_timeout - @@ -1771,7 +1771,7 @@ static bool prefix_manage_timeout(struct net_if_ipv6_prefix *ifprefix, static void prefix_lifetime_timeout(struct k_work *work) { u64_t timeout_update = UINT64_MAX; - u32_t current_time = k_uptime_get_32(); + u32_t current_time = (u32_t)k_uptime_get(); bool found = false; struct net_if_ipv6_prefix *current, *next; @@ -1840,7 +1840,7 @@ static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix, sys_slist_append(&active_prefix_lifetime_timers, &ifprefix->lifetime.node); - ifprefix->lifetime.timer_start = k_uptime_get_32(); + ifprefix->lifetime.timer_start = (u32_t)k_uptime_get(); ifprefix->lifetime.wrap_counter = expire_timeout / (u64_t)NET_TIMEOUT_MAX_VALUE; ifprefix->lifetime.timer_timeout = expire_timeout - @@ -2106,7 +2106,7 @@ void net_if_ipv6_router_update_lifetime(struct net_if_router *router, log_strdup(net_sprint_ipv6_addr(&router->address.in6_addr)), lifetime); - router->life_start = k_uptime_get_32(); + router->life_start = (u32_t)k_uptime_get(); router->lifetime = lifetime; iface_router_run_timer(router->life_start); diff 
--git a/subsys/net/ip/net_pkt.c b/subsys/net/ip/net_pkt.c index 1686bf827171f..478a0996595a0 100644 --- a/subsys/net/ip/net_pkt.c +++ b/subsys/net/ip/net_pkt.c @@ -857,7 +857,7 @@ static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool, size_t size, s32_t timeout) #endif { - u32_t alloc_start = k_uptime_get_32(); + u32_t alloc_start = (u32_t)k_uptime_get(); struct net_buf *first = NULL; struct net_buf *current = NULL; @@ -883,7 +883,7 @@ static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool, size -= current->size; if (timeout != K_NO_WAIT && timeout != K_FOREVER) { - u32_t diff = k_uptime_get_32() - alloc_start; + u32_t diff = (u32_t)k_uptime_get() - alloc_start; timeout -= MIN(timeout, diff); } @@ -1098,7 +1098,7 @@ int net_pkt_alloc_buffer(struct net_pkt *pkt, s32_t timeout) #endif { - u32_t alloc_start = k_uptime_get_32(); + u32_t alloc_start = (u32_t)k_uptime_get(); struct net_buf_pool *pool = NULL; size_t alloc_len = 0; size_t hdr_len = 0; @@ -1138,7 +1138,7 @@ int net_pkt_alloc_buffer(struct net_pkt *pkt, } if (timeout != K_NO_WAIT && timeout != K_FOREVER) { - u32_t diff = k_uptime_get_32() - alloc_start; + u32_t diff = (u32_t)k_uptime_get() - alloc_start; timeout -= MIN(timeout, diff); } @@ -1324,7 +1324,7 @@ pkt_alloc_with_buffer(struct k_mem_slab *slab, s32_t timeout) #endif { - u32_t alloc_start = k_uptime_get_32(); + u32_t alloc_start = (u32_t)k_uptime_get(); struct net_pkt *pkt; int ret; @@ -1343,7 +1343,7 @@ pkt_alloc_with_buffer(struct k_mem_slab *slab, net_pkt_set_family(pkt, family); if (timeout != K_NO_WAIT && timeout != K_FOREVER) { - u32_t diff = k_uptime_get_32() - alloc_start; + u32_t diff = (u32_t)k_uptime_get() - alloc_start; timeout -= MIN(timeout, diff); } diff --git a/subsys/net/ip/net_shell.c b/subsys/net/ip/net_shell.c index 86926ad67bc5a..b692bea108b34 100644 --- a/subsys/net/ip/net_shell.c +++ b/subsys/net/ip/net_shell.c @@ -2346,7 +2346,7 @@ static void address_lifetime_cb(struct net_if *iface, void *user_data) 
remaining = (u64_t)ipv6->unicast[i].lifetime.timer_timeout + (u64_t)ipv6->unicast[i].lifetime.wrap_counter * (u64_t)NET_TIMEOUT_MAX_VALUE - - (u64_t)time_diff(k_uptime_get_32(), + (u64_t)time_diff((u32_t)k_uptime_get(), ipv6->unicast[i].lifetime.timer_start); prefix = net_if_ipv6_prefix_get(iface, diff --git a/subsys/net/ip/trickle.c b/subsys/net/ip/trickle.c index 61b36cef683d4..3248aebe6a36e 100644 --- a/subsys/net/ip/trickle.c +++ b/subsys/net/ip/trickle.c @@ -59,7 +59,7 @@ static void double_interval_timeout(struct k_work *work) trickle->c = 0U; - NET_DBG("now %u (was at %u)", k_uptime_get_32(), last_end); + NET_DBG("now %u (was at %u)", (u32_t)k_uptime_get(), last_end); /* Check if we need to double the interval */ if (trickle->I <= (trickle->Imax_abs >> 1)) { @@ -78,7 +78,7 @@ static void double_interval_timeout(struct k_work *work) NET_DBG("doubling time %u", rand_time); - trickle->Istart = k_uptime_get_32() + rand_time; + trickle->Istart = (u32_t)k_uptime_get() + rand_time; k_delayed_work_init(&trickle->timer, trickle_timeout); k_delayed_work_submit(&trickle->timer, rand_time); @@ -88,7 +88,7 @@ static void double_interval_timeout(struct k_work *work) static inline void reschedule(struct net_trickle *trickle) { - u32_t now = k_uptime_get_32(); + u32_t now = (u32_t)k_uptime_get(); u32_t diff = get_end(trickle) - now; NET_DBG("now %d end in %d", now, diff); @@ -109,7 +109,7 @@ static void trickle_timeout(struct k_work *work) struct net_trickle, timer); - NET_DBG("Trickle timeout at %d", k_uptime_get_32()); + NET_DBG("Trickle timeout at %d", (u32_t)k_uptime_get()); if (trickle->cb) { NET_DBG("TX ok %d c(%u) < k(%u)", @@ -132,7 +132,7 @@ static void setup_new_interval(struct net_trickle *trickle) t = get_t(trickle->I); - trickle->Istart = k_uptime_get_32(); + trickle->Istart = (u32_t)k_uptime_get(); k_delayed_work_submit(&trickle->timer, t); diff --git a/subsys/net/l2/ethernet/arp.c b/subsys/net/l2/ethernet/arp.c index cac51dec4fc1e..7b9b52406b856 100644 --- 
a/subsys/net/l2/ethernet/arp.c +++ b/subsys/net/l2/ethernet/arp.c @@ -168,7 +168,7 @@ static void arp_entry_register_pending(struct arp_entry *entry) sys_slist_append(&arp_pending_entries, &entry->node); - entry->req_start = k_uptime_get_32(); + entry->req_start = (u32_t)k_uptime_get(); /* Let's start the timer if necessary */ if (!k_delayed_work_remaining_get(&arp_request_timer)) { @@ -179,7 +179,7 @@ static void arp_entry_register_pending(struct arp_entry *entry) static void arp_request_timeout(struct k_work *work) { - u32_t current = k_uptime_get_32(); + u32_t current = (u32_t)k_uptime_get(); struct arp_entry *entry, *next; ARG_UNUSED(work); @@ -452,7 +452,7 @@ static void arp_update(struct net_if *iface, } if (entry) { - entry->req_start = k_uptime_get_32(); + entry->req_start = (u32_t)k_uptime_get(); entry->iface = iface; net_ipaddr_copy(&entry->ip, src); memcpy(&entry->eth, hwaddr, sizeof(entry->eth)); diff --git a/subsys/net/lib/mqtt/mqtt_os.h b/subsys/net/lib/mqtt/mqtt_os.h index 91e576dddb613..57c242810973c 100644 --- a/subsys/net/lib/mqtt/mqtt_os.h +++ b/subsys/net/lib/mqtt/mqtt_os.h @@ -74,7 +74,7 @@ static inline void mqtt_mutex_unlock(struct mqtt_client *client) */ static inline u32_t mqtt_sys_tick_in_ms_get(void) { - return k_uptime_get_32(); + return (u32_t)k_uptime_get(); } /**@brief Method to get elapsed time in milliseconds since the last activity. 
@@ -85,7 +85,7 @@ static inline u32_t mqtt_sys_tick_in_ms_get(void) */ static inline u32_t mqtt_elapsed_time_in_ms_get(u32_t last_activity) { - s32_t diff = k_uptime_get_32() - last_activity; + s32_t diff = (u32_t)k_uptime_get() - last_activity; if (diff < 0) { return 0; diff --git a/subsys/net/lib/openthread/platform/alarm.c b/subsys/net/lib/openthread/platform/alarm.c index c995b6548da9f..860298f5f993c 100644 --- a/subsys/net/lib/openthread/platform/alarm.c +++ b/subsys/net/lib/openthread/platform/alarm.c @@ -40,7 +40,7 @@ void platformAlarmInit(void) uint32_t otPlatAlarmMilliGetNow(void) { - return k_uptime_get_32(); + return (u32_t)k_uptime_get(); } void otPlatAlarmMilliStartAt(otInstance *aInstance, uint32_t t0, uint32_t dt) diff --git a/subsys/net/lib/sntp/sntp.c b/subsys/net/lib/sntp/sntp.c index 46d02b193a4ae..3d986c5f4621d 100644 --- a/subsys/net/lib/sntp/sntp.c +++ b/subsys/net/lib/sntp/sntp.c @@ -135,7 +135,7 @@ static u32_t get_uptime_in_sec(void) { u64_t time; - time = k_uptime_get_32(); + time = (u32_t)k_uptime_get(); return time / MSEC_PER_SEC; } diff --git a/subsys/net/lib/sockets/sockets.c b/subsys/net/lib/sockets/sockets.c index c9765e845c870..9bd501c0c01bd 100644 --- a/subsys/net/lib/sockets/sockets.c +++ b/subsys/net/lib/sockets/sockets.c @@ -949,7 +949,7 @@ static int zsock_poll_update_ctx(struct net_context *ctx, static inline int time_left(u32_t start, u32_t timeout) { - u32_t elapsed = k_uptime_get_32() - start; + u32_t elapsed = (u32_t)k_uptime_get() - start; return timeout - elapsed; } @@ -964,7 +964,7 @@ int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int timeout) struct k_poll_event *pev; struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events); const struct fd_op_vtable *vtable; - u32_t entry_time = k_uptime_get_32(); + u32_t entry_time = (u32_t)k_uptime_get(); if (timeout < 0) { timeout = K_FOREVER; diff --git a/subsys/net/lib/sockets/sockets_tls.c b/subsys/net/lib/sockets/sockets_tls.c index 
7b0248d10c39f..3bd10bb7ba170 100644 --- a/subsys/net/lib/sockets/sockets_tls.c +++ b/subsys/net/lib/sockets/sockets_tls.c @@ -214,7 +214,7 @@ static void dtls_timing_set_delay(void *data, uint32_t int_ms, uint32_t fin_ms) ctx->fin_ms = fin_ms; if (fin_ms != 0U) { - ctx->snapshot = k_uptime_get_32(); + ctx->snapshot = (u32_t)k_uptime_get(); } } @@ -236,7 +236,7 @@ static int dtls_timing_get_delay(void *data) return -1; } - elapsed_ms = k_uptime_get_32() - timing->snapshot; + elapsed_ms = (u32_t)k_uptime_get() - timing->snapshot; if (elapsed_ms >= timing->fin_ms) { return 2; @@ -401,7 +401,7 @@ static int tls_release(struct tls_context *tls) static inline int time_left(u32_t start, u32_t timeout) { - u32_t elapsed = k_uptime_get_32() - start; + u32_t elapsed = (u32_t)k_uptime_get() - start; return timeout - elapsed; } @@ -481,7 +481,7 @@ static int dtls_rx(void *ctx, unsigned char *buf, size_t len, uint32_t timeout) bool is_block = !((net_ctx->tls->flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(net_ctx)); int remaining_time = (timeout == 0U) ? 
K_FOREVER : timeout; - u32_t entry_time = k_uptime_get_32(); + u32_t entry_time = (u32_t)k_uptime_get(); socklen_t addrlen = sizeof(struct sockaddr); struct sockaddr addr; int err; diff --git a/subsys/shell/modules/kernel_service.c b/subsys/shell/modules/kernel_service.c index f0504dc4dee03..09d5e51e59aed 100644 --- a/subsys/shell/modules/kernel_service.c +++ b/subsys/shell/modules/kernel_service.c @@ -36,7 +36,7 @@ static int cmd_kernel_uptime(const struct shell *shell, ARG_UNUSED(argv); shell_fprintf(shell, SHELL_NORMAL, "Uptime: %u ms\n", - k_uptime_get_32()); + (u32_t)k_uptime_get()); return 0; } diff --git a/subsys/shell/shell_log_backend.c b/subsys/shell/shell_log_backend.c index 9e8ba9358dc04..8518b91de9e3a 100644 --- a/subsys/shell/shell_log_backend.c +++ b/subsys/shell/shell_log_backend.c @@ -63,7 +63,7 @@ static void flush_expired_messages(const struct shell *shell) struct shell_log_backend_msg msg; struct k_msgq *msgq = shell->log_backend->msgq; u32_t timeout = shell->log_backend->timeout; - u32_t now = k_uptime_get_32(); + u32_t now = (u32_t)k_uptime_get(); while (1) { err = k_msgq_peek(msgq, &msg); @@ -87,7 +87,7 @@ static void msg_to_fifo(const struct shell *shell, int err; struct shell_log_backend_msg t_msg = { .msg = msg, - .timestamp = k_uptime_get_32() + .timestamp = (u32_t)k_uptime_get() }; err = k_msgq_put(shell->log_backend->msgq, &t_msg, diff --git a/tests/kernel/common/src/clock.c b/tests/kernel/common/src/clock.c index 1101912dbaa00..0a7342f55b2d8 100644 --- a/tests/kernel/common/src/clock.c +++ b/tests/kernel/common/src/clock.c @@ -9,15 +9,15 @@ #if defined(CONFIG_ARCH_POSIX) #define ALIGN_MS_BOUNDARY \ do { \ - u32_t t = k_uptime_get_32(); \ - while (t == k_uptime_get_32()) \ + u32_t t = (u32_t)k_uptime_get(); \ + while (t == (u32_t)k_uptime_get()) \ k_busy_wait(50); \ } while (0) #else #define ALIGN_MS_BOUNDARY \ do { \ - u32_t t = k_uptime_get_32(); \ - while (t == k_uptime_get_32()) \ + u32_t t = (u32_t)k_uptime_get(); \ + while (t == 
(u32_t)k_uptime_get()) \ ; \ } while (0) #endif @@ -30,7 +30,7 @@ /** * @brief Test clock uptime APIs functionality * - * @see k_uptime_get(), k_uptime_get_32(), k_uptime_delta() + * @see k_uptime_get(), k_uptime_delta() * k_uptime_delta_32() */ void test_clock_uptime(void) @@ -47,17 +47,17 @@ } /**TESTPOINT: uptime elapse lower 32-bit*/ - t32 = k_uptime_get_32(); - while (k_uptime_get_32() < (t32 + 5)) { + t32 = (u32_t)k_uptime_get(); + while ((u32_t)k_uptime_get() < (t32 + 5)) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); #endif } /**TESTPOINT: uptime straddled ms boundary*/ - t32 = k_uptime_get_32(); + t32 = (u32_t)k_uptime_get(); ALIGN_MS_BOUNDARY; - zassert_true(k_uptime_get_32() > t32, NULL); + zassert_true((u32_t)k_uptime_get() > t32, NULL); /**TESTPOINT: uptime delta*/ d64 = k_uptime_delta(&d64); @@ -84,7 +84,7 @@ /** * @brief Test clock cycle functionality * - * @see k_cycle_get_32(), k_uptime_get_32() + * @see k_cycle_get_32(), k_uptime_get() */ void test_clock_cycle(void) { @@ -104,14 +104,14 @@ /**TESTPOINT: cycle/uptime cross check*/ c0 = k_cycle_get_32(); ALIGN_MS_BOUNDARY; - t32 = k_uptime_get_32(); - while (t32 == k_uptime_get_32()) { + t32 = (u32_t)k_uptime_get(); + while (t32 == (u32_t)k_uptime_get()) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); #endif } - c1 = k_uptime_get_32(); + c1 = (u32_t)k_uptime_get(); /*avoid cycle counter wrap around*/ if (c1 > c0) { /* delta cycle should be greater than 1 milli-second*/ diff --git a/tests/kernel/common/src/timeout_order.c b/tests/kernel/common/src/timeout_order.c index d5ac467dfd152..ea3044601b25f 100644 --- a/tests/kernel/common/src/timeout_order.c +++ b/tests/kernel/common/src/timeout_order.c @@ -63,10 +63,10 @@ void test_timeout_order(void) } - u32_t uptime = k_uptime_get_32(); + u32_t uptime = (u32_t)k_uptime_get(); /* sync on tick */ - while (uptime == k_uptime_get_32()) { + 
while (uptime == (u32_t)k_uptime_get()) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); #endif diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c index d4cca0f9e3f23..385ee521eb5ff 100644 --- a/tests/kernel/context/src/main.c +++ b/tests/kernel/context/src/main.c @@ -243,14 +243,14 @@ static void _test_kernel_cpu_idle(int atomic) int i; /* loop variable */ /* Align to a "ms boundary". */ - tms = k_uptime_get_32(); - while (tms == k_uptime_get_32()) { + tms = (u32_t)k_uptime_get(); + while (tms == (u32_t)k_uptime_get()) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); #endif } - tms = k_uptime_get_32(); + tms = (u32_t)k_uptime_get(); for (i = 0; i < 5; i++) { /* Repeat the test five times */ if (atomic) { unsigned int key = irq_lock(); @@ -261,7 +261,7 @@ static void _test_kernel_cpu_idle(int atomic) } /* calculating milliseconds per tick*/ tms += __ticks_to_ms(1); - tms2 = k_uptime_get_32(); + tms2 = (u32_t)k_uptime_get(); zassert_false(tms2 < tms, "Bad ms per tick value computed," "got %d which is less than %d\n", tms2, tms); diff --git a/tests/kernel/fifo/fifo_api/src/test_fifo_cancel.c b/tests/kernel/fifo/fifo_api/src/test_fifo_cancel.c index ead5996320401..220e905c9b89e 100644 --- a/tests/kernel/fifo/fifo_api/src/test_fifo_cancel.c +++ b/tests/kernel/fifo/fifo_api/src/test_fifo_cancel.c @@ -27,9 +27,9 @@ static void tfifo_thread_thread(struct k_fifo *pfifo) k_tid_t tid = k_thread_create(&thread, tstack, STACK_SIZE, t_cancel_wait_entry, pfifo, NULL, NULL, K_PRIO_PREEMPT(0), 0, 0); - u32_t start_t = k_uptime_get_32(); + u32_t start_t = (u32_t)k_uptime_get(); void *ret = k_fifo_get(pfifo, 500); - u32_t dur = k_uptime_get_32() - start_t; + u32_t dur = (u32_t)k_uptime_get() - start_t; /* While we observed the side effect of the last statement * ( call to k_fifo_cancel_wait) of the thread, it's not fact diff --git a/tests/kernel/pending/src/main.c b/tests/kernel/pending/src/main.c index 05c451f3d0a34..e7571efed3edc 100644 --- 
a/tests/kernel/pending/src/main.c +++ b/tests/kernel/pending/src/main.c @@ -194,7 +194,7 @@ static void timer_tests(void) { k_sem_take(&start_test_sem, K_FOREVER); - timer_start_tick = k_uptime_get_32(); + timer_start_tick = (u32_t)k_uptime_get(); k_timer_start(&timer, NUM_SECONDS(1), 0); @@ -202,7 +202,7 @@ static void timer_tests(void) timer_data = timer.user_data; } - timer_end_tick = k_uptime_get_32(); + timer_end_tick = (u32_t)k_uptime_get(); k_sem_take(&end_test_sem, K_FOREVER); } diff --git a/tests/kernel/sched/schedule_api/src/main.c b/tests/kernel/sched/schedule_api/src/main.c index 6e8671a9de34f..0faac7b40760d 100644 --- a/tests/kernel/sched/schedule_api/src/main.c +++ b/tests/kernel/sched/schedule_api/src/main.c @@ -26,9 +26,9 @@ void spin_for_ms(int ms) */ k_busy_wait(ms * 1000); #else - u32_t t32 = k_uptime_get_32(); + u32_t t32 = (u32_t)k_uptime_get(); - while (k_uptime_get_32() - t32 < ms) { + while ((u32_t)k_uptime_get() - t32 < ms) { /* In the posix arch, a busy loop takes no time, so * let's make it take some */ diff --git a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c index 102d9e885fc70..df71135f49539 100644 --- a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c +++ b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c @@ -99,7 +99,7 @@ static void thread_time_slice(void *p1, void *p2, void *p3) * Ensure that each thread is given the time slice period to execute. 
* * @see k_sched_time_slice_set(), k_sem_reset(), k_cycle_get_32(), - * k_uptime_get_32() + * k_uptime_get() * * @ingroup kernel_sched_tests */ @@ -130,8 +130,8 @@ void test_slice_reset(void) k_sched_time_slice_set(SLICE_SIZE, K_PRIO_PREEMPT(0)); /*synchronize to tick boundary*/ - t32 = k_uptime_get_32(); - while (k_uptime_get_32() == t32) { + t32 = (u32_t)k_uptime_get(); + while ((u32_t)k_uptime_get() == t32) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); #endif diff --git a/tests/kernel/sleep/src/main.c b/tests/kernel/sleep/src/main.c index 0675b5a1f16d0..b2ffe69e7af1e 100644 --- a/tests/kernel/sleep/src/main.c +++ b/tests/kernel/sleep/src/main.c @@ -69,8 +69,8 @@ static void align_to_tick_boundary(void) { u32_t tick; - tick = k_uptime_get_32(); - while (k_uptime_get_32() == tick) { + tick = (u32_t)k_uptime_get(); + while ((u32_t)k_uptime_get() == tick) { /* Busy wait to align to tick boundary */ #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); @@ -101,9 +101,9 @@ static void test_thread(int arg1, int arg2) TC_PRINT("Testing normal expiration of k_sleep()\n"); align_to_tick_boundary(); - start_tick = k_uptime_get_32(); + start_tick = (u32_t)k_uptime_get(); k_sleep(ONE_SECOND); - end_tick = k_uptime_get_32(); + end_tick = (u32_t)k_uptime_get(); if (!sleep_time_valid(start_tick, end_tick, ONE_SECOND_ALIGNED)) { TC_ERROR(" *** k_sleep() slept for %d ticks not %d.", @@ -116,9 +116,9 @@ static void test_thread(int arg1, int arg2) k_sem_give(&helper_thread_sem); /* Activate helper thread */ align_to_tick_boundary(); - start_tick = k_uptime_get_32(); + start_tick = (u32_t)k_uptime_get(); k_sleep(ONE_SECOND); - end_tick = k_uptime_get_32(); + end_tick = (u32_t)k_uptime_get(); if (end_tick - start_tick > 1) { TC_ERROR(" *** k_wakeup() took too long (%d ticks)\n", @@ -130,9 +130,9 @@ static void test_thread(int arg1, int arg2) k_sem_give(&helper_thread_sem); /* Activate helper thread */ align_to_tick_boundary(); - start_tick = k_uptime_get_32(); + start_tick = 
(u32_t)k_uptime_get(); k_sleep(ONE_SECOND); - end_tick = k_uptime_get_32(); + end_tick = (u32_t)k_uptime_get(); if (end_tick - start_tick > 1) { TC_ERROR(" *** k_wakeup() took too long (%d ticks)\n", @@ -144,9 +144,9 @@ static void test_thread(int arg1, int arg2) k_sem_give(&task_sem); /* Activate task */ align_to_tick_boundary(); - start_tick = k_uptime_get_32(); + start_tick = (u32_t)k_uptime_get(); k_sleep(ONE_SECOND); /* Task will execute */ - end_tick = k_uptime_get_32(); + end_tick = (u32_t)k_uptime_get(); if (end_tick - start_tick > 1) { TC_ERROR(" *** k_wakeup() took too long (%d ticks) at LAST\n", @@ -178,7 +178,7 @@ static void helper_thread(int arg1, int arg2) * * @ingroup kernel_sleep_tests * - * @see k_sleep(), k_wakeup(), k_uptime_get_32() + * @see k_sleep(), k_wakeup(), k_uptime_get() */ void test_sleep(void) { @@ -223,9 +223,9 @@ void test_sleep(void) TC_PRINT("Testing kernel k_sleep()\n"); align_to_tick_boundary(); - start_tick = k_uptime_get_32(); + start_tick = (u32_t)k_uptime_get(); k_sleep(ONE_SECOND); - end_tick = k_uptime_get_32(); + end_tick = (u32_t)k_uptime_get(); zassert_true(sleep_time_valid(start_tick, end_tick, ONE_SECOND_ALIGNED), "k_sleep() slept for %d ticks, not %d\n", end_tick - start_tick, ONE_SECOND_ALIGNED); diff --git a/tests/kernel/tickless/tickless/src/main.c b/tests/kernel/tickless/tickless/src/main.c index 03e3fad9b6a73..f740548936c26 100644 --- a/tests/kernel/tickless/tickless/src/main.c +++ b/tests/kernel/tickless/tickless/src/main.c @@ -121,14 +121,14 @@ void ticklessTestThread(void) * as we can. 
*/ k_sleep(TICKS_TO_MS); - start_time = k_uptime_get_32(); + start_time = (u32_t)k_uptime_get(); start_tsc = _TIMESTAMP_READ(); /* FIXME: one tick less to account for * one extra tick for _TICK_ALIGN in k_sleep */ k_sleep((SLEEP_TICKS - 1) * TICKS_TO_MS); end_tsc = _TIMESTAMP_READ(); - end_time = k_uptime_get_32(); + end_time = (u32_t)k_uptime_get(); cal_tsc += end_tsc - start_tsc; } cal_tsc /= CAL_REPS; @@ -156,14 +156,14 @@ void ticklessTestThread(void) * as we can. */ k_sleep(TICKS_TO_MS); - start_time = k_uptime_get_32(); + start_time = (u32_t)k_uptime_get(); start_tsc = _TIMESTAMP_READ(); /* FIXME: one tick less to account for * one extra tick for _TICK_ALIGN in k_sleep */ k_sleep((SLEEP_TICKS - 1) * TICKS_TO_MS); end_tsc = _TIMESTAMP_READ(); - end_time = k_uptime_get_32(); + end_time = (u32_t)k_uptime_get(); diff_tsc += end_tsc - start_tsc; } diff --git a/tests/kernel/tickless/tickless_concept/src/main.c b/tests/kernel/tickless/tickless_concept/src/main.c index e5aeacca3a7cb..70098505cef20 100644 --- a/tests/kernel/tickless/tickless_concept/src/main.c +++ b/tests/kernel/tickless/tickless_concept/src/main.c @@ -31,15 +31,15 @@ static struct k_thread tdata[NUM_THREAD]; #if defined(CONFIG_ARCH_POSIX) #define ALIGN_MS_BOUNDARY() \ do { \ - u32_t t = k_uptime_get_32(); \ - while (t == k_uptime_get_32()) \ + u32_t t = (u32_t)k_uptime_get(); \ + while (t == (u32_t)k_uptime_get()) \ k_busy_wait(50); \ } while (0) #else #define ALIGN_MS_BOUNDARY() \ do { \ - u32_t t = k_uptime_get_32(); \ - while (t == k_uptime_get_32()) \ + u32_t t = (u32_t)k_uptime_get(); \ + while (t == (u32_t)k_uptime_get()) \ ; \ } while (0) #endif @@ -78,17 +78,17 @@ void test_tickless_sysclock(void) volatile u32_t t0, t1; ALIGN_MS_BOUNDARY(); - t0 = k_uptime_get_32(); + t0 = (u32_t)k_uptime_get(); k_sleep(SLEEP_TICKLESS); - t1 = k_uptime_get_32(); + t1 = (u32_t)k_uptime_get(); TC_PRINT("time %d, %d\n", t0, t1); /**TESTPOINT: verify system clock recovery after exiting tickless idle*/ 
zassert_true((t1 - t0) >= SLEEP_TICKLESS, NULL); ALIGN_MS_BOUNDARY(); - t0 = k_uptime_get_32(); + t0 = (u32_t)k_uptime_get(); k_sem_take(&sema, SLEEP_TICKFUL); - t1 = k_uptime_get_32(); + t1 = (u32_t)k_uptime_get(); TC_PRINT("time %d, %d\n", t0, t1); /**TESTPOINT: verify system clock recovery after exiting tickful idle*/ zassert_true((t1 - t0) >= SLEEP_TICKFUL, NULL); diff --git a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c index 96f587d5d7f8d..5b7698bc14356 100644 --- a/tests/kernel/workq/work_queue/src/main.c +++ b/tests/kernel/workq/work_queue/src/main.c @@ -361,8 +361,8 @@ static void coop_delayed_work_resubmit(void) k_busy_wait(1000); #else volatile u32_t uptime; - uptime = k_uptime_get_32(); - while (k_uptime_get_32() == uptime) { + uptime = (u32_t)k_uptime_get(); + while ((u32_t)k_uptime_get() == uptime) { } #endif } diff --git a/tests/net/ipv6/src/main.c b/tests/net/ipv6/src/main.c index 29c9818d2111a..fc39414aaba5d 100644 --- a/tests/net/ipv6/src/main.c +++ b/tests/net/ipv6/src/main.c @@ -247,11 +247,11 @@ static int tester_send(struct device *dev, struct net_pkt *pkt) if (icmp->type == NET_ICMPV6_NS) { if (dad_time[0] == 0U) { - dad_time[0] = k_uptime_get_32(); + dad_time[0] = (u32_t)k_uptime_get(); } else if (dad_time[1] == 0U) { - dad_time[1] = k_uptime_get_32(); + dad_time[1] = (u32_t)k_uptime_get(); } else if (dad_time[2] == 0U) { - dad_time[2] = k_uptime_get_32(); + dad_time[2] = (u32_t)k_uptime_get(); } goto out; @@ -967,7 +967,7 @@ static void test_address_lifetime(void) "Wrap counter wrong (%d)", ifaddr->lifetime.wrap_counter); ifaddr->lifetime.timer_timeout = K_MSEC(10); - ifaddr->lifetime.timer_start = k_uptime_get_32() - K_MSEC(10); + ifaddr->lifetime.timer_start = (u32_t)k_uptime_get() - K_MSEC(10); ifaddr->lifetime.wrap_counter = 0; net_address_lifetime_timeout(); diff --git a/tests/net/socket/poll/src/main.c b/tests/net/socket/poll/src/main.c index 276fe493f50e4..a67fa04f00fc6 100644 --- 
a/tests/net/socket/poll/src/main.c +++ b/tests/net/socket/poll/src/main.c @@ -56,9 +56,9 @@ void test_poll(void) pollfds[1].events = POLLIN; /* Poll non-ready fd's with timeout of 0 */ - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = poll(pollfds, ARRAY_SIZE(pollfds), 0); - zassert_true(k_uptime_get_32() - tstamp <= FUZZ, ""); + zassert_true((u32_t)k_uptime_get() - tstamp <= FUZZ, ""); zassert_equal(res, 0, ""); zassert_equal(pollfds[0].fd, c_sock, ""); @@ -70,9 +70,9 @@ void test_poll(void) /* Poll non-ready fd's with timeout of 30 */ - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = poll(pollfds, ARRAY_SIZE(pollfds), 30); - tstamp = k_uptime_get_32() - tstamp; + tstamp = (u32_t)k_uptime_get() - tstamp; zassert_true(tstamp >= 30U && tstamp <= 30 + FUZZ, ""); zassert_equal(res, 0, ""); @@ -81,9 +81,9 @@ void test_poll(void) len = send(c_sock, BUF_AND_SIZE(TEST_STR_SMALL), 0); zassert_equal(len, STRLEN(TEST_STR_SMALL), "invalid send len"); - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = poll(pollfds, ARRAY_SIZE(pollfds), 30); - tstamp = k_uptime_get_32() - tstamp; + tstamp = (u32_t)k_uptime_get() - tstamp; zassert_true(tstamp <= FUZZ, ""); zassert_equal(res, 1, ""); @@ -99,9 +99,9 @@ void test_poll(void) len = recv(s_sock, BUF_AND_SIZE(buf), 0); zassert_equal(len, STRLEN(TEST_STR_SMALL), "invalid recv len"); - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = poll(pollfds, ARRAY_SIZE(pollfds), 0); - zassert_true(k_uptime_get_32() - tstamp <= FUZZ, ""); + zassert_true((u32_t)k_uptime_get() - tstamp <= FUZZ, ""); zassert_equal(res, 0, ""); zassert_equal(pollfds[1].revents, 0, ""); @@ -110,9 +110,9 @@ void test_poll(void) res = close(c_sock); zassert_equal(res, 0, "close failed"); - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = poll(pollfds, ARRAY_SIZE(pollfds), 0); - zassert_true(k_uptime_get_32() - tstamp <= FUZZ, ""); + zassert_true((u32_t)k_uptime_get() - tstamp 
<= FUZZ, ""); zassert_equal(res, 1, ""); zassert_equal(pollfds[0].revents, POLLNVAL, ""); zassert_equal(pollfds[1].revents, 0, ""); diff --git a/tests/net/socket/select/src/main.c b/tests/net/socket/select/src/main.c index 9670a0393976a..90e79bfa25ee0 100644 --- a/tests/net/socket/select/src/main.c +++ b/tests/net/socket/select/src/main.c @@ -91,9 +91,9 @@ void test_select(void) /* Poll non-ready fd's with timeout of 0 */ tval.tv_sec = tval.tv_usec = 0; - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = select(s_sock + 1, &readfds, NULL, NULL, &tval); - tstamp = k_uptime_get_32() - tstamp; + tstamp = (u32_t)k_uptime_get() - tstamp; /* Even though we expect select to be non-blocking, scheduler may * preempt the thread. That's why we add FUZZ to the expected * delay time. Also applies to similar cases below. @@ -109,9 +109,9 @@ void test_select(void) FD_SET(s_sock, &readfds); tval.tv_sec = 0; tval.tv_usec = 30 * 1000; - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = select(s_sock + 1, &readfds, NULL, NULL, &tval); - tstamp = k_uptime_get_32() - tstamp; + tstamp = (u32_t)k_uptime_get() - tstamp; zassert_true(tstamp >= 30U && tstamp <= 30 + FUZZ, ""); zassert_equal(res, 0, ""); @@ -124,9 +124,9 @@ void test_select(void) FD_SET(s_sock, &readfds); tval.tv_sec = 0; tval.tv_usec = 30 * 1000; - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = select(s_sock + 1, &readfds, NULL, NULL, &tval); - tstamp = k_uptime_get_32() - tstamp; + tstamp = (u32_t)k_uptime_get() - tstamp; zassert_true(tstamp <= FUZZ, ""); zassert_equal(res, 1, ""); @@ -141,9 +141,9 @@ void test_select(void) FD_SET(c_sock, &readfds); FD_SET(s_sock, &readfds); tval.tv_sec = tval.tv_usec = 0; - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = select(s_sock + 1, &readfds, NULL, NULL, &tval); - zassert_true(k_uptime_get_32() - tstamp <= FUZZ, ""); + zassert_true((u32_t)k_uptime_get() - tstamp <= FUZZ, ""); zassert_equal(res, 0, ""); 
zassert_false(FD_ISSET(s_sock, &readfds), ""); @@ -155,9 +155,9 @@ void test_select(void) FD_SET(c_sock, &readfds); FD_SET(s_sock, &readfds); tval.tv_sec = tval.tv_usec = 0; - tstamp = k_uptime_get_32(); + tstamp = (u32_t)k_uptime_get(); res = select(s_sock + 1, &readfds, NULL, NULL, &tval); - zassert_true(k_uptime_get_32() - tstamp <= FUZZ, ""); + zassert_true((u32_t)k_uptime_get() - tstamp <= FUZZ, ""); zassert_true(res < 0, ""); zassert_equal(errno, EBADF, ""); diff --git a/tests/subsys/fs/littlefs/src/test_lfs_perf.c b/tests/subsys/fs/littlefs/src/test_lfs_perf.c index 69fe51b3cda13..8f0fb2de3a0f8 100644 --- a/tests/subsys/fs/littlefs/src/test_lfs_perf.c +++ b/tests/subsys/fs/littlefs/src/test_lfs_perf.c @@ -83,7 +83,7 @@ static int write_read(const char *tag, goto out_buf; } - t0 = k_uptime_get_32(); + t0 = (u32_t)k_uptime_get(); for (size_t i = 0; i < nbuf; ++i) { rc = fs_write(&file, buf, buf_size); if (buf_size != rc) { @@ -91,7 +91,7 @@ static int write_read(const char *tag, goto out_file; } } - t1 = k_uptime_get_32(); + t1 = (u32_t)k_uptime_get(); (void)fs_close(&file); @@ -118,7 +118,7 @@ static int write_read(const char *tag, goto out_buf; } - t0 = k_uptime_get_32(); + t0 = (u32_t)k_uptime_get(); for (size_t i = 0; i < nbuf; ++i) { rc = fs_read(&file, buf, buf_size); if (buf_size != rc) { @@ -126,7 +126,7 @@ static int write_read(const char *tag, goto out_file; } } - t1 = k_uptime_get_32(); + t1 = (u32_t)k_uptime_get(); TC_PRINT("%s read %zu * %zu = %zu bytes in %u ms: " "%u By/s, %u KiBy/s\n",