15 | 15 | #include <linux/net_tstamp.h> |
16 | 16 | #include <linux/timecounter.h> |
17 | 17 | #include <linux/timekeeping.h> |
| 18 | +#include <linux/ptp_classify.h> |
18 | 19 | #include "bnxt_hsi.h" |
19 | 20 | #include "bnxt.h" |
20 | 21 | #include "bnxt_ptp.h" |
21 | 22 |
| 23 | +int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id) |
| 24 | +{ |
| 25 | + unsigned int ptp_class; |
| 26 | + struct ptp_header *hdr; |
| 27 | + |
| 28 | + ptp_class = ptp_classify_raw(skb); |
| 29 | + |
| 30 | + switch (ptp_class & PTP_CLASS_VMASK) { |
| 31 | + case PTP_CLASS_V1: |
| 32 | + case PTP_CLASS_V2: |
| 33 | + hdr = ptp_parse_header(skb, ptp_class); |
| 34 | + if (!hdr) |
| 35 | + return -EINVAL; |
| 36 | + |
| 37 | + *seq_id = ntohs(hdr->sequence_id); |
| 38 | + return 0; |
| 39 | + default: |
| 40 | + return -ERANGE; |
| 41 | + } |
| 42 | +} |
| 43 | + |
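Note (illustrative, not part of the diff): bnxt_ptp_parse() uses the kernel's ptp_classify helpers to accept only PTP v1/v2 event frames and to extract the 16-bit sequenceId, which the driver later hands to firmware so the queued Tx timestamp can be matched to the right packet. In the IEEE 1588 common header the sequenceId occupies bytes 30-31; the toy parser below works on a raw, already-classified PTP payload and is a standalone sketch under that assumption, not driver code.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* ntohs() */

    /* Toy model: pull sequenceId (bytes 30-31) out of a PTPv2 common header.
     * The full common header is 34 bytes; anything shorter is rejected. */
    static int toy_ptp_seq_id(const uint8_t *ptp_hdr, size_t len, uint16_t *seq_id)
    {
            uint16_t raw;

            if (len < 34)
                    return -1;
            memcpy(&raw, ptp_hdr + 30, sizeof(raw));
            *seq_id = ntohs(raw);
            return 0;
    }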
22 | 44 | static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, |
23 | 45 | const struct timespec64 *ts) |
24 | 46 | { |
@@ -57,6 +79,28 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp) |
57 | 79 | spin_unlock_bh(&ptp->ptp_lock); |
58 | 80 | } |
59 | 81 |
| 82 | +static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) |
| 83 | +{ |
| 84 | + struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr; |
| 85 | + struct hwrm_port_ts_query_input req = {0}; |
| 86 | + int rc; |
| 87 | + |
| 88 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1); |
| 89 | + req.flags = cpu_to_le32(flags); |
| 90 | + if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == |
| 91 | + PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { |
| 92 | + req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); |
| 93 | + req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); |
| 94 | + req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); |
| 95 | + } |
| 96 | + mutex_lock(&bp->hwrm_cmd_lock); |
| 97 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
| 98 | + if (!rc) |
| 99 | + *ts = le64_to_cpu(resp->ptp_msg_ts); |
| 100 | + mutex_unlock(&bp->hwrm_cmd_lock); |
| 101 | + return rc; |
| 102 | +} |
| 103 | + |
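Note (illustrative, not part of the diff): HWRM_PORT_TS_QUERY takes a flags word that selects the Rx or Tx path; only the Tx path needs the extra enables, the PTP sequence ID recorded at transmit time, and a firmware timeout. A hedged sketch of the expected calling pattern, with names taken from this patch and error handling trimmed (not a verbatim call site):

    /* Sketch: fetch the raw PHC cycle count for the last stamped Tx packet.
     * Assumes ptp->tx_seqid was recorded before the query; see
     * bnxt_get_tx_ts_p5() further down. */
    u64 hwts_cycles = 0;

    if (bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &hwts_cycles))
            return;         /* firmware did not latch a timestamp in time */
    /* hwts_cycles is still a raw counter value; it only becomes nanoseconds
     * after timecounter_cyc2time() is applied under ptp_lock. */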
60 | 104 | static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info, |
61 | 105 | struct timespec64 *ts, |
62 | 106 | struct ptp_system_timestamp *sts) |
@@ -269,16 +313,62 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc) |
269 | 313 | return bnxt_refclk_read(ptp->bp, NULL); |
270 | 314 | } |
271 | 315 |
| 316 | +static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) |
| 317 | +{ |
| 318 | + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
| 319 | + struct skb_shared_hwtstamps timestamp; |
| 320 | + u64 ts = 0, ns = 0; |
| 321 | + int rc; |
| 322 | + |
| 323 | + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts); |
| 324 | + if (!rc) { |
| 325 | + memset(&timestamp, 0, sizeof(timestamp)); |
| 326 | + spin_lock_bh(&ptp->ptp_lock); |
| 327 | + ns = timecounter_cyc2time(&ptp->tc, ts); |
| 328 | + spin_unlock_bh(&ptp->ptp_lock); |
| 329 | + timestamp.hwtstamp = ns_to_ktime(ns); |
| 330 | + skb_tstamp_tx(ptp->tx_skb, &timestamp); |
| 331 | + } else { |
| 332 | + netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n", |
| 333 | + rc); |
| 334 | + } |
| 335 | + |
| 336 | + dev_kfree_skb_any(ptp->tx_skb); |
| 337 | + ptp->tx_skb = NULL; |
| 338 | + atomic_inc(&ptp->tx_avail); |
| 339 | +} |
| 340 | + |
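Note (illustrative, not part of the diff): firmware returns a free-running PHC cycle count, so bnxt_stamp_tx_skb() converts it to nanoseconds with the timecounter while holding ptp_lock and only then reports it through skb_tstamp_tx(). Conceptually the cyclecounter/timecounter conversion is "scaled cycle delta since the last sync point plus the accumulated nanosecond base"; a self-contained toy model with a fixed mult/shift and no counter mask or overflow handling:

    #include <stdint.h>

    /* Toy model of timecounter_cyc2time():
     *     ns = nsec_base + ((cycles - cycle_last) * mult) >> shift
     * The real kernel helper also masks the counter width and carries
     * fractional nanoseconds between calls. */
    struct toy_timecounter {
            uint64_t cycle_last;    /* counter value at the last synchronization */
            uint64_t nsec_base;     /* nanoseconds accumulated up to cycle_last */
            uint32_t mult;
            uint32_t shift;
    };

    static uint64_t toy_cyc2time(const struct toy_timecounter *tc, uint64_t cycles)
    {
            uint64_t delta = cycles - tc->cycle_last;

            return tc->nsec_base + ((delta * tc->mult) >> tc->shift);
    }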
272 | 341 | static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) |
273 | 342 | { |
274 | 343 | struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, |
275 | 344 | ptp_info); |
| 345 | + unsigned long now = jiffies; |
276 | 346 | struct bnxt *bp = ptp->bp; |
277 | 347 |
| 348 | + if (ptp->tx_skb) |
| 349 | + bnxt_stamp_tx_skb(bp, ptp->tx_skb); |
| 350 | + |
| 351 | + if (!time_after_eq(now, ptp->next_period)) |
| 352 | + return ptp->next_period - now; |
| 353 | + |
278 | 354 | bnxt_ptp_get_current_time(bp); |
| 355 | + ptp->next_period = now + HZ; |
279 | 356 | return HZ; |
280 | 357 | } |
281 | 358 |
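Note (illustrative, not part of the diff): the aux worker now does double duty. bnxt_get_tx_ts_p5() schedules it with a zero delay so a pending Tx skb is stamped promptly, while the cached PHC time is refreshed only once per second; the value returned to the PTP core is the number of jiffies until the next scheduled run. The next_period test relies on the usual wrap-safe jiffies comparison; below is a standalone sketch of that idiom with a toy 32-bit counter, not kernel code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Same idea as time_after_eq(): compare via the signed difference so the
     * test keeps working after the counter wraps around. */
    static bool toy_time_after_eq(uint32_t now, uint32_t deadline)
    {
            return (int32_t)(now - deadline) >= 0;
    }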
| 359 | +int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb) |
| 360 | +{ |
| 361 | + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
| 362 | + |
| 363 | + if (ptp->tx_skb) { |
| 364 | + netdev_err(bp->dev, "deferring skb: one SKB is still outstanding\n"); |
| 365 | + return -EBUSY; |
| 366 | + } |
| 367 | + ptp->tx_skb = skb; |
| 368 | + ptp_schedule_worker(ptp->ptp_clock, 0); |
| 369 | + return 0; |
| 370 | +} |
| 371 | + |
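Note (illustrative, not part of the diff): bnxt_get_tx_ts_p5() parks exactly one skb and kicks the worker; the interface tracks a single outstanding Tx timestamp at a time, which is why tx_avail is replenished only after bnxt_stamp_tx_skb() finishes. Below is a hedged sketch of the kind of Tx-completion hook that would feed it; the surrounding flag and ownership handling is an assumption for illustration, not a quote from bnxt.c.

    /* Sketch under stated assumptions: the xmit path already ran
     * bnxt_ptp_parse() to record ptp->tx_seqid and set SKBTX_IN_PROGRESS on
     * the skb. On success the skb is handed off and later freed by
     * bnxt_stamp_tx_skb(); otherwise it is freed here as usual. */
    static void toy_tx_complete_tstamp(struct bnxt *bp, struct sk_buff *skb)
    {
            if ((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
                bp->ptp_cfg && !bnxt_get_tx_ts_p5(bp, skb))
                    return;         /* deferred: the PTP worker stamps and frees it */

            dev_kfree_skb_any(skb);
    }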
282 | 372 | int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) |
283 | 373 | { |
284 | 374 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
@@ -375,5 +465,9 @@ void bnxt_ptp_clear(struct bnxt *bp) |
375 | 465 | ptp_clock_unregister(ptp->ptp_clock); |
376 | 466 |
377 | 467 | ptp->ptp_clock = NULL; |
| 468 | + if (ptp->tx_skb) { |
| 469 | + dev_kfree_skb_any(ptp->tx_skb); |
| 470 | + ptp->tx_skb = NULL; |
| 471 | + } |
378 | 472 | bnxt_unmap_ptp_regs(bp); |
379 | 473 | } |