28
28
#include <linux/sched.h>
29
29
#include <linux/seq_file.h>
30
30
31
+ #if IS_ENABLED (CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS )
32
+ #include <linux/stackdepot.h>
33
+ #include <linux/sort.h>
34
+ #include <linux/timekeeping.h>
35
+ #include <linux/math64.h>
36
+ #endif
37
+
31
38
#include <drm/drm_atomic.h>
32
39
#include <drm/drm_atomic_helper.h>
33
40
#include <drm/drm_dp_mst_helper.h>
@@ -1399,12 +1406,184 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1399
1406
}
1400
1407
EXPORT_SYMBOL (drm_dp_mst_put_port_malloc );
1401
1408
1409
+ #if IS_ENABLED (CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS )
1410
+
1411
+ #define STACK_DEPTH 8
1412
+
1413
+ static noinline void
1414
+ __topology_ref_save (struct drm_dp_mst_topology_mgr * mgr ,
1415
+ struct drm_dp_mst_topology_ref_history * history ,
1416
+ enum drm_dp_mst_topology_ref_type type )
1417
+ {
1418
+ struct drm_dp_mst_topology_ref_entry * entry = NULL ;
1419
+ depot_stack_handle_t backtrace ;
1420
+ ulong stack_entries [STACK_DEPTH ];
1421
+ uint n ;
1422
+ int i ;
1423
+
1424
+ n = stack_trace_save (stack_entries , ARRAY_SIZE (stack_entries ), 1 );
1425
+ backtrace = stack_depot_save (stack_entries , n , GFP_KERNEL );
1426
+ if (!backtrace )
1427
+ return ;
1428
+
1429
+ /* Try to find an existing entry for this backtrace */
1430
+ for (i = 0 ; i < history -> len ; i ++ ) {
1431
+ if (history -> entries [i ].backtrace == backtrace ) {
1432
+ entry = & history -> entries [i ];
1433
+ break ;
1434
+ }
1435
+ }
1436
+
1437
+ /* Otherwise add one */
1438
+ if (!entry ) {
1439
+ struct drm_dp_mst_topology_ref_entry * new ;
1440
+ int new_len = history -> len + 1 ;
1441
+
1442
+ new = krealloc (history -> entries , sizeof (* new ) * new_len ,
1443
+ GFP_KERNEL );
1444
+ if (!new )
1445
+ return ;
1446
+
1447
+ entry = & new [history -> len ];
1448
+ history -> len = new_len ;
1449
+ history -> entries = new ;
1450
+
1451
+ entry -> backtrace = backtrace ;
1452
+ entry -> type = type ;
1453
+ entry -> count = 0 ;
1454
+ }
1455
+ entry -> count ++ ;
1456
+ entry -> ts_nsec = ktime_get_ns ();
1457
+ }
1458
+
1459
+ static int
1460
+ topology_ref_history_cmp (const void * a , const void * b )
1461
+ {
1462
+ const struct drm_dp_mst_topology_ref_entry * entry_a = a , * entry_b = b ;
1463
+
1464
+ if (entry_a -> ts_nsec > entry_b -> ts_nsec )
1465
+ return 1 ;
1466
+ else if (entry_a -> ts_nsec < entry_b -> ts_nsec )
1467
+ return -1 ;
1468
+ else
1469
+ return 0 ;
1470
+ }
1471
+
1472
+ static inline const char *
1473
+ topology_ref_type_to_str (enum drm_dp_mst_topology_ref_type type )
1474
+ {
1475
+ if (type == DRM_DP_MST_TOPOLOGY_REF_GET )
1476
+ return "get" ;
1477
+ else
1478
+ return "put" ;
1479
+ }
1480
+
1481
+ static void
1482
+ __dump_topology_ref_history (struct drm_dp_mst_topology_ref_history * history ,
1483
+ void * ptr , const char * type_str )
1484
+ {
1485
+ struct drm_printer p = drm_debug_printer (DBG_PREFIX );
1486
+ char * buf = kzalloc (PAGE_SIZE , GFP_KERNEL );
1487
+ int i ;
1488
+
1489
+ if (!buf )
1490
+ return ;
1491
+
1492
+ if (!history -> len )
1493
+ goto out ;
1494
+
1495
+ /* First, sort the list so that it goes from oldest to newest
1496
+ * reference entry
1497
+ */
1498
+ sort (history -> entries , history -> len , sizeof (* history -> entries ),
1499
+ topology_ref_history_cmp , NULL );
1500
+
1501
+ drm_printf (& p , "%s (%p) topology count reached 0, dumping history:\n" ,
1502
+ type_str , ptr );
1503
+
1504
+ for (i = 0 ; i < history -> len ; i ++ ) {
1505
+ const struct drm_dp_mst_topology_ref_entry * entry =
1506
+ & history -> entries [i ];
1507
+ ulong * entries ;
1508
+ uint nr_entries ;
1509
+ u64 ts_nsec = entry -> ts_nsec ;
1510
+ u64 rem_nsec = do_div (ts_nsec , 1000000000 );
1511
+
1512
+ nr_entries = stack_depot_fetch (entry -> backtrace , & entries );
1513
+ stack_trace_snprint (buf , PAGE_SIZE , entries , nr_entries , 4 );
1514
+
1515
+ drm_printf (& p , " %d %ss (last at %5llu.%06llu):\n%s" ,
1516
+ entry -> count ,
1517
+ topology_ref_type_to_str (entry -> type ),
1518
+ ts_nsec , rem_nsec / 1000 , buf );
1519
+ }
1520
+
1521
+ /* Now free the history, since this is the only time we expose it */
1522
+ kfree (history -> entries );
1523
+ out :
1524
+ kfree (buf );
1525
+ }
1526
+
1527
+ static __always_inline void
1528
+ drm_dp_mst_dump_mstb_topology_history (struct drm_dp_mst_branch * mstb )
1529
+ {
1530
+ __dump_topology_ref_history (& mstb -> topology_ref_history , mstb ,
1531
+ "MSTB" );
1532
+ }
1533
+
1534
+ static __always_inline void
1535
+ drm_dp_mst_dump_port_topology_history (struct drm_dp_mst_port * port )
1536
+ {
1537
+ __dump_topology_ref_history (& port -> topology_ref_history , port ,
1538
+ "Port" );
1539
+ }
1540
+
1541
+ static __always_inline void
1542
+ save_mstb_topology_ref (struct drm_dp_mst_branch * mstb ,
1543
+ enum drm_dp_mst_topology_ref_type type )
1544
+ {
1545
+ __topology_ref_save (mstb -> mgr , & mstb -> topology_ref_history , type );
1546
+ }
1547
+
1548
+ static __always_inline void
1549
+ save_port_topology_ref (struct drm_dp_mst_port * port ,
1550
+ enum drm_dp_mst_topology_ref_type type )
1551
+ {
1552
+ __topology_ref_save (port -> mgr , & port -> topology_ref_history , type );
1553
+ }
1554
+
1555
+ static inline void
1556
+ topology_ref_history_lock (struct drm_dp_mst_topology_mgr * mgr )
1557
+ {
1558
+ mutex_lock (& mgr -> topology_ref_history_lock );
1559
+ }
1560
+
1561
+ static inline void
1562
+ topology_ref_history_unlock (struct drm_dp_mst_topology_mgr * mgr )
1563
+ {
1564
+ mutex_unlock (& mgr -> topology_ref_history_lock );
1565
+ }
1566
+ #else
1567
+ static inline void
1568
+ topology_ref_history_lock (struct drm_dp_mst_topology_mgr * mgr ) {}
1569
+ static inline void
1570
+ topology_ref_history_unlock (struct drm_dp_mst_topology_mgr * mgr ) {}
1571
+ static inline void
1572
+ drm_dp_mst_dump_mstb_topology_history (struct drm_dp_mst_branch * mstb ) {}
1573
+ static inline void
1574
+ drm_dp_mst_dump_port_topology_history (struct drm_dp_mst_port * port ) {}
1575
+ #define save_mstb_topology_ref (mstb , type )
1576
+ #define save_port_topology_ref (port , type )
1577
+ #endif
1578
+
1402
1579
static void drm_dp_destroy_mst_branch_device (struct kref * kref )
1403
1580
{
1404
1581
struct drm_dp_mst_branch * mstb =
1405
1582
container_of (kref , struct drm_dp_mst_branch , topology_kref );
1406
1583
struct drm_dp_mst_topology_mgr * mgr = mstb -> mgr ;
1407
1584
1585
+ drm_dp_mst_dump_mstb_topology_history (mstb );
1586
+
1408
1587
INIT_LIST_HEAD (& mstb -> destroy_next );
1409
1588
1410
1589
/*
@@ -1442,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1442
1621
/*
 * Try to take a topology reference on @mstb; fails (returns 0) once the
 * topology refcount has already dropped to zero. The ref-history lock is
 * held across the kref operation so the recorded history stays in sync
 * with the actual reference count.
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		/* Only log and record the get if it actually succeeded */
		DRM_DEBUG("mstb %p (%d)\n",
			  mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}
@@ -1467,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1467
1652
*/
1468
1653
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	/* Warn if the count is already zero: an unconditional get is only
	 * valid while a topology reference is still held.
	 */
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}
1474
1664
1475
1665
/**
@@ -1487,8 +1677,13 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1487
1677
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	/* Log the count the branch is about to drop to (hence the -1) */
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	/* Drop the lock before kref_put(): the release callback dumps the
	 * history itself.
	 */
	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
1494
1689
@@ -1498,6 +1693,8 @@ static void drm_dp_destroy_port(struct kref *kref)
1498
1693
container_of (kref , struct drm_dp_mst_port , topology_kref );
1499
1694
struct drm_dp_mst_topology_mgr * mgr = port -> mgr ;
1500
1695
1696
+ drm_dp_mst_dump_port_topology_history (port );
1697
+
1501
1698
/* There's nothing that needs locking to destroy an input port yet */
1502
1699
if (port -> input ) {
1503
1700
drm_dp_mst_put_port_malloc (port );
@@ -1541,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref)
1541
1738
/*
 * Try to take a topology reference on @port; fails (returns 0) once the
 * topology refcount has already dropped to zero. History recording is
 * serialized with the kref operation via the ref-history lock.
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		/* Only log and record the get if it actually succeeded */
		DRM_DEBUG("port %p (%d)\n",
			  port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}
1552
1754
@@ -1565,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1565
1767
*/
1566
1768
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	/* Warn if the count is already zero: an unconditional get is only
	 * valid while a topology reference is still held.
	 */
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}
1572
1779
1573
1780
/**
@@ -1583,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1583
1790
*/
1584
1791
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	/* Log the count the port is about to drop to (hence the -1) */
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	/* Drop the lock before kref_put(): the release callback dumps the
	 * history itself.
	 */
	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}
1590
1802
@@ -4578,6 +4790,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
4578
4790
mutex_init (& mgr -> delayed_destroy_lock );
4579
4791
mutex_init (& mgr -> up_req_lock );
4580
4792
mutex_init (& mgr -> probe_lock );
4793
+ #if IS_ENABLED (CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS )
4794
+ mutex_init (& mgr -> topology_ref_history_lock );
4795
+ #endif
4581
4796
INIT_LIST_HEAD (& mgr -> tx_msg_downq );
4582
4797
INIT_LIST_HEAD (& mgr -> destroy_port_list );
4583
4798
INIT_LIST_HEAD (& mgr -> destroy_branch_device_list );
@@ -4644,6 +4859,9 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
4644
4859
mutex_destroy (& mgr -> lock );
4645
4860
mutex_destroy (& mgr -> up_req_lock );
4646
4861
mutex_destroy (& mgr -> probe_lock );
4862
+ #if IS_ENABLED (CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS )
4863
+ mutex_destroy (& mgr -> topology_ref_history_lock );
4864
+ #endif
4647
4865
}
4648
4866
EXPORT_SYMBOL (drm_dp_mst_topology_mgr_destroy );
4649
4867
0 commit comments