@@ -16,6 +16,219 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
 
 DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
 
+static struct devlink *devlinks_xa_get(unsigned long index)
+{
+	struct devlink *devlink;
+
+	rcu_read_lock();
+	devlink = xa_find(&devlinks, &index, index, DEVLINK_REGISTERED);
+	if (!devlink || !devlink_try_get(devlink))
+		devlink = NULL;
+	rcu_read_unlock();
+	return devlink;
+}
+
+/* devlink_rels xarray contains 1:1 relationships between
+ * devlink object and related nested devlink instance.
+ * The xarray index is used to get the nested object from
+ * the nested-in object code.
+ */
+static DEFINE_XARRAY_FLAGS(devlink_rels, XA_FLAGS_ALLOC1);
+
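+/* DEVLINK_REL_IN_USE marks a relationship whose nested devlink
+ * instance still exists; once the mark is cleared, the notify work
+ * invokes the nested-in object's cleanup callback.
+ */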
+#define DEVLINK_REL_IN_USE XA_MARK_0
+
+struct devlink_rel {
+	u32 index;
+	refcount_t refcount;
+	u32 devlink_index;
+	struct {
+		u32 devlink_index;
+		u32 obj_index;
+		devlink_rel_notify_cb_t *notify_cb;
+		devlink_rel_cleanup_cb_t *cleanup_cb;
+		struct work_struct notify_work;
+	} nested_in;
+};
+
+static void devlink_rel_free(struct devlink_rel *rel)
+{
+	xa_erase(&devlink_rels, rel->index);
+	kfree(rel);
+}
+
+static void __devlink_rel_get(struct devlink_rel *rel)
+{
+	refcount_inc(&rel->refcount);
+}
+
+static void __devlink_rel_put(struct devlink_rel *rel)
+{
+	if (refcount_dec_and_test(&rel->refcount))
+		devlink_rel_free(rel);
+}
+
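+/* The notify work takes the instance lock with devl_trylock(); if the
+ * lock is contended, the work reschedules itself and retries instead
+ * of blocking.
+ */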
+static void devlink_rel_nested_in_notify_work(struct work_struct *work)
+{
+	struct devlink_rel *rel = container_of(work, struct devlink_rel,
+					       nested_in.notify_work);
+	struct devlink *devlink;
+
+	devlink = devlinks_xa_get(rel->nested_in.devlink_index);
+	if (!devlink)
+		goto rel_put;
+	if (!devl_trylock(devlink)) {
+		devlink_put(devlink);
+		goto reschedule_work;
+	}
+	if (!devl_is_registered(devlink)) {
+		devl_unlock(devlink);
+		devlink_put(devlink);
+		goto rel_put;
+	}
+	if (!xa_get_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE))
+		rel->nested_in.cleanup_cb(devlink, rel->nested_in.obj_index, rel->index);
+	rel->nested_in.notify_cb(devlink, rel->nested_in.obj_index);
+	devl_unlock(devlink);
+	devlink_put(devlink);
+
+rel_put:
+	__devlink_rel_put(rel);
+	return;
+
+reschedule_work:
+	schedule_work(&rel->nested_in.notify_work);
+}
+
+static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel)
+{
+	__devlink_rel_get(rel);
+	schedule_work(&rel->nested_in.notify_work);
+}
+
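+/* Indices are allocated cyclically so that a just-freed relationship
+ * index is not handed out again right away.
+ */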
+static struct devlink_rel *devlink_rel_alloc(void)
+{
+	struct devlink_rel *rel;
+	static u32 next;
+	int err;
+
+	rel = kzalloc(sizeof(*rel), GFP_KERNEL);
+	if (!rel)
+		return ERR_PTR(-ENOMEM);
+
+	err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel,
+			      xa_limit_32b, &next, GFP_KERNEL);
+	if (err) {
+		kfree(rel);
+		return ERR_PTR(err);
+	}
+
+	refcount_set(&rel->refcount, 1);
+	INIT_WORK(&rel->nested_in.notify_work,
+		  &devlink_rel_nested_in_notify_work);
+	return rel;
+}
+
+static void devlink_rel_put(struct devlink *devlink)
+{
+	struct devlink_rel *rel = devlink->rel;
+
+	if (!rel)
+		return;
+	xa_clear_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
+	devlink_rel_nested_in_notify_work_schedule(rel);
+	__devlink_rel_put(rel);
+	devlink->rel = NULL;
+}
+
+void devlink_rel_nested_in_clear(u32 rel_index)
+{
+	xa_clear_mark(&devlink_rels, rel_index, DEVLINK_REL_IN_USE);
+}
+
+int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index,
+			      u32 obj_index, devlink_rel_notify_cb_t *notify_cb,
+			      devlink_rel_cleanup_cb_t *cleanup_cb,
+			      struct devlink *devlink)
+{
+	struct devlink_rel *rel = devlink_rel_alloc();
+
+	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+
+	if (IS_ERR(rel))
+		return PTR_ERR(rel);
+
+	rel->devlink_index = devlink->index;
+	rel->nested_in.devlink_index = devlink_index;
+	rel->nested_in.obj_index = obj_index;
+	rel->nested_in.notify_cb = notify_cb;
+	rel->nested_in.cleanup_cb = cleanup_cb;
+	*rel_index = rel->index;
+	xa_set_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
+	devlink->rel = rel;
+	return 0;
+}
+
+void devlink_rel_nested_in_notify(struct devlink *devlink)
+{
+	struct devlink_rel *rel = devlink->rel;
+
+	if (!rel)
+		return;
+	devlink_rel_nested_in_notify_work_schedule(rel);
+}
+
+static struct devlink_rel *devlink_rel_find(unsigned long rel_index)
+{
+	return xa_find(&devlink_rels, &rel_index, rel_index,
+		       DEVLINK_REL_IN_USE);
+}
+
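+/* Resolve a relationship index to its devlink instance: look the index
+ * up under the xarray lock, then take a reference and the instance
+ * lock before returning.
+ */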
+static struct devlink *devlink_rel_devlink_get_lock(u32 rel_index)
+{
+	struct devlink *devlink;
+	struct devlink_rel *rel;
+	u32 devlink_index;
+
+	if (!rel_index)
+		return NULL;
+	xa_lock(&devlink_rels);
+	rel = devlink_rel_find(rel_index);
+	if (rel)
+		devlink_index = rel->devlink_index;
+	xa_unlock(&devlink_rels);
+	if (!rel)
+		return NULL;
+	devlink = devlinks_xa_get(devlink_index);
+	if (!devlink)
+		return NULL;
+	devl_lock(devlink);
+	if (!devl_is_registered(devlink)) {
+		devl_unlock(devlink);
+		devlink_put(devlink);
+		return NULL;
+	}
+	return devlink;
+}
+
+int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
+				   u32 rel_index, int attrtype,
+				   bool *msg_updated)
+{
+	struct net *net = devlink_net(devlink);
+	struct devlink *rel_devlink;
+	int err;
+
+	rel_devlink = devlink_rel_devlink_get_lock(rel_index);
+	if (!rel_devlink)
+		return 0;
+	err = devlink_nl_put_nested_handle(msg, net, rel_devlink, attrtype);
+	devl_unlock(rel_devlink);
+	devlink_put(rel_devlink);
+	if (!err && msg_updated)
+		*msg_updated = true;
+	return err;
+}
+
 void *devlink_priv(struct devlink *devlink)
 {
 	return &devlink->priv;
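
For orientation, a minimal sketch of how a nested-in object could consume this API; the foo_obj type, its callbacks, and the choice of DEVLINK_ATTR_NESTED_DEVLINK as attrtype are illustrative assumptions, not part of this patch:

/* Hypothetical nested-in object tracking one nested devlink instance. */
struct foo_obj {
	struct devlink *devlink;	/* nested-in devlink instance */
	u32 index;			/* object index within that instance */
	u32 rel_index;			/* set by devlink_rel_nested_in_add() */
};

static void foo_obj_notify(struct devlink *devlink, u32 obj_index)
{
	/* re-send the object's netlink notification so userspace sees
	 * the appearing or disappearing nested handle */
}

static void foo_obj_cleanup(struct devlink *devlink, u32 obj_index,
			    u32 rel_index)
{
	/* the nested instance went away; drop the stored rel_index */
}

/* Called before the nested instance is registered. */
static int foo_obj_nest(struct foo_obj *obj, struct devlink *nested)
{
	return devlink_rel_nested_in_add(&obj->rel_index,
					 obj->devlink->index, obj->index,
					 foo_obj_notify, foo_obj_cleanup,
					 nested);
}

/* While dumping the object, expose the nested instance's handle. */
static int foo_obj_fill(struct sk_buff *msg, struct foo_obj *obj)
{
	return devlink_rel_devlink_handle_put(msg, obj->devlink,
					      obj->rel_index,
					      DEVLINK_ATTR_NESTED_DEVLINK,
					      NULL);
}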
@@ -142,6 +355,7 @@ int devl_register(struct devlink *devlink)
 
 	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
 	devlink_notify_register(devlink);
+	devlink_rel_nested_in_notify(devlink);
 
 	return 0;
 }
@@ -166,6 +380,7 @@ void devl_unregister(struct devlink *devlink)
 
 	devlink_notify_unregister(devlink);
 	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+	devlink_rel_put(devlink);
 }
 EXPORT_SYMBOL_GPL(devl_unregister);
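
Taken together, the two hooks tie the relationship to the nested instance's lifetime: devl_register() replays the nested-in notification once the instance becomes visible, while devl_unregister() clears the DEVLINK_REL_IN_USE mark before dropping its reference, so the scheduled work invokes the nested-in object's cleanup callback followed by a final notification.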