 #include <asm/checksum.h>
 
 #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-static inline
+static __always_inline
 __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 				      int len)
 {
@@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 #endif
 
 #ifndef HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user
+static __always_inline __wsum csum_and_copy_to_user
 (const void *src, void __user *dst, int len)
 {
 	__wsum sum = csum_partial(src, len, ~0U);
@@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user
 #endif
 
 #ifndef _HAVE_ARCH_CSUM_AND_COPY
-static inline __wsum
+static __always_inline __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
 	memcpy(dst, src, len);
@@ -54,76 +54,77 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len)
 #endif
 
 #ifndef HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
 {
 	u32 res = (__force u32)csum;
 	res += (__force u32)addend;
 	return (__force __wsum)(res + (res < (__force u32)addend));
 }
 #endif
 
-static inline __wsum csum_sub(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
 {
 	return csum_add(csum, ~addend);
 }
 
-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
 {
 	u16 res = (__force u16)csum;
 
 	res += (__force u16)addend;
 	return (__force __sum16)(res + (res < (__force u16)addend));
 }
 
-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
 {
 	return csum16_add(csum, ~addend);
 }
 
-static inline __wsum csum_shift(__wsum sum, int offset)
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
 {
 	/* rotate sum to align it with a 16b boundary */
 	if (offset & 1)
 		return (__force __wsum)ror32((__force u32)sum, 8);
 	return sum;
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_add(__wsum csum, __wsum csum2, int offset)
 {
 	return csum_add(csum, csum_shift(csum2, offset));
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
 {
 	return csum_block_add(csum, csum2, offset);
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_sub(__wsum csum, __wsum csum2, int offset)
 {
 	return csum_block_add(csum, ~csum2, offset);
 }
 
-static inline __wsum csum_unfold(__sum16 n)
+static __always_inline __wsum csum_unfold(__sum16 n)
 {
 	return (__force __wsum)n;
 }
 
-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
 {
 	return csum_partial(buff, len, sum);
 }
 
 #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
 
-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
 {
 	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
 }
 
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
 {
 	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
 
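(Aside, not part of the patch: the `res + (res < addend)` pattern in csum_add() above is an end-around carry, i.e. ones' complement addition on a 32-bit accumulator. A minimal user-space sketch of the same idea follows; fold_carry_add() and the test values are made up for illustration and are not kernel APIs.)

/* Standalone sketch of end-around-carry addition; not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fold_carry_add(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	/* if the 32-bit addition wrapped, feed the carry back in */
	return res + (res < addend);
}

int main(void)
{
	/* 0xffffffff + 0x2 wraps to 0x1; adding the carry back gives 0x2 */
	printf("%#x\n", fold_carry_add(0xffffffffu, 0x2u));
	return 0;
}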
@@ -136,7 +137,7 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
  * m  : old value of a 16bit field
  * m' : new value of a 16bit field
  */
-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
 {
 	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
 }
@@ -150,16 +151,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
 				     __wsum diff, bool pseudohdr);
 
-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
-					    __be16 from, __be16 to,
-					    bool pseudohdr)
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+			      __be16 from, __be16 to, bool pseudohdr)
 {
 	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
 				 (__force __be32)to, pseudohdr);
 }
 
-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
-				    int start, int offset)
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+					     int start, int offset)
 {
 	__sum16 *psum = (__sum16 *)(ptr + offset);
 	__wsum delta;
@@ -175,12 +176,12 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
 	return delta;
 }
 
-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
 {
 	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
 }
 
-static inline __wsum wsum_negate(__wsum val)
+static __always_inline __wsum wsum_negate(__wsum val)
 {
 	return (__force __wsum)-((__force u32)val);
 }
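(Aside, not from the commit: csum_fold(), called by csum_replace_by_diff() and remcsum_unadjust() above, reduces a 32-bit running sum to the final 16-bit ones' complement checksum. The user-space sketch below mirrors the common generic implementation; fold16() is a hypothetical stand-in, not the kernel function, and per-architecture versions may differ.)

/* Standalone sketch of the fold step; not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold16(uint32_t sum)
{
	/* add the two 16-bit halves, absorb the carry, then complement */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* halves of 0x0001fffe sum to 0xffff, so the folded checksum is 0 */
	printf("%#x\n", (unsigned int)fold16(0x0001fffeu));
	return 0;
}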