@@ -97,7 +97,8 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
 		return restart_syscall();
 
 	if (dev_isalive(netdev)) {
-		if ((ret = (*set)(netdev, new)) == 0)
+		ret = (*set)(netdev, new);
+		if (ret == 0)
 			ret = len;
 	}
 	rtnl_unlock();
@@ -160,6 +161,7 @@ static ssize_t broadcast_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
 	struct net_device *ndev = to_net_dev(dev);
+
 	if (dev_isalive(ndev))
 		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
 	return -EINVAL;
@@ -170,7 +172,7 @@ static int change_carrier(struct net_device *dev, unsigned long new_carrier)
 {
 	if (!netif_running(dev))
 		return -EINVAL;
-	return dev_change_carrier(dev, (bool) new_carrier);
+	return dev_change_carrier(dev, (bool)new_carrier);
 }
 
 static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
@@ -183,9 +185,10 @@ static ssize_t carrier_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
-	if (netif_running(netdev)) {
+
+	if (netif_running(netdev))
 		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
-	}
+
 	return -EINVAL;
 }
 static DEVICE_ATTR_RW(carrier);
@@ -290,6 +293,7 @@ static ssize_t carrier_changes_show(struct device *dev,
 					char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
+
 	return sprintf(buf, fmt_dec,
 		       atomic_read(&netdev->carrier_changes));
 }
@@ -299,7 +303,7 @@ static DEVICE_ATTR_RO(carrier_changes);
 
 static int change_mtu(struct net_device *dev, unsigned long new_mtu)
 {
-	return dev_set_mtu(dev, (int) new_mtu);
+	return dev_set_mtu(dev, (int)new_mtu);
 }
 
 static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
@@ -311,7 +315,7 @@ NETDEVICE_SHOW_RW(mtu, fmt_dec);
 
 static int change_flags(struct net_device *dev, unsigned long new_flags)
 {
-	return dev_change_flags(dev, (unsigned int) new_flags);
+	return dev_change_flags(dev, (unsigned int)new_flags);
 }
 
 static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
@@ -362,8 +366,8 @@ static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
 }
 
 static ssize_t gro_flush_timeout_store(struct device *dev,
-					  struct device_attribute *attr,
-					  const char *buf, size_t len)
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
 {
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -412,7 +416,7 @@ static DEVICE_ATTR_RW(ifalias);
 
 static int change_group(struct net_device *dev, unsigned long new_group)
 {
-	dev_set_group(dev, (int) new_group);
+	dev_set_group(dev, (int)new_group);
 	return 0;
 }
 
@@ -426,7 +430,7 @@ static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
 
 static int change_proto_down(struct net_device *dev, unsigned long proto_down)
 {
-	return dev_change_proto_down(dev, (bool) proto_down);
+	return dev_change_proto_down(dev, (bool)proto_down);
 }
 
 static ssize_t proto_down_store(struct device *dev,
@@ -549,14 +553,14 @@ static ssize_t netstat_show(const struct device *d,
 	ssize_t ret = -EINVAL;
 
 	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
-			offset % sizeof(u64) != 0);
+		offset % sizeof(u64) != 0);
 
 	read_lock(&dev_base_lock);
 	if (dev_isalive(dev)) {
 		struct rtnl_link_stats64 temp;
 		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 
-		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
+		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
 	}
 	read_unlock(&dev_base_lock);
 	return ret;
@@ -565,7 +569,7 @@ static ssize_t netstat_show(const struct device *d,
 /* generate a read-only statistics attribute */
 #define NETSTAT_ENTRY(name)						\
 static ssize_t name##_show(struct device *d,				\
-		struct device_attribute *attr, char *buf)		\
+			   struct device_attribute *attr, char *buf)	\
 {									\
 	return netstat_show(d, attr, buf,				\
 			    offsetof(struct rtnl_link_stats64, name));	\
@@ -625,7 +629,6 @@ static struct attribute *netstat_attrs[] __ro_after_init = {
 	NULL
 };
 
-
 static const struct attribute_group netstat_group = {
 	.name  = "statistics",
 	.attrs  = netstat_attrs,
@@ -647,8 +650,8 @@ static const struct attribute_group wireless_group = {
 #endif /* CONFIG_SYSFS */
 
 #ifdef CONFIG_SYSFS
-#define to_rx_queue_attr(_attr) container_of(_attr,		\
-	struct rx_queue_attribute, attr)
+#define to_rx_queue_attr(_attr) \
+	container_of(_attr, struct rx_queue_attribute, attr)
 
 #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
 
@@ -725,8 +728,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	}
 
 	map = kzalloc(max_t(unsigned int,
-	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
-	    GFP_KERNEL);
+			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+		      GFP_KERNEL);
 	if (!map) {
 		free_cpumask_var(mask);
 		return -ENOMEM;
@@ -736,9 +739,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	for_each_cpu_and(cpu, mask, cpu_online_mask)
 		map->cpus[i++] = cpu;
 
-	if (i)
+	if (i) {
 		map->len = i;
-	else {
+	} else {
 		kfree(map);
 		map = NULL;
 	}
@@ -827,8 +830,9 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 		table->mask = mask;
 		for (count = 0; count <= mask; count++)
 			table->flows[count].cpu = RPS_NO_CPU;
-	} else
+	} else {
 		table = NULL;
+	}
 
 	spin_lock(&rps_dev_flow_lock);
 	old_table = rcu_dereference_protected(queue->rps_flow_table,
@@ -865,7 +869,6 @@ static void rx_queue_release(struct kobject *kobj)
 	struct rps_map *map;
 	struct rps_dev_flow_table *flow_table;
 
-
 	map = rcu_dereference_protected(queue->rps_map, 1);
 	if (map) {
 		RCU_INIT_POINTER(queue->rps_map, NULL);
@@ -910,7 +913,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
 
 	kobj->kset = dev->queues_kset;
 	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
-	    "rx-%u", index);
+				     "rx-%u", index);
 	if (error)
 		return error;
 
@@ -974,8 +977,8 @@ struct netdev_queue_attribute {
 	ssize_t (*store)(struct netdev_queue *queue,
 			 const char *buf, size_t len);
 };
-#define to_netdev_queue_attr(_attr) container_of(_attr,		\
-	struct netdev_queue_attribute, attr)
+#define to_netdev_queue_attr(_attr) \
+	container_of(_attr, struct netdev_queue_attribute, attr)
 
 #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
 
@@ -1104,9 +1107,9 @@ static ssize_t bql_set(const char *buf, const size_t count,
 	unsigned int value;
 	int err;
 
-	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
+	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
 		value = DQL_MAX_LIMIT;
-	else {
+	} else {
 		err = kstrtouint(buf, 10, &value);
 		if (err < 0)
 			return err;
@@ -1320,7 +1323,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
 
 	kobj->kset = dev->queues_kset;
 	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
-	    "tx-%u", index);
+				     "tx-%u", index);
 	if (error)
 		return error;
 
@@ -1377,7 +1380,7 @@ static int register_queue_kobjects(struct net_device *dev)
 
 #ifdef CONFIG_SYSFS
 	dev->queues_kset = kset_create_and_add("queues",
-	    NULL, &dev->dev.kobj);
+					       NULL, &dev->dev.kobj);
 	if (!dev->queues_kset)
 		return -ENOMEM;
 	real_rx = dev->real_num_rx_queues;
@@ -1467,7 +1470,8 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 
 	/* pass ifindex to uevent.
 	 * ifindex is useful as it won't change (interface name may change)
-	 * and is what RtNetlink uses natively. */
+	 * and is what RtNetlink uses natively.
+	 */
 	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
 
 exit:
@@ -1542,7 +1546,7 @@ EXPORT_SYMBOL(of_find_net_device_by_node);
  */
 void netdev_unregister_kobject(struct net_device *ndev)
 {
-	struct device *dev = &(ndev->dev);
+	struct device *dev = &ndev->dev;
 
 	if (!atomic_read(&dev_net(ndev)->count))
 		dev_set_uevent_suppress(dev, 1);
@@ -1559,7 +1563,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
 /* Create sysfs entries for network device. */
 int netdev_register_kobject(struct net_device *ndev)
 {
-	struct device *dev = &(ndev->dev);
+	struct device *dev = &ndev->dev;
 	const struct attribute_group **groups = ndev->sysfs_groups;
 	int error = 0;
 