 
 #include <ei.h>
 
-struct netif {
-    // NETLINK_ROUTE socket information
-    struct mnl_socket *nl;
-
-    // NETLINK_KOBJECT_UEVENT socket information
-    struct mnl_socket *nl_uevent;
-
-    // Netlink buffering
-    char nlbuf[8192]; // See MNL_SOCKET_BUFFER_SIZE
-
-    // Erlang response processing
-    char resp[8192];
-    int resp_index;
-};
-
-/**
- * @brief Synchronously send a response back to Erlang
- *
- * @param response what to send back
- */
-static void erlcmd_send(char *response, size_t len)
+static void erlcmd_write_header_len(char *response, size_t len)
 {
     uint16_t be_len = htons(len - sizeof(uint16_t));
     memcpy(response, &be_len, sizeof(be_len));
+}
 
+static void write_all(char *response, size_t len)
+{
     size_t wrote = 0;
     do {
         ssize_t amount_written = write(STDOUT_FILENO, response + wrote, len - wrote);
@@ -72,36 +55,23 @@ static void erlcmd_send(char *response, size_t len)
     } while (wrote < len);
 }
 
-static void netif_init(struct netif *nb)
+static struct mnl_socket *uevent_open()
 {
-    memset(nb, 0, sizeof(*nb));
-    nb->nl = mnl_socket_open(NETLINK_ROUTE);
-    if (!nb->nl)
-        err(EXIT_FAILURE, "mnl_socket_open (NETLINK_ROUTE)");
-
-    if (mnl_socket_bind(nb->nl, RTMGRP_LINK, MNL_SOCKET_AUTOPID) < 0)
-        err(EXIT_FAILURE, "mnl_socket_bind");
-
-    nb->nl_uevent = mnl_socket_open(NETLINK_KOBJECT_UEVENT);
-    if (!nb->nl_uevent)
+    struct mnl_socket *nl_uevent = mnl_socket_open2(NETLINK_KOBJECT_UEVENT, O_NONBLOCK);
+    if (!nl_uevent)
         err(EXIT_FAILURE, "mnl_socket_open (NETLINK_KOBJECT_UEVENT)");
 
     // There is one single group in kobject over netlink
-    if (mnl_socket_bind(nb->nl_uevent, (1 << 0), MNL_SOCKET_AUTOPID) < 0)
+    if (mnl_socket_bind(nl_uevent, (1 << 0), MNL_SOCKET_AUTOPID) < 0)
         err(EXIT_FAILURE, "mnl_socket_bind");
 
     // Turn off ENOBUFS notifications since there's nothing that we can do
     // about them.
     unsigned int val = 1;
-    if (mnl_socket_setsockopt(nb->nl_uevent, NETLINK_NO_ENOBUFS, &val, sizeof(val)) < 0)
+    if (mnl_socket_setsockopt(nl_uevent, NETLINK_NO_ENOBUFS, &val, sizeof(val)) < 0)
         err(EXIT_FAILURE, "mnl_socket_setsockopt(NETLINK_NO_ENOBUFS)");
-}
 
-static void netif_cleanup(struct netif *nb)
-{
-    mnl_socket_close(nb->nl);
-    mnl_socket_close(nb->nl_uevent);
-    nb->nl = NULL;
+    return nl_uevent;
 }
 
 static void str_tolower(char *str)
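
A note on the hunk above (not part of the change itself): binding to group `(1 << 0)` subscribes to the kernel's single kobject uevent multicast group, and `mnl_socket_open2(..., O_NONBLOCK)` keeps receives from blocking the poll loop. For readers unfamiliar with libmnl, a rough plain-socket sketch of what `uevent_open()` asks it to do looks like this (a reference sketch, not code from the commit):

#include <err.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int uevent_open_raw(void)
{
    // NETLINK_KOBJECT_UEVENT carries the kernel's device add/remove events
    int fd = socket(AF_NETLINK, SOCK_DGRAM | SOCK_NONBLOCK, NETLINK_KOBJECT_UEVENT);
    if (fd < 0)
        err(EXIT_FAILURE, "socket(NETLINK_KOBJECT_UEVENT)");

    struct sockaddr_nl addr = {
        .nl_family = AF_NETLINK,
        .nl_groups = 1,   // the single uevent multicast group
    };
    if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
        err(EXIT_FAILURE, "bind");

    return fd;
}
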
@@ -156,18 +126,19 @@ static int ei_encode_devpath(char *buf, int *index, char *devpath, char **end_d
     return ei_encode_empty_list(buf, index);
 }
 
-static void nl_uevent_process(struct netif *nb)
+static int nl_uevent_process_one(struct mnl_socket *nl_uevent, char *resp)
 {
-    int bytecount = mnl_socket_recvfrom(nb->nl_uevent, nb->nlbuf, sizeof(nb->nlbuf));
+    char nlbuf[8192]; // See MNL_SOCKET_BUFFER_SIZE
+    int bytecount = mnl_socket_recvfrom(nl_uevent, nlbuf, sizeof(nlbuf));
     if (bytecount <= 0)
         err(EXIT_FAILURE, "mnl_socket_recvfrom");
 
-    char *str = nb->nlbuf;
+    char *str = nlbuf;
     char *str_end = str + bytecount;
 
     debug("uevent: %s", str);
-    nb->resp_index = sizeof(uint16_t); // Skip over payload size
-    ei_encode_version(nb->resp, &nb->resp_index);
+    int resp_index = sizeof(uint16_t); // Skip over payload size
+    ei_encode_version(resp, &resp_index);
 
     // The uevent comes in with the form:
     //
@@ -179,21 +150,21 @@ static void nl_uevent_process(struct netif *nb)
     // The kv_map contains all of the kv pairs in the uevent except
     // ACTION, DEVPATH, SEQNUM, SYNTH_UUID.
 
-    ei_encode_tuple_header(nb->resp, &nb->resp_index, 3);
+    ei_encode_tuple_header(resp, &resp_index, 3);
 
     char *atsign = strchr(str, '@');
     if (!atsign)
-        return;
+        return 0;
     *atsign = '\0';
 
     // action
-    ei_encode_elixir_string(nb->resp, &nb->resp_index, str);
+    ei_encode_elixir_string(resp, &resp_index, str);
 
     // devpath - filter anything that's not under "/devices"
     str = atsign + 1;
     if (strncmp("/devices", str, 8) != 0)
-        return;
-    ei_encode_devpath(nb->resp, &nb->resp_index, str, &str);
+        return 0;
+    ei_encode_devpath(resp, &resp_index, str, &str);
 
 #define MAX_KV_PAIRS 16
     int kvpairs_count = 0;
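
The loop that fills `keys[]` and `values[]` falls between these two hunks, so it is not shown in the diff. As a rough sketch of the usual approach (an assumption, not the file's actual code): after `action@devpath`, the rest of the uevent datagram is a run of NUL-terminated `KEY=VALUE` strings, and per the comment above the real loop also skips ACTION, DEVPATH, SEQNUM and SYNTH_UUID.

#include <string.h>

// Hypothetical helper: split NUL-separated "KEY=VALUE" strings in [pos, end)
static int parse_uevent_kv(char *pos, char *end,
                           char *keys[], char *values[], int max_pairs)
{
    int count = 0;

    for (; pos < end && count < max_pairs; pos += strlen(pos) + 1) {
        char *equals = strchr(pos, '=');
        if (!equals)
            continue;           // not a KEY=VALUE string; skip it

        *equals = '\0';
        keys[count] = pos;          // e.g. "SUBSYSTEM"
        values[count] = equals + 1; // e.g. "net"
        count++;
    }

    return count;
}
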
@@ -225,12 +196,26 @@ static void nl_uevent_process(struct netif *nb)
         kvpairs_count++;
     }
 
-    ei_encode_map_header(nb->resp, &nb->resp_index, kvpairs_count);
+    ei_encode_map_header(resp, &resp_index, kvpairs_count);
     for (int i = 0; i < kvpairs_count; i++) {
-        ei_encode_elixir_string(nb->resp, &nb->resp_index, keys[i]);
-        ei_encode_elixir_string(nb->resp, &nb->resp_index, values[i]);
+        ei_encode_elixir_string(resp, &resp_index, keys[i]);
+        ei_encode_elixir_string(resp, &resp_index, values[i]);
     }
-    erlcmd_send(nb->resp, nb->resp_index);
+    erlcmd_write_header_len(resp, resp_index);
+    return resp_index;
+}
+
+static void nl_uevent_process_all(struct mnl_socket *nl_uevent)
+{
+    // Erlang response processing
+    char resp[8192];
+    int resp_index;
+
+    resp_index = nl_uevent_process_one(nl_uevent, resp);
+    if (resp_index <= 0)
+        return;
+
+    write_all(resp, resp_index);
 }
 
 static int filter(const struct dirent *dirp)
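
A note on the new send path: `nl_uevent_process_one()` reserves two bytes at the front of `resp`, `erlcmd_write_header_len()` backfills them with the big-endian payload length (hence the `- sizeof(uint16_t)`), and `nl_uevent_process_all()` writes the whole frame to stdout. This looks like the 2-byte length framing an Erlang port opened in `{packet, 2}` mode expects, though the diff itself doesn't say so. A minimal sketch of sending some other term through the same helpers, assuming it sits in the same file (which already includes ei.h) below the two static helpers:

#include <stdint.h>

static void send_hello(void)
{
    char resp[64];
    int resp_index = sizeof(uint16_t);       // reserve room for the length header

    ei_encode_version(resp, &resp_index);    // ETF version byte
    ei_encode_atom(resp, &resp_index, "hello");

    erlcmd_write_header_len(resp, resp_index);  // backfill the 2-byte header
    write_all(resp, resp_index);                // then push the whole frame
}
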
@@ -287,8 +272,7 @@ int uevent_main(int argc, char *argv[])
     (void) argc;
     (void) argv;
 
-    struct netif nb;
-    netif_init(&nb);
+    struct mnl_socket *nl_uevent = uevent_open();
 
     // It's necessary to run the discovery process after every start to avoid
     // missing device additions. Removals between restarts can still be missed.
@@ -299,7 +283,7 @@ int uevent_main(int argc, char *argv[])
     for (;;) {
         struct pollfd fdset[2];
 
-        fdset[0].fd = mnl_socket_get_fd(nb.nl_uevent);
+        fdset[0].fd = mnl_socket_get_fd(nl_uevent);
         fdset[0].events = POLLIN;
         fdset[0].revents = 0;
 
@@ -317,13 +301,13 @@ int uevent_main(int argc, char *argv[])
         }
 
         if (fdset[0].revents & (POLLIN | POLLHUP))
-            nl_uevent_process(&nb);
+            nl_uevent_process_all(nl_uevent);
 
         // Any notification from Erlang is to exit
         if (fdset[1].revents & (POLLIN | POLLHUP))
             break;
     }
 
-    netif_cleanup(&nb);
+    mnl_socket_close(nl_uevent);
     return 0;
 }