Line data Source code
1 : /*
2 : * RPC host
3 : *
4 : * Implements samba-dcerpcd service.
5 : *
6 : * This program is free software; you can redistribute it and/or modify
7 : * it under the terms of the GNU General Public License as published by
8 : * the Free Software Foundation; either version 3 of the License, or
9 : * (at your option) any later version.
10 : *
11 : * This program is distributed in the hope that it will be useful,
12 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 : * GNU General Public License for more details.
15 : *
16 : * You should have received a copy of the GNU General Public License
17 : * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 : */
19 :
20 : /*
21 : * This binary has two usage modes:
22 : *
23 : * In the normal case when invoked from smbd or winbind it is given a
24 : * directory to scan via --libexec-rpcds and will invoke on demand any
25 : * binaries it finds there starting with rpcd_ when a named pipe
26 : * connection is requested.
27 : *
28 : * In the second mode it can be started explicitly from system startup
29 : * scripts.
30 : *
31 : * When Samba is set up as an Active Directory Domain Controller the
32 : * normal samba binary overrides and provides DCERPC services, whilst
33 : * allowing samba-dcerpcd to provide the services that smbd used to
34 : * provide in that set-up, such as SRVSVC.
35 : *
36 : * The second mode can also be useful for use outside of the Samba framework,
37 : * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38 : * it behaves like inetd and listens on sockets on behalf of RPC server
39 : * implementations.
40 : */
41 :
42 : #include "replace.h"
43 : #include <fnmatch.h>
44 : #include "lib/cmdline/cmdline.h"
45 : #include "lib/cmdline/closefrom_except.h"
46 : #include "source3/include/includes.h"
47 : #include "source3/include/auth.h"
48 : #include "rpc_sock_helper.h"
49 : #include "messages.h"
50 : #include "lib/util_file.h"
51 : #include "lib/util/tevent_unix.h"
52 : #include "lib/util/tevent_ntstatus.h"
53 : #include "lib/util/smb_strtox.h"
54 : #include "lib/util/debug.h"
55 : #include "lib/util/server_id.h"
56 : #include "lib/util/util_tdb.h"
57 : #include "lib/tdb_wrap/tdb_wrap.h"
58 : #include "lib/async_req/async_sock.h"
59 : #include "librpc/rpc/dcerpc_util.h"
60 : #include "lib/tsocket/tsocket.h"
61 : #include "libcli/named_pipe_auth/npa_tstream.h"
62 : #include "librpc/gen_ndr/ndr_rpc_host.h"
63 : #include "source3/param/loadparm.h"
64 : #include "source3/lib/global_contexts.h"
65 : #include "lib/util/strv.h"
66 : #include "lib/util/pidfile.h"
67 : #include "source3/rpc_client/cli_pipe.h"
68 : #include "librpc/gen_ndr/ndr_epmapper.h"
69 : #include "librpc/gen_ndr/ndr_epmapper_c.h"
70 : #include "nsswitch/winbind_client.h"
71 : #include "libcli/security/dom_sid.h"
72 : #include "libcli/security/security_token.h"
73 :
74 : extern bool override_logfile;
75 :
76 : struct rpc_server;
77 : struct rpc_work_process;
78 :
79 : /*
80 : * samba-dcerpcd state to keep track of rpcd_* servers.
81 : */
82 : struct rpc_host {
 : /* Samba messaging context */
83 : struct messaging_context *msg_ctx;
 : /* Talloc array: one rpc_server per rpcd_* executable we manage */
84 : struct rpc_server **servers;
 : /* Endpoint mapper tdb; presumably filled from the rpcd
 : * interface lists -- TODO confirm with the epm code */
85 : struct tdb_wrap *epmdb;
86 :
 : /* Pipe used as stdin for the worker processes -- assumption,
 : * confirm against the worker fork code */
87 : int worker_stdin[2];
88 :
 : /* True when started with --np-helper (see comment below) */
89 : bool np_helper;
90 :
91 : /*
92 : * If we're started with --np-helper but nobody contacts us,
93 : * we need to exit after a while. This will be deleted once
94 : * the first real client connects and our self-exit mechanism
95 : * when we don't have any worker processes left kicks in.
96 : */
97 : struct tevent_timer *np_helper_shutdown;
98 : };
99 :
100 : /*
101 : * Map a RPC interface to a name. Used when filling the endpoint
102 : * mapper database
103 : */
104 : struct rpc_host_iface_name {
 : /* Interface syntax id (uuid/version) */
105 : struct ndr_syntax_id iface;
 : /* Talloc'ed human-readable name, e.g. "winreg" */
106 : char *name;
107 : };
108 :
109 : /*
110 : * rpc_host representation for listening sockets. ncacn_ip_tcp might
111 : * listen on multiple explicit IPs, all with the same port.
112 : */
113 : struct rpc_host_endpoint {
 : /* The rpc_server this endpoint belongs to -- presumably a
 : * back-pointer, confirm where it is assigned */
114 : struct rpc_server *server;
 : /* Parsed binding (transport plus "endpoint" option) */
115 : struct dcerpc_binding *binding;
 : /* Talloc array of interfaces offered on this endpoint */
116 : struct ndr_syntax_id *interfaces;
 : /* Listening socket fds, one per IP this endpoint listens on */
117 : int *fds;
118 : size_t num_fds;
119 : };
120 :
121 : /*
122 : * Staging area until we send the socket plus bind to the helper
123 : */
124 : struct rpc_host_pending_client {
125 : struct rpc_host_pending_client *prev, *next;
126 :
127 : /*
128 : * Pointer for the destructor to remove us from the list of
129 : * pending clients
130 : */
131 : struct rpc_server *server;
132 :
133 : /*
134 : * Waiter for client exit before a helper accepted the request
135 : */
136 : struct tevent_req *hangup_wait;
137 :
138 : /*
139 : * Info to pick the worker
140 : */
141 : struct ncacn_packet *bind_pkt;
142 :
143 : /*
144 : * This is what we send down to the worker
145 : */
 : /* Raw client socket fd */
146 : int sock;
 : /* Client metadata (bind packet blob, npa auth info) */
147 : struct rpc_host_client *client;
148 : };
149 :
150 : /*
151 : * Representation of one worker process. For each rpcd_* executable
152 : * there will be more than one of these.
153 : */
154 : struct rpc_work_process {
 : /* pid of the forked worker process */
155 : pid_t pid;
156 :
157 : /*
158 : * !available means:
159 : *
160 : * Worker forked but did not send its initial status yet (not
161 : * yet initialized)
162 : *
163 : * Worker died, but we did not receive SIGCHLD yet. We noticed
164 : * it because we couldn't send it a message.
165 : */
166 : bool available;
167 :
168 : /*
169 : * Incremented by us when sending a client, decremented by
170 : * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
171 : * client exits.
172 : */
173 : uint32_t num_associations;
174 : uint32_t num_connections;
175 :
176 : /*
177 : * Send SHUTDOWN to an idle child after a while
178 : */
179 : struct tevent_timer *exit_timer;
180 : };
181 :
182 : /*
183 : * State for a set of running instances of an rpcd_* server executable
184 : */
185 : struct rpc_server {
 : /* The rpc_host we belong to */
186 : struct rpc_host *host;
187 : /*
188 : * Index into the rpc_host_state->servers array
189 : */
190 : uint32_t server_index;
191 :
 : /* Full path of the rpcd_* binary */
192 : const char *rpc_server_exe;
193 :
 : /* Endpoints and interface names as reported by the rpcd
 : * via --list-interfaces */
194 : struct rpc_host_endpoint **endpoints;
195 : struct rpc_host_iface_name *iface_names;
196 :
 : /* Limits parsed from the first two --list-interfaces lines */
197 : size_t max_workers;
198 : size_t idle_seconds;
199 :
200 : /*
201 : * "workers" can be larger than "max_workers": Internal
202 : * connections require an idle worker to avoid deadlocks
203 : * between RPC servers: netlogon requires samr, everybody
204 : * requires winreg. And if a deep call in netlogon asks for a
205 : * samr connection, this must never end up in the same
206 : * process. named_pipe_auth_req_info8->need_idle_server is set
207 : * in those cases.
208 : */
209 : struct rpc_work_process *workers;
210 :
 : /* Clients staged here until a worker accepts them */
211 : struct rpc_host_pending_client *pending_clients;
212 : };
213 :
214 : struct rpc_server_get_endpoints_state {
 : /* argv for the helper: exe --list-interfaces --configfile=... */
215 : char **argl;
 : /* Default ncalrpc endpoint: basename of the rpcd binary */
216 : char *ncalrpc_endpoint;
 : /* If not NCA_UNKNOWN, serve only this transport */
217 : enum dcerpc_transport_t only_transport;
218 :
 : /* Parse results from the rpcd's --list-interfaces output */
219 : struct rpc_host_iface_name *iface_names;
220 : struct rpc_host_endpoint **endpoints;
221 :
 : /* First two lines of the rpcd output */
222 : unsigned long num_workers;
223 : unsigned long idle_seconds;
224 : };
225 :
226 : static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
227 :
228 : /**
229 : * @brief Query interfaces from an rpcd helper
230 : *
231 : * Spawn a rpcd helper, ask it for the interfaces it serves via
232 : * --list-interfaces, parse the output
233 : *
234 : * @param[in] mem_ctx Memory context for the tevent_req
235 : * @param[in] ev Event context to run this on
236 : * @param[in] rpc_server_exe Binary to ask with --list-interfaces
237 : * @param[in] only_transport Filter out anything but this
238 : * @return The tevent_req representing this process
239 : */
240 :
241 728 : static struct tevent_req *rpc_server_get_endpoints_send(
242 : TALLOC_CTX *mem_ctx,
243 : struct tevent_context *ev,
244 : const char *rpc_server_exe,
245 : enum dcerpc_transport_t only_transport)
246 : {
247 728 : struct tevent_req *req = NULL, *subreq = NULL;
248 728 : struct rpc_server_get_endpoints_state *state = NULL;
249 728 : const char *progname = NULL;
250 :
251 728 : req = tevent_req_create(
252 : mem_ctx, &state, struct rpc_server_get_endpoints_state);
253 728 : if (req == NULL) {
254 0 : return NULL;
255 : }
256 728 : state->only_transport = only_transport;
257 :
 : /* Default ncalrpc endpoint name is the basename of the binary */
258 728 : progname = strrchr(rpc_server_exe, '/');
259 728 : if (progname != NULL) {
260 728 : progname += 1;
261 : } else {
262 0 : progname = rpc_server_exe;
263 : }
264 :
265 728 : state->ncalrpc_endpoint = talloc_strdup(state, progname);
266 728 : if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
267 0 : return tevent_req_post(req, ev);
268 : }
269 :
 : /*
 : * str_list_make_empty() allocates the argv list. The previous
 : * talloc_array(state, char *, 4) here was dead code: its
 : * result was immediately overwritten by this call and leaked
 : * until "state" was freed, so it has been removed.
 : */
275 728 : state->argl = str_list_make_empty(state);
276 728 : str_list_add_printf(&state->argl, "%s", rpc_server_exe);
277 728 : str_list_add_printf(&state->argl, "--list-interfaces");
278 728 : str_list_add_printf(
279 728 : &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
280 :
 : /* str_list_add_printf() presumably NULLs the list on alloc
 : * failure; this single check catches all of the above */
281 728 : if (tevent_req_nomem(state->argl, req)) {
282 0 : return tevent_req_post(req, ev);
283 : }
284 :
 : /* Run the helper, collect up to 64k of its stdout */
285 728 : subreq = file_ploadv_send(state, ev, state->argl, 65536);
286 728 : if (tevent_req_nomem(subreq, req)) {
287 0 : return tevent_req_post(req, ev);
288 : }
289 728 : tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
290 728 : return req;
291 : }
292 :
293 : /*
294 : * Parse a line of format
295 : *
296 : * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
297 : *
298 : * and add it to the "piface_names" array.
299 : */
300 :
301 1350 : static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
302 : TALLOC_CTX *mem_ctx,
303 : struct rpc_host_iface_name **piface_names,
304 : const char *line)
305 : {
306 1350 : struct rpc_host_iface_name *iface_names = *piface_names;
307 1350 : struct rpc_host_iface_name *tmp = NULL, *result = NULL;
308 1350 : size_t i, num_ifaces = talloc_array_length(iface_names);
309 : struct ndr_syntax_id iface;
310 1350 : char *name = NULL;
311 : bool ok;
312 :
 : /* Leading part of the line is "uuid/version" */
313 1350 : ok = ndr_syntax_id_from_string(line, &iface);
314 1350 : if (!ok) {
315 0 : DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
316 : line);
317 0 : return NULL;
318 : }
319 :
 : /* The name follows the first space */
320 1350 : name = strchr(line, ' ');
321 1350 : if (name == NULL) {
322 0 : return NULL;
323 : }
324 1350 : name += 1;
325 :
 : /* Already known? Return the existing entry */
326 3420 : for (i=0; i<num_ifaces; i++) {
327 2070 : result = &iface_names[i];
328 :
329 2070 : if (ndr_syntax_id_equal(&result->iface, &iface)) {
330 0 : return result;
331 : }
332 : }
333 :
 : /* Guard against size_t overflow of the array length */
334 1350 : if (num_ifaces + 1 < num_ifaces) {
335 0 : return NULL;
336 : }
337 :
 : /* Dup first; talloc_move below reparents it into the array */
338 1350 : name = talloc_strdup(mem_ctx, name);
339 1350 : if (name == NULL) {
340 0 : return NULL;
341 : }
342 :
343 1350 : tmp = talloc_realloc(
344 : mem_ctx,
345 : iface_names,
346 : struct rpc_host_iface_name,
347 : num_ifaces + 1);
348 1350 : if (tmp == NULL) {
349 0 : TALLOC_FREE(name);
350 0 : return NULL;
351 : }
352 1350 : iface_names = tmp;
353 :
354 1350 : result = &iface_names[num_ifaces];
355 :
356 1350 : *result = (struct rpc_host_iface_name) {
357 : .iface = iface,
358 1350 : .name = talloc_move(iface_names, &name),
359 : };
360 :
361 1350 : *piface_names = iface_names;
362 :
363 1350 : return result;
364 : }
365 :
 : /* Linear search for "iface" in the talloc array, NULL if absent */
366 1350 : static struct rpc_host_iface_name *rpc_host_iface_names_find(
367 : struct rpc_host_iface_name *iface_names,
368 : const struct ndr_syntax_id *iface)
369 : {
370 1350 : size_t i, num_iface_names = talloc_array_length(iface_names);
371 :
372 3420 : for (i=0; i<num_iface_names; i++) {
373 3420 : struct rpc_host_iface_name *iface_name = &iface_names[i];
374 :
375 3420 : if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
376 1350 : return iface_name;
377 : }
378 : }
379 :
380 0 : return NULL;
381 : }
382 :
 : /*
 : * Two bindings refer to the same endpoint if their transport
 : * matches and their "endpoint" option is equal, or absent on both.
 : */
383 3746 : static bool dcerpc_binding_same_endpoint(
384 : const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
385 : {
386 3746 : enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
387 3746 : enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
388 3746 : const char *e1 = NULL, *e2 = NULL;
389 : int cmp;
390 :
391 3746 : if (t1 != t2) {
392 714 : return false;
393 : }
394 :
395 3032 : e1 = dcerpc_binding_get_string_option(b1, "endpoint");
396 3032 : e2 = dcerpc_binding_get_string_option(b2, "endpoint");
397 :
 : /* Both unset counts as equal, one unset does not */
398 3032 : if ((e1 == NULL) && (e2 == NULL)) {
399 54 : return true;
400 : }
401 2978 : if ((e1 == NULL) || (e2 == NULL)) {
402 0 : return false;
403 : }
404 2978 : cmp = strcmp(e1, e2);
405 2978 : return (cmp == 0);
406 : }
407 :
408 : /**
409 : * @brief Filter whether we want to serve an endpoint
410 : *
411 : * samba-dcerpcd might want to serve all endpoints a rpcd reported to
412 : * us via --list-interfaces.
413 : *
414 : * In member mode, we only serve named pipes. Indicated by NCACN_NP
415 : * passed in via "only_transport".
416 : *
417 : * @param[in] binding Which binding is in question?
418 : * @param[in] only_transport Exclusive transport to serve
419 : * @return Do we want to serve "binding" from samba-dcerpcd?
420 : */
421 :
422 3396 : static bool rpc_host_serve_endpoint(
423 : struct dcerpc_binding *binding,
424 : enum dcerpc_transport_t only_transport)
425 : {
426 : enum dcerpc_transport_t transport =
427 3396 : dcerpc_binding_get_transport(binding);
428 :
429 3396 : if (only_transport == NCA_UNKNOWN) {
430 : /* no filter around */
431 510 : return true;
432 : }
433 :
434 2886 : if (transport != only_transport) {
435 : /* filter out */
436 1531 : return false;
437 : }
438 :
 : /* transport matches the exclusive filter */
439 1355 : return true;
440 : }
441 :
 : /*
 : * Parse "binding_string", apply the transport filter and return
 : * the matching endpoint from state->endpoints, creating and
 : * appending a new one if none matches yet. Returns NULL when the
 : * endpoint is filtered out or on error.
 : */
442 3396 : static struct rpc_host_endpoint *rpc_host_endpoint_find(
443 : struct rpc_server_get_endpoints_state *state,
444 : const char *binding_string)
445 : {
446 3396 : size_t i, num_endpoints = talloc_array_length(state->endpoints);
447 3396 : struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
448 : enum dcerpc_transport_t transport;
449 : NTSTATUS status;
450 : bool serve_this;
451 :
452 3396 : ep = talloc_zero(state, struct rpc_host_endpoint);
453 3396 : if (ep == NULL) {
454 0 : goto fail;
455 : }
456 :
457 3396 : status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
458 3396 : if (!NT_STATUS_IS_OK(status)) {
459 0 : DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
460 : binding_string,
461 : nt_errstr(status));
462 0 : goto fail;
463 : }
464 :
465 3396 : serve_this = rpc_host_serve_endpoint(
466 3396 : ep->binding, state->only_transport);
467 3396 : if (!serve_this) {
468 1531 : goto fail;
469 : }
470 :
471 1865 : transport = dcerpc_binding_get_transport(ep->binding);
472 :
473 1865 : if (transport == NCALRPC) {
474 150 : const char *ncalrpc_sock = dcerpc_binding_get_string_option(
475 150 : ep->binding, "endpoint");
476 :
477 150 : if (ncalrpc_sock == NULL) {
478 : /*
479 : * generic ncalrpc:, set program-specific
480 : * socket name. epmapper will redirect clients
481 : * properly.
482 : */
483 138 : status = dcerpc_binding_set_string_option(
484 138 : ep->binding,
485 : "endpoint",
486 138 : state->ncalrpc_endpoint);
487 138 : if (!NT_STATUS_IS_OK(status)) {
488 0 : DBG_DEBUG("dcerpc_binding_set_string_option "
489 : "failed: %s\n",
490 : nt_errstr(status));
491 0 : goto fail;
492 : }
493 : }
494 : }
495 :
 : /* Reuse an existing endpoint with the same transport+endpoint */
496 5343 : for (i=0; i<num_endpoints; i++) {
497 :
498 3746 : bool ok = dcerpc_binding_same_endpoint(
499 3746 : ep->binding, state->endpoints[i]->binding);
500 :
501 3746 : if (ok) {
502 268 : TALLOC_FREE(ep);
503 268 : return state->endpoints[i];
504 : }
505 : }
506 :
 : /* size_t overflow guard */
507 1597 : if (num_endpoints + 1 < num_endpoints) {
508 0 : goto fail;
509 : }
510 :
511 1597 : tmp = talloc_realloc(
512 : state,
513 : state->endpoints,
514 : struct rpc_host_endpoint *,
515 : num_endpoints + 1);
516 1597 : if (tmp == NULL) {
517 0 : goto fail;
518 : }
519 1597 : state->endpoints = tmp;
520 1597 : state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);
521 :
522 1597 : return state->endpoints[num_endpoints];
523 1531 : fail:
524 1531 : TALLOC_FREE(ep);
525 1531 : return NULL;
526 : }
527 :
 : /* Append "iface" to the talloc array unless already present */
528 1865 : static bool ndr_interfaces_add_unique(
529 : TALLOC_CTX *mem_ctx,
530 : struct ndr_syntax_id **pifaces,
531 : const struct ndr_syntax_id *iface)
532 : {
533 1865 : struct ndr_syntax_id *ifaces = *pifaces;
534 1865 : size_t i, num_ifaces = talloc_array_length(ifaces);
535 :
536 2229 : for (i=0; i<num_ifaces; i++) {
537 364 : if (ndr_syntax_id_equal(iface, &ifaces[i])) {
538 0 : return true;
539 : }
540 : }
541 :
 : /* size_t overflow guard */
542 1865 : if (num_ifaces + 1 < num_ifaces) {
543 0 : return false;
544 : }
545 1865 : ifaces = talloc_realloc(
546 : mem_ctx,
547 : ifaces,
548 : struct ndr_syntax_id,
549 : num_ifaces + 1);
550 1865 : if (ifaces == NULL) {
551 0 : return false;
552 : }
553 1865 : ifaces[num_ifaces] = *iface;
554 :
555 1865 : *pifaces = ifaces;
556 1865 : return true;
557 : }
558 :
559 : /*
560 : * Read the text reply from the rpcd_* process telling us what
561 : * endpoints it will serve when asked with --list-interfaces.
562 : */
563 728 : static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
564 : {
565 728 : struct tevent_req *req = tevent_req_callback_data(
566 : subreq, struct tevent_req);
567 728 : struct rpc_server_get_endpoints_state *state = tevent_req_data(
568 : req, struct rpc_server_get_endpoints_state);
569 728 : struct rpc_host_iface_name *iface = NULL;
570 728 : uint8_t *buf = NULL;
571 : size_t buflen;
572 728 : char **lines = NULL;
573 : int ret, i, num_lines;
574 :
575 728 : ret = file_ploadv_recv(subreq, state, &buf);
576 728 : TALLOC_FREE(subreq);
577 728 : if (tevent_req_error(req, ret)) {
578 0 : return;
579 : }
580 :
581 728 : buflen = talloc_get_size(buf);
582 728 : if (buflen == 0) {
583 0 : tevent_req_done(req);
584 0 : return;
585 : }
586 :
587 728 : lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
588 728 : if (tevent_req_nomem(lines, req)) {
589 0 : return;
590 : }
591 :
 : /*
 : * Expected format: line 0 is the worker limit, line 1 the idle
 : * timeout, everything after that interface/endpoint lines.
 : */
592 728 : if (num_lines < 2) {
593 0 : DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
594 0 : tevent_req_error(req, EINVAL);
595 0 : return;
596 : }
597 :
598 728 : state->num_workers = smb_strtoul(
599 : lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
600 728 : if (ret != 0) {
601 0 : DBG_DEBUG("Could not parse num_workers(%s): %s\n",
602 : lines[0],
603 : strerror(ret));
604 0 : tevent_req_error(req, ret);
605 0 : return;
606 : }
607 : /*
608 : * We need to limit the number of workers in order
609 : * to put the worker index into a 16-bit space,
610 : * in order to use a 16-bit association group space
611 : * per worker.
612 : */
613 728 : if (state->num_workers > 65536) {
614 0 : state->num_workers = 65536;
615 : }
616 :
617 1456 : state->idle_seconds = smb_strtoul(
618 728 : lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
619 728 : if (ret != 0) {
620 0 : DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
621 : lines[1],
622 : strerror(ret));
623 0 : tevent_req_error(req, ret);
624 0 : return;
625 : }
626 :
627 728 : DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
628 : state->num_workers,
629 : state->idle_seconds,
630 : state->argl[0]);
631 :
 : /*
 : * Non-indented lines introduce an interface; lines starting
 : * with a space are endpoint binding strings belonging to the
 : * most recently seen interface.
 : */
632 5474 : for (i=2; i<num_lines; i++) {
633 4746 : char *line = lines[i];
634 4746 : struct rpc_host_endpoint *endpoint = NULL;
635 : bool ok;
636 :
637 4746 : if (line[0] != ' ') {
638 1350 : iface = rpc_exe_parse_iface_line(
639 : state, &state->iface_names, line);
640 1350 : if (iface == NULL) {
641 0 : DBG_WARNING(
642 : "rpc_exe_parse_iface_line failed "
643 : "for: [%s] from %s\n",
644 : line,
645 : state->argl[0]);
646 0 : tevent_req_oom(req);
647 0 : return;
648 : }
649 1350 : continue;
650 : }
651 :
652 3396 : if (iface == NULL) {
653 0 : DBG_DEBUG("Interface GUID line missing\n");
654 0 : tevent_req_error(req, EINVAL);
655 0 : return;
656 : }
657 :
 : /* NULL also means "filtered out", hence only a DEBUG msg */
658 3396 : endpoint = rpc_host_endpoint_find(state, line+1);
659 3396 : if (endpoint == NULL) {
660 1531 : DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
661 : line+1);
662 1531 : continue;
663 : }
664 :
665 1865 : ok = ndr_interfaces_add_unique(
666 : endpoint,
667 : &endpoint->interfaces,
668 1865 : &iface->iface);
669 1865 : if (!ok) {
670 0 : DBG_DEBUG("ndr_interfaces_add_unique failed\n");
671 0 : tevent_req_oom(req);
672 0 : return;
673 : }
674 : }
675 :
676 728 : tevent_req_done(req);
677 : }
678 :
679 : /**
680 : * @brief Receive output from --list-interfaces
681 : *
682 : * @param[in] req The async req that just finished
683 : * @param[in] mem_ctx Where to put the output on
684 : * @param[out] endpoints The endpoints to be listened on
685 : * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
 : * @param[out] num_workers Worker limit reported by the rpcd
 : * @param[out] idle_seconds Idle timeout reported by the rpcd
686 : * @return 0/errno
687 : */
688 728 : static int rpc_server_get_endpoints_recv(
689 : struct tevent_req *req,
690 : TALLOC_CTX *mem_ctx,
691 : struct rpc_host_endpoint ***endpoints,
692 : struct rpc_host_iface_name **iface_names,
693 : size_t *num_workers,
694 : size_t *idle_seconds)
695 : {
696 728 : struct rpc_server_get_endpoints_state *state = tevent_req_data(
697 : req, struct rpc_server_get_endpoints_state);
698 : int err;
699 :
700 728 : if (tevent_req_is_unix_error(req, &err)) {
701 0 : tevent_req_received(req);
702 0 : return err;
703 : }
704 :
 : /* Hand ownership of the arrays over to the caller */
705 728 : *endpoints = talloc_move(mem_ctx, &state->endpoints);
706 728 : *iface_names = talloc_move(mem_ctx, &state->iface_names);
707 728 : *num_workers = state->num_workers;
708 728 : *idle_seconds = state->idle_seconds;
709 728 : tevent_req_received(req);
710 728 : return 0;
711 : }
712 :
713 : /*
714 : * For NCACN_NP we get the named pipe auth info from smbd, if a client
715 : * comes in via TCP or NCALPRC we need to invent it ourselves with
716 : * anonymous session info.
717 : */
718 :
719 932 : static NTSTATUS rpc_host_generate_npa_info8_from_sock(
720 : TALLOC_CTX *mem_ctx,
721 : enum dcerpc_transport_t transport,
722 : int sock,
723 : const struct samba_sockaddr *peer_addr,
724 : struct named_pipe_auth_req_info8 **pinfo8)
725 : {
726 932 : struct named_pipe_auth_req_info8 *info8 = NULL;
727 932 : struct samba_sockaddr local_addr = {
728 : .sa_socklen = sizeof(struct sockaddr_storage),
729 : };
730 932 : struct tsocket_address *taddr = NULL;
731 932 : char *remote_client_name = NULL;
732 932 : char *remote_client_addr = NULL;
733 932 : char *local_server_name = NULL;
734 932 : char *local_server_addr = NULL;
735 932 : char *(*tsocket_address_to_name_fn)(
736 : const struct tsocket_address *addr,
737 : TALLOC_CTX *mem_ctx) = NULL;
738 932 : NTSTATUS status = NT_STATUS_NO_MEMORY;
739 : int ret;
740 :
741 : /*
742 : * For NCACN_NP we get the npa info from smbd
743 : */
744 932 : SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));
745 :
 : /* Render addresses as inet strings (TCP) or unix paths (ncalrpc) */
746 932 : tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
747 932 : tsocket_address_inet_addr_string : tsocket_address_unix_path;
748 :
749 932 : info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
750 932 : if (info8 == NULL) {
751 0 : goto fail;
752 : }
753 932 : info8->session_info =
754 932 : talloc_zero(info8, struct auth_session_info_transport);
755 932 : if (info8->session_info == NULL) {
756 0 : goto fail;
757 : }
758 :
 : /* TCP/ncalrpc clients come in unauthenticated: anonymous session */
759 932 : status = make_session_info_anonymous(
760 932 : info8->session_info,
761 932 : &info8->session_info->session_info);
762 932 : if (!NT_STATUS_IS_OK(status)) {
763 0 : DBG_DEBUG("make_session_info_anonymous failed: %s\n",
764 : nt_errstr(status));
765 0 : goto fail;
766 : }
767 :
768 932 : ret = tsocket_address_bsd_from_samba_sockaddr(info8,
769 : peer_addr,
770 : &taddr);
771 932 : if (ret == -1) {
772 0 : status = map_nt_error_from_unix(errno);
773 0 : DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
774 : "%s\n",
775 : strerror(errno));
776 0 : goto fail;
777 : }
778 932 : remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
779 932 : if (remote_client_addr == NULL) {
780 0 : DBG_DEBUG("tsocket_address_to_name_fn failed\n");
781 0 : goto nomem;
782 : }
783 932 : TALLOC_FREE(taddr);
784 :
785 932 : remote_client_name = talloc_strdup(info8, remote_client_addr);
786 932 : if (remote_client_name == NULL) {
787 0 : DBG_DEBUG("talloc_strdup failed\n");
788 0 : goto nomem;
789 : }
790 :
791 932 : if (transport == NCACN_IP_TCP) {
792 798 : bool ok = samba_sockaddr_get_port(peer_addr,
793 : &info8->remote_client_port);
794 798 : if (!ok) {
795 0 : DBG_DEBUG("samba_sockaddr_get_port failed\n");
796 0 : status = NT_STATUS_INVALID_PARAMETER;
797 0 : goto fail;
798 : }
799 : }
800 :
 : /* Same procedure for our own, local side of the socket */
801 932 : ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
802 932 : if (ret == -1) {
803 0 : status = map_nt_error_from_unix(errno);
804 0 : DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
805 0 : goto fail;
806 : }
807 :
808 932 : ret = tsocket_address_bsd_from_samba_sockaddr(info8,
809 : &local_addr,
810 : &taddr);
811 932 : if (ret == -1) {
812 0 : status = map_nt_error_from_unix(errno);
813 0 : DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
814 : "%s\n",
815 : strerror(errno));
816 0 : goto fail;
817 : }
818 932 : local_server_addr = tsocket_address_to_name_fn(taddr, info8);
819 932 : if (local_server_addr == NULL) {
820 0 : DBG_DEBUG("tsocket_address_to_name_fn failed\n");
821 0 : goto nomem;
822 : }
823 932 : TALLOC_FREE(taddr);
824 :
825 932 : local_server_name = talloc_strdup(info8, local_server_addr);
826 932 : if (local_server_name == NULL) {
827 0 : DBG_DEBUG("talloc_strdup failed\n");
828 0 : goto nomem;
829 : }
830 :
831 932 : if (transport == NCACN_IP_TCP) {
832 798 : bool ok = samba_sockaddr_get_port(&local_addr,
833 : &info8->local_server_port);
834 798 : if (!ok) {
835 0 : DBG_DEBUG("samba_sockaddr_get_port failed\n");
836 0 : status = NT_STATUS_INVALID_PARAMETER;
837 0 : goto fail;
838 : }
839 : }
840 :
841 932 : if (transport == NCALRPC) {
842 : uid_t uid;
843 : gid_t gid;
844 :
845 134 : ret = getpeereid(sock, &uid, &gid);
846 134 : if (ret < 0) {
847 0 : status = map_nt_error_from_unix(errno);
848 0 : DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
849 0 : goto fail;
850 : }
851 :
852 134 : if (uid == sec_initial_uid()) {
853 :
854 : /*
855 : * Indicate "root" to gensec
856 : */
857 :
858 134 : TALLOC_FREE(remote_client_addr);
859 134 : TALLOC_FREE(remote_client_name);
860 :
861 134 : ret = tsocket_address_unix_from_path(
862 : info8,
863 : AS_SYSTEM_MAGIC_PATH_TOKEN,
864 : &taddr);
865 134 : if (ret == -1) {
866 0 : DBG_DEBUG("tsocket_address_unix_from_path "
867 : "failed\n");
868 0 : goto nomem;
869 : }
870 :
871 : remote_client_addr =
872 134 : tsocket_address_unix_path(taddr, info8);
873 134 : if (remote_client_addr == NULL) {
874 0 : DBG_DEBUG("tsocket_address_unix_path "
875 : "failed\n");
876 0 : goto nomem;
877 : }
878 : remote_client_name =
879 134 : talloc_strdup(info8, remote_client_addr);
880 134 : if (remote_client_name == NULL) {
881 0 : DBG_DEBUG("talloc_strdup failed\n");
882 0 : goto nomem;
883 : }
884 : }
885 : }
886 :
 : /* All strings above are talloc children of info8 */
887 932 : info8->remote_client_addr = remote_client_addr;
888 932 : info8->remote_client_name = remote_client_name;
889 932 : info8->local_server_addr = local_server_addr;
890 932 : info8->local_server_name = local_server_name;
891 :
892 932 : *pinfo8 = info8;
893 932 : return NT_STATUS_OK;
894 :
895 0 : nomem:
896 0 : status = NT_STATUS_NO_MEMORY;
897 0 : fail:
898 0 : TALLOC_FREE(info8);
899 0 : return status;
900 : }
901 :
902 : struct rpc_host_bind_read_state {
903 : struct tevent_context *ev;
904 :
 : /* The client socket we will eventually hand to a worker */
905 : int sock;
 : /* tstream over a dup of "sock", used to read the first packet */
906 : struct tstream_context *plain;
 : /* Named-pipe-auth stream layered on top, NCACN_NP only */
907 : struct tstream_context *npa_stream;
908 :
 : /* The client's parsed bind packet */
909 : struct ncacn_packet *pkt;
 : /* Collected client info (npa auth info, raw bind blob) */
910 : struct rpc_host_client *client;
911 : };
912 :
913 : static void rpc_host_bind_read_cleanup(
914 : struct tevent_req *req, enum tevent_req_state req_state);
915 : static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
916 : static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
917 :
918 : /*
919 : * Wait for a bind packet from a client.
920 : */
921 36180 : static struct tevent_req *rpc_host_bind_read_send(
922 : TALLOC_CTX *mem_ctx,
923 : struct tevent_context *ev,
924 : enum dcerpc_transport_t transport,
925 : int *psock,
926 : const struct samba_sockaddr *peer_addr)
927 : {
928 36180 : struct tevent_req *req = NULL, *subreq = NULL;
929 36180 : struct rpc_host_bind_read_state *state = NULL;
930 : int rc, sock_dup;
931 : NTSTATUS status;
932 :
933 36180 : req = tevent_req_create(
934 : mem_ctx, &state, struct rpc_host_bind_read_state);
935 36180 : if (req == NULL) {
936 0 : return NULL;
937 : }
938 36180 : state->ev = ev;
939 :
 : /* Take ownership of the caller's socket */
940 36180 : state->sock = *psock;
941 36180 : *psock = -1;
942 :
943 36180 : tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);
944 :
945 36180 : state->client = talloc_zero(state, struct rpc_host_client);
946 36180 : if (tevent_req_nomem(state->client, req)) {
947 0 : return tevent_req_post(req, ev);
948 : }
949 :
950 : /*
951 : * Dup the socket to read the first RPC packet:
952 : * tstream_bsd_existing_socket() takes ownership with
953 : * autoclose, but we need to send "sock" down to our worker
954 : * process later.
955 : */
956 36180 : sock_dup = dup(state->sock);
957 36180 : if (sock_dup == -1) {
958 0 : tevent_req_error(req, errno);
959 0 : return tevent_req_post(req, ev);
960 : }
961 :
962 36180 : rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
963 36180 : if (rc == -1) {
964 0 : DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
965 : strerror(errno));
966 0 : tevent_req_error(req, errno);
 : /* on failure the tstream did not take ownership */
967 0 : close(sock_dup);
968 0 : return tevent_req_post(req, ev);
969 : }
970 : /* as server we want to fail early */
971 36180 : tstream_bsd_fail_readv_first_error(state->plain, true);
972 :
 : /* Named pipes: smbd sends the npa auth info in-band first */
973 36180 : if (transport == NCACN_NP) {
974 35248 : subreq = tstream_npa_accept_existing_send(
975 : state,
976 : ev,
977 35248 : state->plain,
978 : FILE_TYPE_MESSAGE_MODE_PIPE,
979 : 0xff | 0x0400 | 0x0100,
980 : 4096);
981 35248 : if (tevent_req_nomem(subreq, req)) {
982 0 : return tevent_req_post(req, ev);
983 : }
984 35248 : tevent_req_set_callback(
985 : subreq, rpc_host_bind_read_got_npa, req);
986 35248 : return req;
987 : }
988 :
 : /* TCP/ncalrpc: synthesize anonymous npa info ourselves */
989 932 : status = rpc_host_generate_npa_info8_from_sock(
990 932 : state->client,
991 : transport,
992 932 : state->sock,
993 : peer_addr,
994 932 : &state->client->npa_info8);
995 932 : if (!NT_STATUS_IS_OK(status)) {
 : /* NOTE(review): any failure is reported as out-of-memory */
996 0 : tevent_req_oom(req);
997 0 : return tevent_req_post(req, ev);
998 : }
999 :
1000 932 : subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
1001 932 : if (tevent_req_nomem(subreq, req)) {
1002 0 : return tevent_req_post(req, ev);
1003 : }
1004 932 : tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
1005 932 : return req;
1006 : }
1007 :
 : /* On final teardown close the client socket if nobody took it */
1008 72360 : static void rpc_host_bind_read_cleanup(
1009 : struct tevent_req *req, enum tevent_req_state req_state)
1010 : {
1011 72360 : struct rpc_host_bind_read_state *state = tevent_req_data(
1012 : req, struct rpc_host_bind_read_state);
1013 :
1014 72360 : if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
1015 84 : close(state->sock);
1016 84 : state->sock = -1;
1017 : }
1018 72360 : }
1019 :
 : /* smbd sent the npa auth info; now read the client's bind PDU */
1020 35248 : static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
1021 : {
1022 35248 : struct tevent_req *req = tevent_req_callback_data(
1023 : subreq, struct tevent_req);
1024 35248 : struct rpc_host_bind_read_state *state = tevent_req_data(
1025 : req, struct rpc_host_bind_read_state);
1026 35248 : struct named_pipe_auth_req_info8 *info8 = NULL;
1027 : int ret, err;
1028 :
1029 35248 : ret = tstream_npa_accept_existing_recv(subreq,
1030 : &err,
1031 : state,
1032 : &state->npa_stream,
1033 : &info8,
1034 : NULL, /* transport */
1035 : NULL, /* remote_client_addr */
1036 : NULL, /* remote_client_name */
1037 : NULL, /* local_server_addr */
1038 : NULL, /* local_server_name */
1039 : NULL); /* session_info */
1040 35248 : if (ret == -1) {
1041 0 : tevent_req_error(req, err);
1042 0 : return;
1043 : }
1044 :
1045 35248 : state->client->npa_info8 = talloc_move(state->client, &info8);
1046 :
 : /* Read the bind packet through the npa stream */
1047 35248 : subreq = dcerpc_read_ncacn_packet_send(
1048 : state, state->ev, state->npa_stream);
1049 35248 : if (tevent_req_nomem(subreq, req)) {
1050 0 : return;
1051 : }
1052 35248 : tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
1053 : }
1054 :
/*
 * Callback: the client's first DCERPC packet (expected to be the bind)
 * arrived. The raw packet blob is kept on state->client so it can be
 * forwarded to the rpcd_* worker later.
 */
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		/* TODO: map the NTSTATUS instead of a blanket EINVAL */
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	/* Keep the parsed packet for rpc_host_bind_read_recv() */
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}
1080 :
/*
 * Report the result of rpc_host_bind_read_send: the raw socket fd,
 * the client description and the parsed bind packet.
 *
 * Ownership of the fd and of the talloc objects moves to the caller;
 * state->sock is reset to -1 so the cleanup hook won't close it.
 * Returns 0 or a unix error code.
 */
static int rpc_host_bind_read_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	int *sock,
	struct rpc_host_client **client,
	struct ncacn_packet **bind_pkt)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	/* fd ownership passes to the caller */
	*sock = state->sock;
	state->sock = -1;

	*client = talloc_move(mem_ctx, &state->client);
	*bind_pkt = talloc_move(mem_ctx, &state->pkt);
	tevent_req_received(req);
	return 0;
}
1105 :
1106 : /*
1107 : * Start the given rpcd_* binary.
1108 : */
1109 576 : static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1110 : {
1111 576 : struct rpc_work_process *worker = &server->workers[idx];
1112 576 : char **argv = NULL;
1113 576 : int ret = ENOMEM;
1114 :
1115 576 : argv = str_list_make_empty(server);
1116 576 : str_list_add_printf(
1117 : &argv, "%s", server->rpc_server_exe);
1118 576 : str_list_add_printf(
1119 : &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1120 576 : str_list_add_printf(
1121 : &argv, "--worker-group=%"PRIu32, server->server_index);
1122 576 : str_list_add_printf(
1123 : &argv, "--worker-index=%zu", idx);
1124 576 : str_list_add_printf(
1125 : &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1126 576 : if (!is_default_dyn_LOGFILEBASE()) {
1127 462 : str_list_add_printf(
1128 : &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1129 : }
1130 576 : if (argv == NULL) {
1131 0 : ret = ENOMEM;
1132 0 : goto fail;
1133 : }
1134 :
1135 576 : worker->pid = fork();
1136 1171 : if (worker->pid == -1) {
1137 0 : ret = errno;
1138 0 : goto fail;
1139 : }
1140 1171 : if (worker->pid == 0) {
1141 : /* Child. */
1142 595 : close(server->host->worker_stdin[1]);
1143 595 : ret = dup2(server->host->worker_stdin[0], 0);
1144 595 : if (ret != 0) {
1145 0 : exit(1);
1146 : }
1147 595 : execv(argv[0], argv);
1148 595 : _exit(1);
1149 : }
1150 :
1151 576 : DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1152 : server->rpc_server_exe,
1153 : idx,
1154 : (int)worker->pid);
1155 :
1156 576 : ret = 0;
1157 576 : fail:
1158 576 : TALLOC_FREE(argv);
1159 576 : return ret;
1160 : }
1161 :
1162 : /*
1163 : * Find an rpcd_* worker for an external client, respect server->max_workers
1164 : */
1165 5284 : static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
1166 : {
1167 5284 : struct rpc_work_process *worker = NULL;
1168 5284 : struct rpc_work_process *perfect_worker = NULL;
1169 5284 : struct rpc_work_process *best_worker = NULL;
1170 5284 : size_t empty_slot = SIZE_MAX;
1171 : size_t i;
1172 :
1173 13195 : for (i=0; i<server->max_workers; i++) {
1174 12644 : worker = &server->workers[i];
1175 :
1176 12644 : if (worker->pid == -1) {
1177 994 : empty_slot = MIN(empty_slot, i);
1178 994 : continue;
1179 : }
1180 11650 : if (!worker->available) {
1181 0 : continue;
1182 : }
1183 11650 : if (worker->num_associations == 0) {
1184 : /*
1185 : * We have an idle worker...
1186 : */
1187 4733 : perfect_worker = worker;
1188 4733 : break;
1189 : }
1190 6917 : if (best_worker == NULL) {
1191 : /*
1192 : * It's busy, but the best so far...
1193 : */
1194 2398 : best_worker = worker;
1195 2398 : continue;
1196 : }
1197 4519 : if (worker->num_associations < best_worker->num_associations) {
1198 : /*
1199 : * It's also busy, but has less association groups
1200 : * (logical clients)
1201 : */
1202 240 : best_worker = worker;
1203 240 : continue;
1204 : }
1205 4279 : if (worker->num_associations > best_worker->num_associations) {
1206 : /*
1207 : * It's not better
1208 : */
1209 90 : continue;
1210 : }
1211 : /*
1212 : * Ok, with the same number of association groups
1213 : * we pick the one with the lowest number of connections
1214 : */
1215 4189 : if (worker->num_connections < best_worker->num_connections) {
1216 0 : best_worker = worker;
1217 0 : continue;
1218 : }
1219 : }
1220 :
1221 5284 : if (perfect_worker != NULL) {
1222 4733 : return perfect_worker;
1223 : }
1224 :
1225 551 : if (empty_slot < SIZE_MAX) {
1226 248 : int ret = rpc_host_exec_worker(server, empty_slot);
1227 248 : if (ret != 0) {
1228 0 : DBG_WARNING("Could not fork worker: %s\n",
1229 : strerror(ret));
1230 : }
1231 248 : return NULL;
1232 : }
1233 :
1234 303 : if (best_worker != NULL) {
1235 303 : return best_worker;
1236 : }
1237 :
1238 0 : return NULL;
1239 : }
1240 :
1241 : /*
1242 : * Find an rpcd_* worker for an internal connection, possibly go beyond
1243 : * server->max_workers
1244 : */
1245 31362 : static struct rpc_work_process *rpc_host_find_idle_worker(
1246 : struct rpc_server *server)
1247 : {
1248 31362 : struct rpc_work_process *worker = NULL, *tmp = NULL;
1249 31362 : size_t i, num_workers = talloc_array_length(server->workers);
1250 31362 : size_t empty_slot = SIZE_MAX;
1251 : int ret;
1252 :
1253 34871 : for (i=server->max_workers; i<num_workers; i++) {
1254 34543 : worker = &server->workers[i];
1255 :
1256 34543 : if (worker->pid == -1) {
1257 272 : empty_slot = MIN(empty_slot, i);
1258 272 : continue;
1259 : }
1260 34271 : if (!worker->available) {
1261 489 : continue;
1262 : }
1263 33782 : if (worker->num_associations == 0) {
1264 31034 : return &server->workers[i];
1265 : }
1266 : }
1267 :
1268 328 : if (empty_slot < SIZE_MAX) {
1269 82 : ret = rpc_host_exec_worker(server, empty_slot);
1270 82 : if (ret != 0) {
1271 0 : DBG_WARNING("Could not fork worker: %s\n",
1272 : strerror(ret));
1273 : }
1274 82 : return NULL;
1275 : }
1276 :
1277 : /*
1278 : * All workers are busy. We need to expand the number of
1279 : * workers because we were asked for an idle worker.
1280 : */
1281 246 : if (num_workers >= UINT16_MAX) {
1282 : /*
1283 : * The worker index would not fit into 16-bits
1284 : */
1285 0 : return NULL;
1286 : }
1287 246 : tmp = talloc_realloc(
1288 : server,
1289 : server->workers,
1290 : struct rpc_work_process,
1291 : num_workers+1);
1292 246 : if (tmp == NULL) {
1293 0 : return NULL;
1294 : }
1295 246 : server->workers = tmp;
1296 :
1297 246 : server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };
1298 :
1299 246 : ret = rpc_host_exec_worker(server, num_workers);
1300 246 : if (ret != 0) {
1301 0 : DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
1302 : }
1303 :
1304 246 : return NULL;
1305 : }
1306 :
1307 : /*
1308 : * Find an rpcd_* process to talk to. Start a new one if necessary.
1309 : */
1310 72757 : static void rpc_host_distribute_clients(struct rpc_server *server)
1311 : {
1312 72757 : struct rpc_work_process *worker = NULL;
1313 72757 : struct rpc_host_pending_client *pending_client = NULL;
1314 : uint32_t assoc_group_id;
1315 : DATA_BLOB blob;
1316 : struct iovec iov;
1317 : enum ndr_err_code ndr_err;
1318 : NTSTATUS status;
1319 72757 : const char *client_type = NULL;
1320 :
1321 72757 : again:
1322 72757 : pending_client = server->pending_clients;
1323 72757 : if (pending_client == NULL) {
1324 36085 : DBG_DEBUG("No pending clients\n");
1325 36661 : return;
1326 : }
1327 :
1328 36672 : assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
1329 :
1330 36672 : if (assoc_group_id != 0) {
1331 26 : size_t num_workers = talloc_array_length(server->workers);
1332 26 : uint16_t worker_index = assoc_group_id >> 16;
1333 :
1334 26 : client_type = "associated";
1335 :
1336 26 : if (worker_index >= num_workers) {
1337 0 : DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1338 : assoc_group_id);
1339 0 : goto done;
1340 : }
1341 26 : worker = &server->workers[worker_index];
1342 :
1343 26 : if ((worker->pid == -1) || !worker->available) {
1344 0 : DBG_DEBUG("Requested worker index %"PRIu16": "
1345 : "pid=%d, available=%d\n",
1346 : worker_index,
1347 : (int)worker->pid,
1348 : (int)worker->available);
1349 : /*
1350 : * Pick a random one for a proper bind nack
1351 : */
1352 0 : client_type = "associated+lost";
1353 0 : worker = rpc_host_find_worker(server);
1354 : }
1355 : } else {
1356 36646 : struct auth_session_info_transport *session_info =
1357 36646 : pending_client->client->npa_info8->session_info;
1358 36646 : uint32_t flags = 0;
1359 : bool found;
1360 :
1361 36646 : client_type = "new";
1362 :
1363 36646 : found = security_token_find_npa_flags(
1364 36646 : session_info->session_info->security_token,
1365 : &flags);
1366 :
1367 : /* fresh assoc group requested */
1368 36646 : if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1369 31362 : client_type = "new+exclusive";
1370 31362 : worker = rpc_host_find_idle_worker(server);
1371 : } else {
1372 5284 : client_type = "new";
1373 5284 : worker = rpc_host_find_worker(server);
1374 : }
1375 : }
1376 :
1377 36672 : if (worker == NULL) {
1378 576 : DBG_DEBUG("No worker found for %s client\n", client_type);
1379 576 : return;
1380 : }
1381 :
1382 36096 : DLIST_REMOVE(server->pending_clients, pending_client);
1383 :
1384 36096 : ndr_err = ndr_push_struct_blob(
1385 : &blob,
1386 : pending_client,
1387 36096 : pending_client->client,
1388 : (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1389 36096 : if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1390 0 : DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1391 : ndr_errstr(ndr_err));
1392 0 : goto done;
1393 : }
1394 :
1395 36096 : DBG_INFO("Sending %s client %s to %d with "
1396 : "%"PRIu32" associations and %"PRIu32" connections\n",
1397 : client_type,
1398 : server->rpc_server_exe,
1399 : worker->pid,
1400 : worker->num_associations,
1401 : worker->num_connections);
1402 :
1403 36096 : iov = (struct iovec) {
1404 36096 : .iov_base = blob.data, .iov_len = blob.length,
1405 : };
1406 :
1407 72192 : status = messaging_send_iov(
1408 36096 : server->host->msg_ctx,
1409 : pid_to_procid(worker->pid),
1410 : MSG_RPC_HOST_NEW_CLIENT,
1411 : &iov,
1412 : 1,
1413 36096 : &pending_client->sock,
1414 : 1);
1415 36096 : if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1416 0 : DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1417 : worker->pid);
1418 0 : DLIST_ADD(server->pending_clients, pending_client);
1419 0 : worker->available = false;
1420 0 : goto again;
1421 : }
1422 36096 : if (!NT_STATUS_IS_OK(status)) {
1423 0 : DBG_DEBUG("messaging_send_iov failed: %s\n",
1424 : nt_errstr(status));
1425 0 : goto done;
1426 : }
1427 36096 : if (assoc_group_id == 0) {
1428 36070 : worker->num_associations += 1;
1429 : }
1430 36096 : worker->num_connections += 1;
1431 36096 : TALLOC_FREE(worker->exit_timer);
1432 :
1433 36096 : TALLOC_FREE(server->host->np_helper_shutdown);
1434 :
1435 36021 : done:
1436 36096 : TALLOC_FREE(pending_client);
1437 : }
1438 :
/*
 * Destructor for a pending client (presumably installed via
 * talloc_set_destructor elsewhere — confirm at the alloc site):
 * cancel the hangup watch, close the socket if we still own it and
 * unlink from the server's pending list. Returns 0 to allow the free.
 */
static int rpc_host_pending_client_destructor(
	struct rpc_host_pending_client *p)
{
	TALLOC_FREE(p->hangup_wait);
	if (p->sock != -1) {
		close(p->sock);
		p->sock = -1;
	}
	DLIST_REMOVE(p->server->pending_clients, p);
	return 0;
}
1450 :
1451 : /*
1452 : * Exception condition handler before rpcd_* worker
1453 : * is handling the socket. Either the client exited or
1454 : * sent unexpected data after the initial bind.
1455 : */
1456 0 : static void rpc_host_client_exited(struct tevent_req *subreq)
1457 : {
1458 0 : struct rpc_host_pending_client *pending = tevent_req_callback_data(
1459 : subreq, struct rpc_host_pending_client);
1460 : bool ok;
1461 : int err;
1462 :
1463 0 : ok = wait_for_read_recv(subreq, &err);
1464 :
1465 0 : TALLOC_FREE(subreq);
1466 0 : pending->hangup_wait = NULL;
1467 :
1468 0 : if (ok) {
1469 0 : DBG_DEBUG("client on sock %d sent data\n", pending->sock);
1470 : } else {
1471 0 : DBG_DEBUG("client exited with %s\n", strerror(err));
1472 : }
1473 0 : TALLOC_FREE(pending);
1474 0 : }
1475 :
/*
 * One interface together with the strv list of binding strings it is
 * reachable on; used to build the records stored in epmdb.
 */
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;	/* the interface's syntax id */
	char *bindings;			/* strv: name, then binding strings */
};
1480 :
/*
 * Add one endpoint's binding string to the per-interface binding maps.
 *
 * For every interface offered on the endpoint (except the DCERPC mgmt
 * interface) the endpoint's binding string is appended to that
 * interface's strv, creating a new map entry on first sight. Note that
 * creating an entry steals the interface's name string out of
 * iface_names via talloc_move — it becomes the first strv element.
 *
 * Returns true on success; on failure *pmaps still receives the
 * (possibly reallocated) array built so far.
 */
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	/* The DCERPC management interface syntax id, version 1.0 */
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		/* Do we already have a map entry for this interface? */
		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			/* No entry yet, create one */
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				/* steals the name as first strv element */
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		/* Append the binding string if not already listed */
		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	*pmaps = maps;
	return ok;
}
1570 :
1571 728 : static bool rpc_iface_binding_map_add_endpoints(
1572 : TALLOC_CTX *mem_ctx,
1573 : struct rpc_host_endpoint **endpoints,
1574 : struct rpc_host_iface_name *iface_names,
1575 : struct rpc_iface_binding_map **pbinding_maps)
1576 : {
1577 728 : size_t i, num_endpoints = talloc_array_length(endpoints);
1578 :
1579 2325 : for (i=0; i<num_endpoints; i++) {
1580 1597 : bool ok = rpc_iface_binding_map_add_endpoint(
1581 1597 : mem_ctx, endpoints[i], iface_names, pbinding_maps);
1582 1597 : if (!ok) {
1583 0 : return false;
1584 : }
1585 : }
1586 728 : return true;
1587 : }
1588 :
/*
 * Populate the endpoint-mapper database: one record per interface,
 * keyed by the syntax-id string, valued with the strv containing the
 * interface name followed by its binding strings.
 */
static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t i, num_maps;
	bool ret = false;
	bool ok;

	ok = rpc_iface_binding_map_add_endpoints(
		talloc_tos(), endpoints, iface_names, &maps);
	if (!ok) {
		goto fail;
	}

	num_maps = talloc_array_length(maps);

	for (i=0; i<num_maps; i++) {
		struct rpc_iface_binding_map *map = &maps[i];
		struct ndr_syntax_id_buf buf;
		char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
		/* value is the raw strv buffer, embedded NULs and all */
		TDB_DATA value = {
			.dptr = (uint8_t *)map->bindings,
			.dsize = talloc_array_length(map->bindings),
		};
		int rc;

		rc = tdb_store(
			db->tdb, string_term_tdb_data(keystr), value, 0);
		if (rc == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto fail;
		}
	}

	ret = true;
fail:
	TALLOC_FREE(maps);
	return ret;
}
1631 :
/* State for rpc_server_setup_send/recv: the server being initialized */
struct rpc_server_setup_state {
	struct rpc_server *server;
};
1635 :
1636 : static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
1637 :
1638 : /*
1639 : * Async initialize state for all possible rpcd_* servers.
1640 : * Note this does not start them.
1641 : */
1642 728 : static struct tevent_req *rpc_server_setup_send(
1643 : TALLOC_CTX *mem_ctx,
1644 : struct tevent_context *ev,
1645 : struct rpc_host *host,
1646 : const char *rpc_server_exe)
1647 : {
1648 728 : struct tevent_req *req = NULL, *subreq = NULL;
1649 728 : struct rpc_server_setup_state *state = NULL;
1650 728 : struct rpc_server *server = NULL;
1651 :
1652 728 : req = tevent_req_create(
1653 : mem_ctx, &state, struct rpc_server_setup_state);
1654 728 : if (req == NULL) {
1655 0 : return NULL;
1656 : }
1657 728 : state->server = talloc_zero(state, struct rpc_server);
1658 728 : if (tevent_req_nomem(state->server, req)) {
1659 0 : return tevent_req_post(req, ev);
1660 : }
1661 :
1662 728 : server = state->server;
1663 :
1664 728 : *server = (struct rpc_server) {
1665 : .host = host,
1666 : .server_index = UINT32_MAX,
1667 728 : .rpc_server_exe = talloc_strdup(server, rpc_server_exe),
1668 : };
1669 728 : if (tevent_req_nomem(server->rpc_server_exe, req)) {
1670 0 : return tevent_req_post(req, ev);
1671 : }
1672 :
1673 728 : subreq = rpc_server_get_endpoints_send(
1674 : state,
1675 : ev,
1676 : rpc_server_exe,
1677 728 : host->np_helper ? NCACN_NP : NCA_UNKNOWN);
1678 728 : if (tevent_req_nomem(subreq, req)) {
1679 0 : return tevent_req_post(req, ev);
1680 : }
1681 728 : tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
1682 728 : return req;
1683 : }
1684 :
/*
 * Callback: the rpcd_* binary reported its endpoints. Initialize the
 * (empty) worker slots, create and listen on the endpoint sockets and
 * publish the endpoints in the epm database.
 */
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		e->server = server;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			/* transport not available, skip this endpoint */
			continue;
		}
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	/* Failure here is deliberately not fatal */
	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
1758 :
/*
 * Report the result of rpc_server_setup_send; on success ownership of
 * the fully initialized rpc_server moves to mem_ctx.
 */
static NTSTATUS rpc_server_setup_recv(
	struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
{
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	*server = talloc_move(mem_ctx, &state->server);
	tevent_req_received(req);
	return NT_STATUS_OK;
}
1775 :
1776 : /*
1777 : * rpcd_* died. Called from SIGCHLD handler.
1778 : */
1779 666 : static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
1780 : {
1781 666 : size_t i, num_servers = talloc_array_length(host->servers);
1782 666 : struct rpc_work_process *worker = NULL;
1783 666 : bool found_pid = false;
1784 666 : bool have_active_worker = false;
1785 :
1786 5994 : for (i=0; i<num_servers; i++) {
1787 5328 : struct rpc_server *server = host->servers[i];
1788 : size_t j, num_workers;
1789 :
1790 5328 : if (server == NULL) {
1791 : /* SIGCHLD for --list-interfaces run */
1792 720 : continue;
1793 : }
1794 :
1795 4608 : num_workers = talloc_array_length(server->workers);
1796 :
1797 27104 : for (j=0; j<num_workers; j++) {
1798 22496 : worker = &server->workers[j];
1799 22496 : if (worker->pid == pid) {
1800 524 : found_pid = true;
1801 524 : worker->pid = -1;
1802 524 : worker->available = false;
1803 : }
1804 :
1805 22496 : if (worker->pid != -1) {
1806 2918 : have_active_worker = true;
1807 : }
1808 : }
1809 : }
1810 :
1811 666 : if (!found_pid) {
1812 142 : DBG_WARNING("No worker with PID %d\n", (int)pid);
1813 142 : return;
1814 : }
1815 :
1816 524 : if (!have_active_worker && host->np_helper) {
1817 : /*
1818 : * We have nothing left to do as an np_helper.
1819 : * Terminate ourselves (samba-dcerpcd). We will
1820 : * be restarted on demand anyway.
1821 : */
1822 75 : DBG_DEBUG("Exiting idle np helper\n");
1823 75 : exit(0);
1824 : }
1825 : }
1826 :
1827 : /*
1828 : * rpcd_* died.
1829 : */
1830 1110 : static void rpc_host_sigchld(
1831 : struct tevent_context *ev,
1832 : struct tevent_signal *se,
1833 : int signum,
1834 : int count,
1835 : void *siginfo,
1836 : void *private_data)
1837 : {
1838 1110 : struct rpc_host *state = talloc_get_type_abort(
1839 : private_data, struct rpc_host);
1840 : pid_t pid;
1841 : int wstatus;
1842 :
1843 1701 : while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
1844 666 : DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
1845 666 : rpc_worker_exited(state, pid);
1846 : }
1847 1035 : }
1848 :
1849 : /*
1850 : * Idle timer fired for a rcpd_* worker. Ask it to terminate.
1851 : */
1852 526 : static void rpc_host_exit_worker(
1853 : struct tevent_context *ev,
1854 : struct tevent_timer *te,
1855 : struct timeval current_time,
1856 : void *private_data)
1857 : {
1858 526 : struct rpc_server *server = talloc_get_type_abort(
1859 : private_data, struct rpc_server);
1860 526 : size_t i, num_workers = talloc_array_length(server->workers);
1861 :
1862 : /*
1863 : * Scan for the right worker. We don't have too many of those,
1864 : * and maintaining an index would be more data structure effort.
1865 : */
1866 :
1867 2874 : for (i=0; i<num_workers; i++) {
1868 2872 : struct rpc_work_process *w = &server->workers[i];
1869 : NTSTATUS status;
1870 :
1871 2872 : if (w->exit_timer != te) {
1872 2348 : continue;
1873 : }
1874 524 : w->exit_timer = NULL;
1875 :
1876 524 : SMB_ASSERT(w->num_associations == 0);
1877 :
1878 524 : status = messaging_send(
1879 524 : server->host->msg_ctx,
1880 : pid_to_procid(w->pid),
1881 : MSG_SHUTDOWN,
1882 : NULL);
1883 524 : if (!NT_STATUS_IS_OK(status)) {
1884 0 : DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
1885 : nt_errstr(status));
1886 : }
1887 :
1888 524 : w->available = false;
1889 524 : break;
1890 : }
1891 526 : }
1892 :
1893 : /*
1894 : * rcpd_* worker replied with its status.
1895 : */
1896 36661 : static void rpc_host_child_status_recv(
1897 : struct messaging_context *msg,
1898 : void *private_data,
1899 : uint32_t msg_type,
1900 : struct server_id server_id,
1901 : DATA_BLOB *data)
1902 : {
1903 36661 : struct rpc_host *host = talloc_get_type_abort(
1904 : private_data, struct rpc_host);
1905 36661 : size_t num_servers = talloc_array_length(host->servers);
1906 36661 : struct rpc_server *server = NULL;
1907 : size_t num_workers;
1908 36661 : pid_t src_pid = procid_to_pid(&server_id);
1909 36661 : struct rpc_work_process *worker = NULL;
1910 : struct rpc_worker_status status_message;
1911 : enum ndr_err_code ndr_err;
1912 :
1913 36661 : ndr_err = ndr_pull_struct_blob_all_noalloc(
1914 : data,
1915 : &status_message,
1916 : (ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
1917 36661 : if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1918 : struct server_id_buf buf;
1919 0 : DBG_WARNING("Got invalid message from pid %s\n",
1920 : server_id_str_buf(server_id, &buf));
1921 0 : return;
1922 : }
1923 36661 : if (DEBUGLEVEL >= 10) {
1924 0 : NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
1925 : }
1926 :
1927 36661 : if (status_message.server_index >= num_servers) {
1928 0 : DBG_WARNING("Got invalid server_index=%"PRIu32", "
1929 : "num_servers=%zu\n",
1930 : status_message.server_index,
1931 : num_servers);
1932 0 : return;
1933 : }
1934 :
1935 36661 : server = host->servers[status_message.server_index];
1936 :
1937 36661 : num_workers = talloc_array_length(server->workers);
1938 36661 : if (status_message.worker_index >= num_workers) {
1939 0 : DBG_WARNING("Got invalid worker_index=%"PRIu32", "
1940 : "num_workers=%zu\n",
1941 : status_message.worker_index,
1942 : num_workers);
1943 0 : return;
1944 : }
1945 36661 : worker = &server->workers[status_message.worker_index];
1946 :
1947 36661 : if (src_pid != worker->pid) {
1948 0 : DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
1949 : status_message.worker_index,
1950 : (int)src_pid,
1951 : worker->pid);
1952 0 : return;
1953 : }
1954 :
1955 36661 : worker->available = true;
1956 36661 : worker->num_associations = status_message.num_association_groups;
1957 36661 : worker->num_connections = status_message.num_connections;
1958 :
1959 36661 : if (worker->num_associations != 0) {
1960 321 : TALLOC_FREE(worker->exit_timer);
1961 : } else {
1962 36340 : worker->exit_timer = tevent_add_timer(
1963 : messaging_tevent_context(msg),
1964 : server->workers,
1965 : tevent_timeval_current_ofs(server->idle_seconds, 0),
1966 : rpc_host_exit_worker,
1967 : server);
1968 : /* No NULL check, it's not fatal if this does not work */
1969 : }
1970 :
1971 36661 : rpc_host_distribute_clients(server);
1972 : }
1973 :
1974 : /*
1975 : * samba-dcerpcd has been asked to shutdown.
1976 : * Mark the initial tevent_req as done so we
1977 : * exit the event loop.
1978 : */
1979 0 : static void rpc_host_msg_shutdown(
1980 : struct messaging_context *msg,
1981 : void *private_data,
1982 : uint32_t msg_type,
1983 : struct server_id server_id,
1984 : DATA_BLOB *data)
1985 : {
1986 0 : struct tevent_req *req = talloc_get_type_abort(
1987 : private_data, struct tevent_req);
1988 0 : tevent_req_done(req);
1989 0 : }
1990 :
1991 : /*
1992 : * Only match directory entries starting in rpcd_
1993 : */
1994 3350 : static int rpcd_filter(const struct dirent *d)
1995 : {
1996 3350 : int match = fnmatch("rpcd_*", d->d_name, 0);
1997 3350 : return (match == 0) ? 1 : 0;
1998 : }
1999 :
2000 : /*
2001 : * Scan the given libexecdir for rpcd_* services
2002 : * and return them as a strv list.
2003 : */
2004 12 : static int rpc_host_list_servers(
2005 : const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
2006 : {
2007 12 : char *servers = NULL;
2008 12 : struct dirent **namelist = NULL;
2009 : int i, num_servers;
2010 12 : int ret = ENOMEM;
2011 :
2012 12 : num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
2013 12 : if (num_servers == -1) {
2014 0 : DBG_DEBUG("scandir failed: %s\n", strerror(errno));
2015 0 : return errno;
2016 : }
2017 :
2018 108 : for (i=0; i<num_servers; i++) {
2019 96 : char *exe = talloc_asprintf(
2020 96 : mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
2021 96 : if (exe == NULL) {
2022 0 : goto fail;
2023 : }
2024 :
2025 96 : ret = strv_add(mem_ctx, &servers, exe);
2026 96 : TALLOC_FREE(exe);
2027 96 : if (ret != 0) {
2028 0 : goto fail;
2029 : }
2030 : }
2031 12 : fail:
2032 108 : for (i=0; i<num_servers; i++) {
2033 96 : SAFE_FREE(namelist[i]);
2034 : }
2035 12 : SAFE_FREE(namelist);
2036 :
2037 12 : if (ret != 0) {
2038 0 : TALLOC_FREE(servers);
2039 0 : return ret;
2040 : }
2041 12 : *pservers = servers;
2042 12 : return 0;
2043 : }
2044 :
/* State for accepting DCERPC connections on one endpoint's sockets */
struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;		/* event loop for re-arming */
	struct rpc_host_endpoint *endpoint;	/* endpoint we listen for */
};
2049 :
2050 : static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
2051 : static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
2052 :
2053 : /*
2054 : * Asynchronously wait for a DCERPC connection from a client.
2055 : */
2056 1597 : static struct tevent_req *rpc_host_endpoint_accept_send(
2057 : TALLOC_CTX *mem_ctx,
2058 : struct tevent_context *ev,
2059 : struct rpc_host_endpoint *endpoint)
2060 : {
2061 1597 : struct tevent_req *req = NULL;
2062 1597 : struct rpc_host_endpoint_accept_state *state = NULL;
2063 : size_t i;
2064 :
2065 1597 : req = tevent_req_create(
2066 : mem_ctx, &state, struct rpc_host_endpoint_accept_state);
2067 1597 : if (req == NULL) {
2068 0 : return NULL;
2069 : }
2070 1597 : state->ev = ev;
2071 1597 : state->endpoint = endpoint;
2072 :
2073 3242 : for (i=0; i<endpoint->num_fds; i++) {
2074 1645 : struct tevent_req *subreq = NULL;
2075 :
2076 1645 : subreq = accept_send(state, ev, endpoint->fds[i]);
2077 1645 : if (tevent_req_nomem(subreq, req)) {
2078 0 : return tevent_req_post(req, ev);
2079 : }
2080 1645 : tevent_req_set_callback(
2081 : subreq, rpc_host_endpoint_accept_accepted, req);
2082 : }
2083 :
2084 1597 : return req;
2085 : }
2086 :
2087 : /*
2088 : * Accept a DCERPC connection from a client.
2089 : */
2090 36180 : static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
2091 : {
2092 36180 : struct tevent_req *req = tevent_req_callback_data(
2093 : subreq, struct tevent_req);
2094 36180 : struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2095 : req, struct rpc_host_endpoint_accept_state);
2096 36180 : struct rpc_host_endpoint *endpoint = state->endpoint;
2097 : int sock, listen_sock, err;
2098 : struct samba_sockaddr peer_addr;
2099 :
2100 36180 : sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
2101 36180 : TALLOC_FREE(subreq);
2102 36180 : if (sock == -1) {
2103 : /* What to do here? Just ignore the error and retry? */
2104 0 : DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
2105 0 : tevent_req_error(req, err);
2106 0 : return;
2107 : }
2108 :
2109 36180 : subreq = accept_send(state, state->ev, listen_sock);
2110 36180 : if (tevent_req_nomem(subreq, req)) {
2111 0 : close(sock);
2112 0 : sock = -1;
2113 0 : return;
2114 : }
2115 36180 : tevent_req_set_callback(
2116 : subreq, rpc_host_endpoint_accept_accepted, req);
2117 :
2118 36180 : subreq = rpc_host_bind_read_send(
2119 : state,
2120 : state->ev,
2121 36180 : dcerpc_binding_get_transport(endpoint->binding),
2122 : &sock,
2123 : &peer_addr);
2124 36180 : if (tevent_req_nomem(subreq, req)) {
2125 0 : return;
2126 : }
2127 36180 : tevent_req_set_callback(
2128 : subreq, rpc_host_endpoint_accept_got_bind, req);
2129 : }
2130 :
/*
 * Client sent us a DCERPC bind packet: wrap socket, client info and
 * bind packet into a pending-client record hanging off the server,
 * watch the fd for hangup while the client is queued, and trigger
 * distribution of pending clients to worker processes.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int ret;
	int sock=-1;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	/*
	 * The destructor presumably closes pending->sock; sock is
	 * cleared below so the fail path does not double-close --
	 * TODO confirm against rpc_host_pending_client_destructor()
	 */
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	sock = -1;

	/* Notice clients that hang up while waiting for a worker */
	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}
2195 :
/*
 * Receive result of an endpoint accept loop: returns the errno the
 * loop failed with and hands back the affected endpoint in *ep.
 *
 * Note: *ep is filled in before tevent_req_simple_recv_unix(),
 * which typically invalidates the request's private state -- do
 * not reorder these statements.
 */
static int rpc_host_endpoint_accept_recv(
	struct tevent_req *req, struct rpc_host_endpoint **ep)
{
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);

	*ep = state->endpoint;

	return tevent_req_simple_recv_unix(req);
}
2206 :
/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;	/* main event loop */
	struct rpc_host *host;		/* servers, workers, epm db */

	/* set once all servers finished setup */
	bool is_ready;
	/* progname passed to daemon_ready(), NULL when forked */
	const char *daemon_ready_progname;
	/* defers readiness reporting to the event loop */
	struct tevent_immediate *ready_signal_immediate;
	/* fds to write one byte to (and close) once ready */
	int *ready_signal_fds;

	size_t num_servers;	/* servers we were asked to start */
	size_t num_prepared;	/* servers that completed setup */
};
2223 :
/*
 * Tell whoever invoked samba-dcerpcd we're ready to
 * serve: write one byte to each collected ready-signal fd and
 * close it. Best effort -- only EINTR is retried, write errors
 * are ignored.
 */
static void rpc_host_report_readiness(
	struct tevent_context *ev,
	struct tevent_immediate *im,
	void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t i, num_fds = talloc_array_length(state->ready_signal_fds);

	if (!state->is_ready) {
		/* Fds stay queued until setup completes */
		DBG_DEBUG("Not yet ready\n");
		return;
	}

	for (i=0; i<num_fds; i++) {
		uint8_t byte = 0;
		ssize_t nwritten;

		do {
			nwritten = write(
				state->ready_signal_fds[i],
				(void *)&byte,
				sizeof(byte));
		} while ((nwritten == -1) && (errno == EINTR));

		close(state->ready_signal_fds[i]);
	}

	TALLOC_FREE(state->ready_signal_fds);
}
2258 :
/*
 * Respond to a "are you ready" message: take the fd carried in a
 * MSG_DAEMON_READY_FD message and schedule rpc_host_report_readiness()
 * to write to it once (or as soon as) we are ready.
 *
 * Always returns false: the message is handled here as a side
 * effect, keeping the filtered read pending for further messages.
 */
static bool rpc_host_ready_signal_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t num_fds = talloc_array_length(state->ready_signal_fds);
	int *tmp = NULL;

	if (rec->msg_type != MSG_DAEMON_READY_FD) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/* Guard against size_t overflow of num_fds+1 */
	if (num_fds + 1 < num_fds) {
		return false;
	}
	tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
	if (tmp == NULL) {
		return false;
	}
	state->ready_signal_fds = tmp;

	/* Take ownership of the fd away from the messaging rec */
	state->ready_signal_fds[num_fds] = rec->fds[0];
	rec->fds[0] = -1;

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);

	return false;
}
2298 :
/*
 * Respond to a "what is your status" message: print a per-server
 * summary of active worker processes to the fd carried in a
 * MSG_RPC_DUMP_STATUS message.
 *
 * Always returns false: the message is fully handled here and the
 * filtered read stays armed.
 */
static bool rpc_host_dump_status_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	struct rpc_host *host = state->host;
	struct rpc_server **servers = host->servers;
	size_t i, num_servers = talloc_array_length(servers);
	FILE *f = NULL;
	int fd;

	if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/*
	 * dup() so that fclose() below does not close rec->fds[0],
	 * which messaging still owns.
	 */
	fd = dup(rec->fds[0]);
	if (fd == -1) {
		DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
			  rec->fds[0],
			  strerror(errno));
		return false;
	}

	f = fdopen(fd, "w");
	if (f == NULL) {
		DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
		close(fd);
		return false;
	}

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = servers[i];
		size_t j, num_workers = talloc_array_length(server->workers);
		size_t active_workers = 0;

		/* pid == -1 marks an unused worker slot */
		for (j=0; j<num_workers; j++) {
			if (server->workers[j].pid != -1) {
				active_workers += 1;
			}
		}

		fprintf(f,
			"%s: active_workers=%zu\n",
			server->rpc_server_exe,
			active_workers);

		for (j=0; j<num_workers; j++) {
			struct rpc_work_process *w = &server->workers[j];

			if (w->pid == (pid_t)-1) {
				continue;
			}

			fprintf(f,
				" worker[%zu]: pid=%d, num_associations=%"PRIu32", num_connections=%"PRIu32"\n",
				j,
				(int)w->pid,
				w->num_associations,
				w->num_connections);
		}
	}

	fclose(f);

	return false;
}
2372 :
2373 : static void rpc_host_server_setup_done(struct tevent_req *subreq);
2374 : static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2375 :
2376 : /*
2377 : * Async startup for samba-dcerpcd.
2378 : */
2379 91 : static struct tevent_req *rpc_host_send(
2380 : TALLOC_CTX *mem_ctx,
2381 : struct tevent_context *ev,
2382 : struct messaging_context *msg_ctx,
2383 : char *servers,
2384 : int ready_signal_fd,
2385 : const char *daemon_ready_progname,
2386 : bool is_np_helper)
2387 : {
2388 91 : struct tevent_req *req = NULL, *subreq = NULL;
2389 91 : struct rpc_host_state *state = NULL;
2390 91 : struct rpc_host *host = NULL;
2391 91 : struct tevent_signal *se = NULL;
2392 91 : char *epmdb_path = NULL;
2393 91 : char *exe = NULL;
2394 91 : size_t i, num_servers = strv_count(servers);
2395 : NTSTATUS status;
2396 : int ret;
2397 :
2398 91 : req = tevent_req_create(req, &state, struct rpc_host_state);
2399 91 : if (req == NULL) {
2400 0 : return NULL;
2401 : }
2402 91 : state->ev = ev;
2403 91 : state->daemon_ready_progname = daemon_ready_progname;
2404 :
2405 91 : state->ready_signal_immediate = tevent_create_immediate(state);
2406 91 : if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2407 0 : return tevent_req_post(req, ev);
2408 : }
2409 :
2410 91 : if (ready_signal_fd != -1) {
2411 79 : state->ready_signal_fds = talloc_array(state, int, 1);
2412 79 : if (tevent_req_nomem(state->ready_signal_fds, req)) {
2413 0 : return tevent_req_post(req, ev);
2414 : }
2415 79 : state->ready_signal_fds[0] = ready_signal_fd;
2416 : }
2417 :
2418 91 : state->host = talloc_zero(state, struct rpc_host);
2419 91 : if (tevent_req_nomem(state->host, req)) {
2420 0 : return tevent_req_post(req, ev);
2421 : }
2422 91 : host = state->host;
2423 :
2424 91 : host->msg_ctx = msg_ctx;
2425 91 : host->np_helper = is_np_helper;
2426 :
2427 91 : ret = pipe(host->worker_stdin);
2428 91 : if (ret == -1) {
2429 0 : tevent_req_nterror(req, map_nt_error_from_unix(errno));
2430 0 : return tevent_req_post(req, ev);
2431 : }
2432 :
2433 91 : host->servers = talloc_zero_array(
2434 : host, struct rpc_server *, num_servers);
2435 91 : if (tevent_req_nomem(host->servers, req)) {
2436 0 : return tevent_req_post(req, ev);
2437 : }
2438 :
2439 91 : se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2440 91 : if (tevent_req_nomem(se, req)) {
2441 0 : return tevent_req_post(req, ev);
2442 : }
2443 91 : BlockSignals(false, SIGCHLD);
2444 :
2445 91 : status = messaging_register(
2446 : msg_ctx,
2447 : host,
2448 : MSG_RPC_WORKER_STATUS,
2449 : rpc_host_child_status_recv);
2450 91 : if (tevent_req_nterror(req, status)) {
2451 0 : return tevent_req_post(req, ev);
2452 : }
2453 :
2454 91 : status = messaging_register(
2455 : msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2456 91 : if (tevent_req_nterror(req, status)) {
2457 0 : return tevent_req_post(req, ev);
2458 : }
2459 :
2460 91 : subreq = messaging_filtered_read_send(
2461 : state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2462 91 : if (tevent_req_nomem(subreq, req)) {
2463 0 : return tevent_req_post(req, ev);
2464 : }
2465 :
2466 91 : subreq = messaging_filtered_read_send(
2467 : state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2468 91 : if (tevent_req_nomem(subreq, req)) {
2469 0 : return tevent_req_post(req, ev);
2470 : }
2471 :
2472 91 : epmdb_path = lock_path(state, "epmdb.tdb");
2473 91 : if (tevent_req_nomem(epmdb_path, req)) {
2474 0 : return tevent_req_post(req, ev);
2475 : }
2476 :
2477 91 : host->epmdb = tdb_wrap_open(
2478 : host,
2479 : epmdb_path,
2480 : 0,
2481 : TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2482 : O_RDWR|O_CREAT,
2483 : 0644);
2484 91 : if (host->epmdb == NULL) {
2485 0 : DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2486 : epmdb_path,
2487 : strerror(errno));
2488 0 : tevent_req_nterror(req, map_nt_error_from_unix(errno));
2489 0 : return tevent_req_post(req, ev);
2490 : }
2491 91 : TALLOC_FREE(epmdb_path);
2492 :
2493 91 : for (exe = strv_next(servers, exe), i = 0;
2494 819 : exe != NULL;
2495 728 : exe = strv_next(servers, exe), i++) {
2496 :
2497 728 : DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2498 :
2499 728 : subreq = rpc_server_setup_send(
2500 : state,
2501 : ev,
2502 : host,
2503 : exe);
2504 728 : if (tevent_req_nomem(subreq, req)) {
2505 0 : return tevent_req_post(req, ev);
2506 : }
2507 728 : tevent_req_set_callback(
2508 : subreq, rpc_host_server_setup_done, req);
2509 : }
2510 :
2511 91 : return req;
2512 : }
2513 :
2514 : /*
2515 : * Timer function called after we were initialized but no one
2516 : * connected. Shutdown.
2517 : */
2518 4 : static void rpc_host_shutdown(
2519 : struct tevent_context *ev,
2520 : struct tevent_timer *te,
2521 : struct timeval current_time,
2522 : void *private_data)
2523 : {
2524 4 : struct tevent_req *req = talloc_get_type_abort(
2525 : private_data, struct tevent_req);
2526 4 : DBG_DEBUG("Nobody connected -- shutting down\n");
2527 4 : tevent_req_done(req);
2528 4 : }
2529 :
/*
 * One rpc_server_setup_send() finished. On failure the failed
 * server's slot is dropped by shrinking host->servers by one
 * element. Once all remaining servers are prepared: start
 * accepting on every endpoint, mark ourselves ready, and in np
 * helper mode arm the 10-second idle-shutdown timer.
 */
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		/*
		 * Shrink the array; successful servers always fill
		 * slots from the front via num_prepared below, so
		 * dropping the tail element is safe.
		 */
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		/* Wait for the remaining setups to complete */
		return;
	}

	/* All servers prepared: start the accept loops */
	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	/* Signal readiness to whoever is waiting on our fds */
	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
2608 :
2609 : /*
2610 : * Log accept fail on an endpoint.
2611 : */
2612 0 : static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2613 : {
2614 0 : struct tevent_req *req = tevent_req_callback_data(
2615 : subreq, struct tevent_req);
2616 0 : struct rpc_host_state *state = tevent_req_data(
2617 : req, struct rpc_host_state);
2618 0 : struct rpc_host_endpoint *endpoint = NULL;
2619 0 : char *binding_string = NULL;
2620 : int ret;
2621 :
2622 0 : ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2623 0 : TALLOC_FREE(subreq);
2624 :
2625 0 : binding_string = dcerpc_binding_string(state, endpoint->binding);
2626 0 : DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2627 : binding_string,
2628 : strerror(ret));
2629 0 : TALLOC_FREE(binding_string);
2630 0 : }
2631 :
/*
 * Collect the overall result of rpc_host_send(): NT_STATUS_OK on
 * clean shutdown, an error status otherwise.
 */
static NTSTATUS rpc_host_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
2636 :
/*
 * Create the samba-dcerpcd pidfile.
 *
 * Returns 0 when we got the pidfile lock; the fd is deliberately
 * leaked so the lock is held for the process lifetime. Returns
 * EAGAIN when another instance already runs -- in that case our
 * ready_signal_fd is forwarded to the existing daemon via
 * MSG_DAEMON_READY_FD so it signals readiness on our behalf.
 * Any other return value is a pidfile creation error.
 */
static int rpc_host_pidfile_create(
	struct messaging_context *msg_ctx,
	const char *progname,
	int ready_signal_fd)
{
	const char *piddir = lp_pid_directory();
	/* "<piddir>/<progname>.pid": +6 covers '/', ".pid" and NUL */
	size_t len = strlen(piddir) + strlen(progname) + 6;
	char pidFile[len];
	pid_t existing_pid;
	int fd, ret;

	snprintf(pidFile,
		 sizeof(pidFile),
		 "%s/%s.pid",
		 piddir, progname);

	ret = pidfile_path_create(pidFile, &fd, &existing_pid);
	if (ret == 0) {
		/* leak fd */
		return 0;
	}

	if (ret != EAGAIN) {
		DBG_DEBUG("pidfile_path_create() failed: %s\n",
			  strerror(ret));
		return ret;
	}

	DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);

	if (ready_signal_fd != -1) {
		/* Let the running daemon answer for us */
		NTSTATUS status = messaging_send_iov(
			msg_ctx,
			pid_to_procid(existing_pid),
			MSG_DAEMON_READY_FD,
			NULL,
			0,
			&ready_signal_fd,
			1);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send ready_signal_fd: %s\n",
				  nt_errstr(status));
		}
	}

	return EAGAIN;
}
2684 :
2685 12 : static void samba_dcerpcd_stdin_handler(
2686 : struct tevent_context *ev,
2687 : struct tevent_fd *fde,
2688 : uint16_t flags,
2689 : void *private_data)
2690 : {
2691 12 : struct tevent_req *req = talloc_get_type_abort(
2692 : private_data, struct tevent_req);
2693 : char c;
2694 :
2695 12 : if (read(0, &c, 1) != 1) {
2696 : /* we have reached EOF on stdin, which means the
2697 : parent has exited. Shutdown the server */
2698 12 : tevent_req_done(req);
2699 : }
2700 12 : }
2701 :
/*
 * samba-dcerpcd microservice startup !
 *
 * Parse options, optionally daemonize, create the pidfile (or hand
 * our ready fd to an already-running instance), then drive
 * rpc_host_send() on the event loop until shutdown.
 */
int main(int argc, const char *argv[])
{
	const struct loadparm_substitution *lp_sub =
		loadparm_s3_global_substitution();
	const char *progname = getprogname();
	TALLOC_CTX *frame = NULL;
	struct tevent_context *ev_ctx = NULL;
	struct messaging_context *msg_ctx = NULL;
	struct tevent_req *req = NULL;
	char *servers = NULL;
	const char *arg = NULL;
	size_t num_servers;
	poptContext pc;
	int ret, err;
	NTSTATUS status;
	bool log_stdout;
	bool ok;

	int libexec_rpcds = 0;
	int np_helper = 0;
	int ready_signal_fd = -1;

	struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
	struct poptOption long_options[] = {
		POPT_AUTOHELP
		{
			.longName = "libexec-rpcds",
			.argInfo = POPT_ARG_NONE,
			.arg = &libexec_rpcds,
			.descrip = "Use all rpcds in libexec",
		},
		{
			.longName = "ready-signal-fd",
			.argInfo = POPT_ARG_INT,
			.arg = &ready_signal_fd,
			.descrip = "fd to close when initialized",
		},
		{
			.longName = "np-helper",
			.argInfo = POPT_ARG_NONE,
			.arg = &np_helper,
			.descrip = "Internal named pipe server",
		},
		POPT_COMMON_SAMBA
		POPT_COMMON_DAEMON
		POPT_COMMON_VERSION
		POPT_TABLEEND
	};

	{
		/*
		 * Close every inherited fd beyond stdio except the
		 * ones named in fd_params command line options.
		 */
		const char *fd_params[] = { "ready-signal-fd", };

		closefrom_except_fd_params(
			3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
	}

	talloc_enable_null_tracking();
	frame = talloc_stackframe();
	umask(0);
	sec_init();
	smb_init_locale();

	ok = samba_cmdline_init(frame,
				SAMBA_CMDLINE_CONFIG_SERVER,
				true /* require_smbconf */);
	if (!ok) {
		DBG_ERR("Failed to init cmdline parser!\n");
		TALLOC_FREE(frame);
		exit(ENOMEM);
	}

	pc = samba_popt_get_context(getprogname(),
				    argc,
				    argv,
				    long_options,
				    0);
	if (pc == NULL) {
		DBG_ERR("Failed to setup popt context!\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	poptSetOtherOptionHelp(
		pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");

	ret = poptGetNextOpt(pc);

	if (ret != -1) {
		if (ret >= 0) {
			fprintf(stderr,
				"\nGot unexpected option %d\n",
				ret);
		} else if (ret == POPT_ERROR_BADOPT) {
			fprintf(stderr,
				"\nInvalid option %s: %s\n\n",
				poptBadOption(pc, 0),
				poptStrerror(ret));
		} else {
			fprintf(stderr,
				"\npoptGetNextOpt returned %s\n",
				poptStrerror(ret));
		}

		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	/* Remaining args are explicitly named rpcd services */
	while ((arg = poptGetArg(pc)) != NULL) {
		ret = strv_add(frame, &servers, arg);
		if (ret != 0) {
			DBG_ERR("strv_add() failed\n");
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
	if (log_stdout) {
		setup_logging(progname, DEBUG_STDOUT);
	} else {
		setup_logging(progname, DEBUG_FILE);
	}

	/*
	 * If "rpc start on demand helpers = true" in smb.conf we must
	 * not start as standalone, only on demand from
	 * local_np_connect() functions. Log an error message telling
	 * the admin how to fix and then exit.
	 */
	if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
		DBG_ERR("Cannot start in standalone mode if smb.conf "
			"[global] setting "
			"\"rpc start on demand helpers = true\" - "
			"exiting\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	if (libexec_rpcds != 0) {
		/* Scan libexec for rpcd_* binaries to serve */
		ret = rpc_host_list_servers(
			dyn_SAMBA_LIBEXECDIR, frame, &servers);
		if (ret != 0) {
			DBG_ERR("Could not list libexec: %s\n",
				strerror(ret));
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	num_servers = strv_count(servers);
	if (num_servers == 0) {
		poptPrintUsage(pc, stderr, 0);
		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	poptFreeContext(pc);

	cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();

	if (log_stdout && cmdline_daemon_cfg->fork) {
		DBG_ERR("Can't log to stdout unless in foreground\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	msg_ctx = global_messaging_context();
	if (msg_ctx == NULL) {
		DBG_ERR("messaging_init() failed\n");
		TALLOC_FREE(frame);
		exit(1);
	}
	ev_ctx = messaging_tevent_context(msg_ctx);

	if (cmdline_daemon_cfg->fork) {
		become_daemon(
			true,
			cmdline_daemon_cfg->no_process_group,
			log_stdout);

		status = reinit_after_fork(msg_ctx, ev_ctx, false);
		if (!NT_STATUS_IS_OK(status)) {
			exit_daemon("reinit_after_fork() failed",
				    map_errno_from_nt_status(status));
		}
	} else {
		DBG_DEBUG("Calling daemon_status\n");
		daemon_status(progname, "Starting process ... ");
	}

	BlockSignals(true, SIGPIPE);

	dump_core_setup(progname, lp_logfile(frame, lp_sub));

	reopen_logs();

	DBG_STARTUP_NOTICE("%s version %s started.\n%s\n",
			   progname,
			   samba_version_string(),
			   samba_copyright_string());

	/* Build guest session info with winbind switched off */
	(void)winbind_off();
	ok = init_guest_session_info(frame);
	(void)winbind_on();
	if (!ok) {
		DBG_ERR("init_guest_session_info failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
	if (ret != 0) {
		/* EAGAIN: another instance runs and took over our fd */
		DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
			  strerror(ret));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	req = rpc_host_send(
		ev_ctx,
		ev_ctx,
		msg_ctx,
		servers,
		ready_signal_fd,
		cmdline_daemon_cfg->fork ? NULL : progname,
		np_helper != 0);
	if (req == NULL) {
		DBG_ERR("rpc_host_send failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	if (!cmdline_daemon_cfg->fork) {
		struct stat st;
		if (fstat(0, &st) != 0) {
			DBG_DEBUG("fstat(0) failed: %s\n",
				  strerror(errno));
			global_messaging_context_free();
			TALLOC_FREE(frame);
			exit(1);
		}
		/* Shut down when the invoking parent closes stdin */
		if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
			tevent_add_fd(
				ev_ctx,
				ev_ctx,
				0,
				TEVENT_FD_READ,
				samba_dcerpcd_stdin_handler,
				req);
		}
	}

	ok = tevent_req_poll_unix(req, ev_ctx, &err);
	if (!ok) {
		DBG_ERR("tevent_req_poll_unix failed: %s\n",
			strerror(err));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	status = rpc_host_recv(req);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	TALLOC_FREE(frame);

	return 0;
}
|