/*
 * container.c -- "bundle" (container) resource variant for the Pacemaker
 * scalable high-availability cluster resource manager (version 1.1.24).
 */
1 /*
2  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <crm_internal.h>
20 
21 #include <ctype.h>
22 
23 #include <crm/pengine/rules.h>
24 #include <crm/pengine/status.h>
25 #include <crm/pengine/internal.h>
26 #include <unpack.h>
27 #include <crm/msg_xml.h>
28 
29 #define VARIANT_CONTAINER 1
30 #include "./variant.h"
31 
32 void tuple_free(container_grouping_t *tuple);
33 
34 static char *
35 next_ip(const char *last_ip)
36 {
37  unsigned int oct1 = 0;
38  unsigned int oct2 = 0;
39  unsigned int oct3 = 0;
40  unsigned int oct4 = 0;
41  int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
42 
43  if (rc != 4) {
44  /*@ TODO check for IPv6 */
45  return NULL;
46 
47  } else if (oct3 > 253) {
48  return NULL;
49 
50  } else if (oct4 > 253) {
51  ++oct3;
52  oct4 = 1;
53 
54  } else {
55  ++oct4;
56  }
57 
58  return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
59 }
60 
61 static int
62 allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max)
63 {
64  if(data->ip_range_start == NULL) {
65  return 0;
66 
67  } else if(data->ip_last) {
68  tuple->ipaddr = next_ip(data->ip_last);
69 
70  } else {
71  tuple->ipaddr = strdup(data->ip_range_start);
72  }
73 
74  data->ip_last = tuple->ipaddr;
75 #if 0
76  return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
77  data->prefix, tuple->offset, tuple->ipaddr,
78  data->prefix, tuple->offset, data->prefix, tuple->offset);
79 #else
80  if (data->type == PE_CONTAINER_TYPE_DOCKER) {
81  return snprintf(buffer, max, " --add-host=%s-%d:%s",
82  data->prefix, tuple->offset, tuple->ipaddr);
83  } else if (data->type == PE_CONTAINER_TYPE_RKT) {
84  return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
85  tuple->ipaddr, data->prefix, tuple->offset);
86  } else {
87  return 0;
88  }
89 #endif
90 }
91 
/*!
 * \internal
 * \brief Create a skeleton \c <primitive> XML element for a generated resource
 *
 * \param[in] name      Resource ID to assign
 * \param[in] provider  Agent provider (e.g. "heartbeat")
 * \param[in] kind      Agent type (e.g. "docker", "rkt", "IPaddr2")
 *
 * \return Newly created XML node (ownership passes to the caller/tree)
 *
 * NOTE(review): the listing this was extracted from skips one source line
 * here (numbering jumps 97 -> 99); upstream also sets the agent class
 * attribute at that point -- confirm against the original source.
 */
static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
    xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    crm_xml_add(rsc, XML_ATTR_ID, name);
    crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
    crm_xml_add(rsc, XML_ATTR_TYPE, kind);

    return rsc;
}
104 
117 static bool
118 valid_network(container_variant_data_t *data)
119 {
120  if(data->ip_range_start) {
121  return TRUE;
122  }
123  if(data->control_port) {
124  if(data->replicas_per_host > 1) {
125  pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
126  data->replicas_per_host = 1;
127  /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
128  }
129  return TRUE;
130  }
131  return FALSE;
132 }
133 
/*!
 * \internal
 * \brief Create an IPaddr2 primitive for one bundle replica
 *
 * Only applies when the bundle configures an ip-range-start; otherwise this
 * is a successful no-op. On success the new resource is unpacked into
 * tuple->ip and appended to the parent's children.
 *
 * \return TRUE on success (or nothing to do), FALSE if unpacking failed
 *
 * NOTE(review): the extracted listing skips one source line here (numbering
 * jumps 144 -> 146); upstream sanitizes the generated ID at that point --
 * confirm against the original source.
 */
static bool
create_ip_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    if(data->ip_range_start) {
        char *id = NULL;
        xmlNode *xml_ip = NULL;
        xmlNode *xml_obj = NULL;

        /* ID embeds the replica's assigned address, e.g. "<prefix>-ip-10.1.2.3" */
        id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr);
        xml_ip = create_resource(id, "heartbeat", "IPaddr2");
        free(id);

        xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
        crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

        crm_create_nvpair_xml(xml_obj, NULL, "ip", tuple->ipaddr);
        if(data->host_network) {
            crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
        }

        if(data->host_netmask) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "cidr_netmask", data->host_netmask);

        } else {
            /* Default to a /32 host route when no netmask was configured */
            crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
        }

        xml_obj = create_xml_node(xml_ip, "operations");
        crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);

        // TODO: Other ops? Timeouts and intervals from underlying resource?

        if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) {
            return FALSE;
        }

        parent->children = g_list_append(parent->children, tuple->ip);
    }
    return TRUE;
}
178 
/*!
 * \internal
 * \brief Create the docker primitive controlling one bundle replica
 *
 * Builds a "docker" OCF resource: image/behavior attributes, the full
 * docker-run option string (hostname, network, remote port, mounts, port
 * mappings, user options), and a monitor operation. On success the resource
 * is unpacked into tuple->docker and appended to the parent's children.
 *
 * \return TRUE on success, FALSE if unpacking the generated XML failed
 *
 * NOTE(review): offsets accumulate raw snprintf() return values; if the 4KB
 * option budget were ever exceeded, max-offset could go negative and be
 * passed to snprintf() as a huge size_t -- assumed unreachable in practice,
 * confirm. Also, the extracted listing skips one source line (194 -> 196);
 * upstream sanitizes the generated ID there -- confirm.
 */
static bool
create_docker_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset);
    xml_docker = create_resource(id, "heartbeat", "docker");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);

    /* The cluster (not docker) is responsible for restarting the container */
    offset += snprintf(buffer+offset, max-offset, " --restart=no");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this makes applications happy who need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                           data->prefix, tuple->offset);
    }

    offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    /* Tell pacemaker_remoted inside the container which port to listen on */
    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    /* Translate configured storage mounts into -v options; per-replica
     * (flagged) mounts get "<source>/<prefix>-<offset>" as their source and
     * are also recorded in mount_points so the agent can create them. */
    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
        }
        if(mount->options) {
            offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
        }
    }

    /* Map configured ports, binding to the replica's IP when it has one */
    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                               tuple->ipaddr, port->source, port->target);
        } else if(safe_str_neq(data->docker_network, "host")) {
            // No need to do port mapping if net=host
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
        }
    }

    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        /* Replica hosts a cluster-managed child: run pacemaker_remoted (or
         * the user's command) inside the container */
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", SBIN_DIR "/pacemaker_remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         * Support this use-case?
         *
         * The ability to have resources started/stopped by us, but
         * unable to set attributes, etc.
         *
         * Arguably better to control API access this with ACLs like
         * "normal" remote nodes
         *
         * crm_create_nvpair_xml(xml_obj, NULL,
         *                       "run_cmd", "/usr/libexec/pacemaker/lrmd");
         * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *                       "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }


    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?
    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}
334 
/*!
 * \internal
 * \brief Create the rkt primitive controlling one bundle replica
 *
 * rkt counterpart of create_docker_resource(): builds a "rkt" OCF resource
 * with image attributes, the rkt run option string (hostname, network,
 * remote port, volumes/mounts, port mappings, user options), and a monitor
 * operation. On success the resource is unpacked into tuple->docker (the
 * field is shared with the docker variant) and appended to parent->children.
 *
 * \return TRUE on success, FALSE if unpacking the generated XML failed
 *
 * NOTE(review): same snprintf offset-accumulation caveat as the docker
 * variant -- assumed the 4KB budget is never exceeded, confirm. The
 * extracted listing skips one source line (352 -> 354); upstream sanitizes
 * the generated ID there -- confirm.
 */
static bool
create_rkt_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    /* Sequential ID for the generated rkt volume/mount pairs */
    int volid = 0;

    id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
    xml_docker = create_resource(id, "heartbeat", "rkt");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this makes applications happy who need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                           data->prefix, tuple->offset);
    }

    offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    /* Tell pacemaker_remoted inside the container which port to listen on */
    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    /* Translate storage mounts into --volume/--mount pairs; per-replica
     * (flagged) mounts get "<source>/<prefix>-<offset>" as their source and
     * are also recorded in mount_points so the agent can create them. */
    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
        }
        volid++;
    }

    /* Map configured ports; note rkt's --port argument order differs from
     * docker's -p (target first) */
    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
                               port->target, tuple->ipaddr, port->source);
        } else {
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
        }
    }

    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        /* Replica hosts a cluster-managed child: run pacemaker_remoted (or
         * the user's command) inside the container */
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR"/pacemaker_remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         * Support this use-case?
         *
         * The ability to have resources started/stopped by us, but
         * unable to set attributes, etc.
         *
         * Arguably better to control API access this with ACLs like
         * "normal" remote nodes
         *
         * crm_create_nvpair_xml(xml_obj, NULL,
         *                       "run_cmd", "/usr/libexec/pacemaker/lrmd");
         * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *                       "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                  data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }


    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?

    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}
494 
501 static void
502 disallow_node(resource_t *rsc, const char *uname)
503 {
504  gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
505 
506  if (match) {
507  ((pe_node_t *) match)->weight = -INFINITY;
508  ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
509  }
510  if (rsc->children) {
511  GListPtr child;
512 
513  for (child = rsc->children; child != NULL; child = child->next) {
514  disallow_node((resource_t *) (child->data), uname);
515  }
516  }
517 }
518 
/*!
 * \internal
 * \brief Create the remote-connection resource and guest node for a replica
 *
 * Only applies when the replica hosts a child and the bundle network is
 * valid. Creates the ocf:pacemaker:remote connection (with tuple->docker as
 * its container), ensures a pe_node_t exists for the bundle node, and wires
 * up allowed-nodes tables so only this replica's child can run there.
 *
 * \return TRUE on success (or nothing to do), FALSE if unpacking failed
 */
static bool
create_remote_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    if (tuple->child && valid_network(data)) {
        GHashTableIter gIter;
        GListPtr rsc_iter = NULL;
        node_t *node = NULL;
        xmlNode *xml_remote = NULL;
        char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset);
        char *port_s = NULL;
        const char *uname = NULL;
        const char *connect_name = NULL;

        if (remote_id_conflict(id, data_set)) {
            free(id);
            // The biggest hammer we have
            id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset);
            CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
        }

        /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
         * connection does not have its own IP is a magic string that we use to
         * support nested remotes (i.e. a bundle running on a remote node).
         */
        connect_name = (tuple->ipaddr? tuple->ipaddr : "#uname");

        if (data->control_port == NULL) {
            port_s = crm_itoa(DEFAULT_REMOTE_PORT);
        }

        /* This sets tuple->docker as tuple->remote's container, which is
         * similar to what happens with guest nodes. This is how the PE knows
         * that the bundle node is fenced by recovering docker, and that
         * remote should be ordered relative to docker.
         */
        xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id,
                                          NULL, NULL, "60s", NULL,
                                          NULL, connect_name,
                                          (data->control_port?
                                           data->control_port : port_s));
        free(port_s);

        /* Abandon our created ID, and pull the copy from the XML, because we
         * need something that will get freed during data set cleanup to use as
         * the node ID and uname.
         */
        free(id);
        id = NULL;
        uname = ID(xml_remote);

        /* Ensure a node has been created for the guest (it may have already
         * been, if it has a permanent node attribute), and ensure its weight is
         * -INFINITY so no other resources can run on it.
         */
        node = pe_find_node(data_set->nodes, uname);
        if (node == NULL) {
            node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                  data_set);
        } else {
            node->weight = -INFINITY;
        }
        /* NOTE(review): the extracted listing skips one source line here
         * (numbering jumps 581 -> 583); upstream disables probing on the
         * node at this point -- confirm against the original source. */

        /* unpack_remote_nodes() ensures that each remote node and guest node
         * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
         * Unfortunately, a bundle has to be mostly unpacked before it's obvious
         * what nodes will be needed, so we do it just above.
         *
         * Worse, that means that the node may have been utilized while
         * unpacking other resources, without our weight correction. The most
         * likely place for this to happen is when common_unpack() calls
         * resource_location() to set a default score in symmetric clusters.
         * This adds a node *copy* to each resource's allowed nodes, and these
         * copies will have the wrong weight.
         *
         * As a hacky workaround, fix those copies here.
         *
         * @TODO Possible alternative: ensure bundles are unpacked before other
         * resources, so the weight is correct before any copies are made.
         */
        for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
            disallow_node((resource_t *) (rsc_iter->data), uname);
        }

        tuple->node = node_copy(node);
        tuple->node->weight = 500;
        tuple->node->rsc_discover_mode = pe_discover_exclusive;

        /* Ensure the node shows up as allowed and with the correct discovery set */
        if (tuple->child->allowed_nodes != NULL) {
            g_hash_table_destroy(tuple->child->allowed_nodes);
        }
        tuple->child->allowed_nodes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
        g_hash_table_insert(tuple->child->allowed_nodes, (gpointer) tuple->node->details->id, node_copy(tuple->node));

        {
            /* But ban the child's parent (the clone) from the bundle node,
             * so only this replica's instance runs there */
            node_t *copy = node_copy(tuple->node);
            copy->weight = -INFINITY;
            g_hash_table_insert(tuple->child->parent->allowed_nodes, (gpointer) tuple->node->details->id, copy);
        }
        if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) {
            return FALSE;
        }

        g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes);
        while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
            if(is_remote_node(node)) {
                /* Remote resources can only run on 'normal' cluster node */
                node->weight = -INFINITY;
            }
        }

        tuple->node->details->remote_rsc = tuple->remote;
        tuple->remote->container = tuple->docker; // Ensures is_container_remote_node() functions correctly immediately

        /* A bundle's #kind is closer to "container" (guest node) than the
         * "remote" set by pe_create_node().
         */
        g_hash_table_insert(tuple->node->details->attrs,
                            strdup(CRM_ATTR_KIND), strdup("container"));

        /* One effect of this is that setup_container() will add
         * tuple->remote to tuple->docker's fillers, which will make
         * rsc_contains_remote_node() true for tuple->docker.
         *
         * tuple->child does NOT get added to tuple->docker's fillers.
         * The only noticeable effect if it did would be for its fail count to
         * be taken into account when checking tuple->docker's migration
         * threshold.
         */
        parent->children = g_list_append(parent->children, tuple->remote);
    }
    return TRUE;
}
655 
656 static bool
657 create_container(
658  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
659  pe_working_set_t * data_set)
660 {
661 
662  if (data->type == PE_CONTAINER_TYPE_DOCKER &&
663  create_docker_resource(parent, data, tuple, data_set) == FALSE) {
664  return FALSE;
665  }
666  if (data->type == PE_CONTAINER_TYPE_RKT &&
667  create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
668  return FALSE;
669  }
670 
671  if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
672  return FALSE;
673  }
674  if(create_remote_resource(parent, data, tuple, data_set) == FALSE) {
675  return FALSE;
676  }
677  if(tuple->child && tuple->ipaddr) {
678  add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr);
679  }
680 
681  if(tuple->remote) {
682  /*
683  * Allow the remote connection resource to be allocated to a
684  * different node than the one on which the docker container
685  * is active.
686  *
687  * Makes it possible to have remote nodes, running docker
688  * containers with pacemaker_remoted inside in order to start
689  * services inside those containers.
690  */
691  set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes);
692  }
693 
694  return TRUE;
695 }
696 
697 static void
698 mount_add(container_variant_data_t *container_data, const char *source,
699  const char *target, const char *options, int flags)
700 {
701  container_mount_t *mount = calloc(1, sizeof(container_mount_t));
702 
703  mount->source = strdup(source);
704  mount->target = strdup(target);
705  if (options) {
706  mount->options = strdup(options);
707  }
708  mount->flags = flags;
709  container_data->mounts = g_list_append(container_data->mounts, mount);
710 }
711 
712 static void mount_free(container_mount_t *mount)
713 {
714  free(mount->source);
715  free(mount->target);
716  free(mount->options);
717  free(mount);
718 }
719 
720 static void port_free(container_port_t *port)
721 {
722  free(port->source);
723  free(port->target);
724  free(port);
725 }
726 
727 static container_grouping_t *
728 tuple_for_remote(resource_t *remote)
729 {
730  resource_t *top = remote;
731  container_variant_data_t *container_data = NULL;
732 
733  if (top == NULL) {
734  return NULL;
735  }
736 
737  while (top->parent != NULL) {
738  top = top->parent;
739  }
740 
741  get_container_variant_data(container_data, top);
742  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
743  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
744  if(tuple->remote == remote) {
745  return tuple;
746  }
747  }
748  CRM_LOG_ASSERT(FALSE);
749  return NULL;
750 }
751 
752 bool
754 {
755  const char *name;
756  const char *value;
757  const char *attr_list[] = {
761  };
762  const char *value_list[] = {
763  "remote",
765  "pacemaker"
766  };
767 
768  if(rsc == NULL) {
769  return FALSE;
770  }
771 
772  name = "addr";
773  value = g_hash_table_lookup(rsc->parameters, name);
774  if (safe_str_eq(value, "#uname") == FALSE) {
775  return FALSE;
776  }
777 
778  for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) {
779  value = crm_element_value(rsc->xml, attr_list[lpc]);
780  if (safe_str_eq(value, value_list[lpc]) == FALSE) {
781  return FALSE;
782  }
783  }
784  return TRUE;
785 }
786 
/*!
 * \internal
 * \brief Substitute a bundle host's uname for a remote's "#uname" address
 *
 * For a remote connection flagged by container_fix_remote_addr(), determine
 * the node hosting the replica's container (preferring its assigned node,
 * falling back to where it is currently running) and, if \p xml and
 * \p field are given, write that node's uname into the XML.
 *
 * \param[in]     rsc    Remote connection resource
 * \param[in,out] xml    XML to update with the address (may be NULL)
 * \param[in]     field  Attribute name to set in \p xml (may be NULL)
 *
 * \return Host node's uname, or NULL if no substitution applies or the
 *         host cannot be determined
 */
const char *
container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
{
    // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside

    pe_node_t *node = NULL;
    container_grouping_t *tuple = NULL;

    if(container_fix_remote_addr(rsc) == FALSE) {
        return NULL;
    }

    tuple = tuple_for_remote(rsc);
    if(tuple == NULL) {
        return NULL;
    }

    node = tuple->docker->allocated_to;
    if (node == NULL) {
        /* If it won't be running anywhere after the
         * transition, go with where it's running now.
         */
        node = pe__current_node(tuple->docker);
    }

    if(node == NULL) {
        crm_trace("Cannot determine address for bundle connection %s", rsc->id);
        return NULL;
    }

    crm_trace("Setting address for bundle connection %s to bundle host %s",
              rsc->id, node->details->uname);
    if(xml != NULL && field != NULL) {
        crm_xml_add(xml, field, node->details->uname);
    }

    return node->details->uname;
}
825 
826 gboolean
828 {
829  const char *value = NULL;
830  xmlNode *xml_obj = NULL;
831  xmlNode *xml_resource = NULL;
832  container_variant_data_t *container_data = NULL;
833 
834  CRM_ASSERT(rsc != NULL);
835  pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
836 
837  container_data = calloc(1, sizeof(container_variant_data_t));
838  rsc->variant_opaque = container_data;
839  container_data->prefix = strdup(rsc->id);
840 
841  xml_obj = first_named_child(rsc->xml, "docker");
842  if (xml_obj != NULL) {
843  container_data->type = PE_CONTAINER_TYPE_DOCKER;
844  } else {
845  xml_obj = first_named_child(rsc->xml, "rkt");
846  if (xml_obj != NULL) {
847  container_data->type = PE_CONTAINER_TYPE_RKT;
848  } else {
849  return FALSE;
850  }
851  }
852 
853  value = crm_element_value(xml_obj, "masters");
854  container_data->masters = crm_parse_int(value, "0");
855  if (container_data->masters < 0) {
856  pe_err("'masters' for %s must be nonnegative integer, using 0",
857  rsc->id);
858  container_data->masters = 0;
859  }
860 
861  value = crm_element_value(xml_obj, "replicas");
862  if ((value == NULL) && (container_data->masters > 0)) {
863  container_data->replicas = container_data->masters;
864  } else {
865  container_data->replicas = crm_parse_int(value, "1");
866  }
867  if (container_data->replicas < 1) {
868  pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
869  container_data->replicas = 1;
870  }
871 
872  /*
873  * Communication between containers on the same host via the
874  * floating IPs only works if docker is started with:
875  * --userland-proxy=false --ip-masq=false
876  */
877  value = crm_element_value(xml_obj, "replicas-per-host");
878  container_data->replicas_per_host = crm_parse_int(value, "1");
879  if (container_data->replicas_per_host < 1) {
880  pe_err("'replicas-per-host' for %s must be positive integer, using 1",
881  rsc->id);
882  container_data->replicas_per_host = 1;
883  }
884  if (container_data->replicas_per_host == 1) {
886  }
887 
888  container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command");
889  container_data->docker_run_options = crm_element_value_copy(xml_obj, "options");
890  container_data->image = crm_element_value_copy(xml_obj, "image");
891  container_data->docker_network = crm_element_value_copy(xml_obj, "network");
892 
893  xml_obj = first_named_child(rsc->xml, "network");
894  if(xml_obj) {
895 
896  container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
897  container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
898  container_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
899  container_data->control_port = crm_element_value_copy(xml_obj, "control-port");
900 
901  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
902  xml_child = __xml_next_element(xml_child)) {
903 
904  container_port_t *port = calloc(1, sizeof(container_port_t));
905  port->source = crm_element_value_copy(xml_child, "port");
906 
907  if(port->source == NULL) {
908  port->source = crm_element_value_copy(xml_child, "range");
909  } else {
910  port->target = crm_element_value_copy(xml_child, "internal-port");
911  }
912 
913  if(port->source != NULL && strlen(port->source) > 0) {
914  if(port->target == NULL) {
915  port->target = strdup(port->source);
916  }
917  container_data->ports = g_list_append(container_data->ports, port);
918 
919  } else {
920  pe_err("Invalid port directive %s", ID(xml_child));
921  port_free(port);
922  }
923  }
924  }
925 
926  xml_obj = first_named_child(rsc->xml, "storage");
927  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
928  xml_child = __xml_next_element(xml_child)) {
929 
930  const char *source = crm_element_value(xml_child, "source-dir");
931  const char *target = crm_element_value(xml_child, "target-dir");
932  const char *options = crm_element_value(xml_child, "options");
933  int flags = 0;
934 
935  if (source == NULL) {
936  source = crm_element_value(xml_child, "source-dir-root");
937  flags = 1;
938  }
939 
940  if (source && target) {
941  mount_add(container_data, source, target, options, flags);
942  } else {
943  pe_err("Invalid mount directive %s", ID(xml_child));
944  }
945  }
946 
947  xml_obj = first_named_child(rsc->xml, "primitive");
948  if (xml_obj && valid_network(container_data)) {
949  char *value = NULL;
950  xmlNode *xml_set = NULL;
951 
952  if(container_data->masters > 0) {
953  xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER);
954 
955  } else {
956  xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
957  }
958 
959  crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name);
960 
961  xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
962  crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name);
963 
964  crm_create_nvpair_xml(xml_set, NULL,
966 
967  value = crm_itoa(container_data->replicas);
968  crm_create_nvpair_xml(xml_set, NULL,
970  free(value);
971 
972  value = crm_itoa(container_data->replicas_per_host);
973  crm_create_nvpair_xml(xml_set, NULL,
975  free(value);
976 
978  (container_data->replicas_per_host > 1)?
980 
981  if(container_data->masters) {
982  value = crm_itoa(container_data->masters);
983  crm_create_nvpair_xml(xml_set, NULL,
984  XML_RSC_ATTR_MASTER_MAX, value);
985  free(value);
986  }
987 
988  //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix);
989  add_node_copy(xml_resource, xml_obj);
990 
991  } else if(xml_obj) {
992  pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
993  rsc->id, ID(xml_obj));
994  return FALSE;
995  }
996 
997  if(xml_resource) {
998  int lpc = 0;
999  GListPtr childIter = NULL;
1000  resource_t *new_rsc = NULL;
1001  container_port_t *port = NULL;
1002 
1003  int offset = 0, max = 1024;
1004  char *buffer = NULL;
1005 
1006  if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
1007  pe_err("Failed unpacking resource %s", ID(rsc->xml));
1008  if (new_rsc != NULL && new_rsc->fns != NULL) {
1009  new_rsc->fns->free(new_rsc);
1010  }
1011  return FALSE;
1012  }
1013 
1014  container_data->child = new_rsc;
1015 
1016  /* Currently, we always map the default authentication key location
1017  * into the same location inside the container.
1018  *
1019  * Ideally, we would respect the host's PCMK_authkey_location, but:
1020  * - it may be different on different nodes;
1021  * - the actual connection will do extra checking to make sure the key
1022  * file exists and is readable, that we can't do here on the DC
1023  * - tools such as crm_resource and crm_simulate may not have the same
1024  * environment variables as the cluster, causing operation digests to
1025  * differ
1026  *
1027  * Always using the default location inside the container is fine,
1028  * because we control the pacemaker_remote environment, and it avoids
1029  * having to pass another environment variable to the container.
1030  *
1031  * @TODO A better solution may be to have only pacemaker_remote use the
1032  * environment variable, and have the cluster nodes use a new
1033  * cluster option for key location. This would introduce the limitation
1034  * of the location being the same on all cluster nodes, but that's
1035  * reasonable.
1036  */
1037  mount_add(container_data, DEFAULT_REMOTE_KEY_LOCATION,
1038  DEFAULT_REMOTE_KEY_LOCATION, NULL, 0);
1039 
1040  mount_add(container_data, CRM_LOG_DIR "/bundles", "/var/log", NULL, 1);
1041 
1042  port = calloc(1, sizeof(container_port_t));
1043  if(container_data->control_port) {
1044  port->source = strdup(container_data->control_port);
1045  } else {
1046  /* If we wanted to respect PCMK_remote_port, we could use
1047  * crm_default_remote_port() here and elsewhere in this file instead
1048  * of DEFAULT_REMOTE_PORT.
1049  *
1050  * However, it gains nothing, since we control both the container
1051  * environment and the connection resource parameters, and the user
1052  * can use a different port if desired by setting control-port.
1053  */
1054  port->source = crm_itoa(DEFAULT_REMOTE_PORT);
1055  }
1056  port->target = strdup(port->source);
1057  container_data->ports = g_list_append(container_data->ports, port);
1058 
1059  buffer = calloc(1, max+1);
1060  for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) {
1061  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
1062  tuple->child = childIter->data;
1063  tuple->child->exclusive_discover = TRUE;
1064  tuple->offset = lpc++;
1065 
1066  // Ensure the child's notify gets set based on the underlying primitive's value
1067  if(is_set(tuple->child->flags, pe_rsc_notify)) {
1068  set_bit(container_data->child->flags, pe_rsc_notify);
1069  }
1070 
1071  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
1072  container_data->tuples = g_list_append(container_data->tuples, tuple);
1073  container_data->attribute_target = g_hash_table_lookup(tuple->child->meta, XML_RSC_ATTR_TARGET);
1074  }
1075  container_data->docker_host_options = buffer;
1076  if(container_data->attribute_target) {
1077  g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
1078  g_hash_table_replace(container_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
1079  }
1080 
1081  } else {
1082  // Just a naked container, no pacemaker-remote
1083  int offset = 0, max = 1024;
1084  char *buffer = calloc(1, max+1);
1085 
1086  for(int lpc = 0; lpc < container_data->replicas; lpc++) {
1087  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
1088  tuple->offset = lpc;
1089  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
1090  container_data->tuples = g_list_append(container_data->tuples, tuple);
1091  }
1092 
1093  container_data->docker_host_options = buffer;
1094  }
1095 
1096  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1097  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1098  if (create_container(rsc, container_data, tuple, data_set) == FALSE) {
1099  pe_err("Failed unpacking resource %s", rsc->id);
1100  rsc->fns->free(rsc);
1101  return FALSE;
1102  }
1103  }
1104 
1105  if(container_data->child) {
1106  rsc->children = g_list_append(rsc->children, container_data->child);
1107  }
1108  return TRUE;
1109 }
1110 
1111 static int
1112 tuple_rsc_active(resource_t *rsc, gboolean all)
1113 {
1114  if (rsc) {
1115  gboolean child_active = rsc->fns->active(rsc, all);
1116 
1117  if (child_active && !all) {
1118  return TRUE;
1119  } else if (!child_active && all) {
1120  return FALSE;
1121  }
1122  }
1123  return -1;
1124 }
1125 
1126 gboolean
1127 container_active(resource_t * rsc, gboolean all)
1128 {
1129  container_variant_data_t *container_data = NULL;
1130  GListPtr iter = NULL;
1131 
1132  get_container_variant_data(container_data, rsc);
1133  for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
1134  container_grouping_t *tuple = (container_grouping_t *)(iter->data);
1135  int rsc_active;
1136 
1137  rsc_active = tuple_rsc_active(tuple->ip, all);
1138  if (rsc_active >= 0) {
1139  return (gboolean) rsc_active;
1140  }
1141 
1142  rsc_active = tuple_rsc_active(tuple->child, all);
1143  if (rsc_active >= 0) {
1144  return (gboolean) rsc_active;
1145  }
1146 
1147  rsc_active = tuple_rsc_active(tuple->docker, all);
1148  if (rsc_active >= 0) {
1149  return (gboolean) rsc_active;
1150  }
1151 
1152  rsc_active = tuple_rsc_active(tuple->remote, all);
1153  if (rsc_active >= 0) {
1154  return (gboolean) rsc_active;
1155  }
1156  }
1157 
1158  /* If "all" is TRUE, we've already checked that no resources were inactive,
1159  * so return TRUE; if "all" is FALSE, we didn't find any active resources,
1160  * so return FALSE.
1161  */
1162  return all;
1163 }
1164 
1174 resource_t *
1175 find_container_child(const resource_t *bundle, const node_t *node)
1176 {
1177  container_variant_data_t *container_data = NULL;
1178  CRM_ASSERT(bundle && node);
1179 
1180  get_container_variant_data(container_data, bundle);
1181  for (GListPtr gIter = container_data->tuples; gIter != NULL;
1182  gIter = gIter->next) {
1183  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1184 
1185  CRM_ASSERT(tuple && tuple->node);
1186  if (tuple->node->details == node->details) {
1187  return tuple->child;
1188  }
1189  }
1190  return NULL;
1191 }
1192 
1193 static void
1194 print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
1195  void *print_data)
1196 {
1197  if (rsc != NULL) {
1198  if (options & pe_print_html) {
1199  status_print("<li>");
1200  }
1201  rsc->fns->print(rsc, pre_text, options, print_data);
1202  if (options & pe_print_html) {
1203  status_print("</li>\n");
1204  }
1205  }
1206 }
1207 
1208 static const char*
1209 container_type_as_string(enum container_type t)
1210 {
1211  if (t == PE_CONTAINER_TYPE_DOCKER) {
1212  return PE_CONTAINER_TYPE_DOCKER_S;
1213  } else if (t == PE_CONTAINER_TYPE_RKT) {
1214  return PE_CONTAINER_TYPE_RKT_S;
1215  } else {
1216  return PE_CONTAINER_TYPE_UNKNOWN_S;
1217  }
1218 }
1219 
1220 static void
1221 container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
1222 {
1223  container_variant_data_t *container_data = NULL;
1224  char *child_text = NULL;
1225  CRM_CHECK(rsc != NULL, return);
1226 
1227  if (pre_text == NULL) {
1228  pre_text = "";
1229  }
1230  child_text = crm_concat(pre_text, " ", ' ');
1231 
1232  get_container_variant_data(container_data, rsc);
1233 
1234  status_print("%s<bundle ", pre_text);
1235  status_print("id=\"%s\" ", rsc->id);
1236 
1237  // Always lowercase the container technology type for use as XML value
1238  status_print("type=\"");
1239  for (const char *c = container_type_as_string(container_data->type);
1240  *c; ++c) {
1241  status_print("%c", tolower(*c));
1242  }
1243  status_print("\" ");
1244 
1245  status_print("image=\"%s\" ", container_data->image);
1246  status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
1247  status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
1248  status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
1249  status_print(">\n");
1250 
1251  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1252  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1253 
1254  CRM_ASSERT(tuple);
1255  status_print("%s <replica id=\"%d\">\n", pre_text, tuple->offset);
1256  print_rsc_in_list(tuple->ip, child_text, options, print_data);
1257  print_rsc_in_list(tuple->child, child_text, options, print_data);
1258  print_rsc_in_list(tuple->docker, child_text, options, print_data);
1259  print_rsc_in_list(tuple->remote, child_text, options, print_data);
1260  status_print("%s </replica>\n", pre_text);
1261  }
1262  status_print("%s</bundle>\n", pre_text);
1263  free(child_text);
1264 }
1265 
1266 static void
1267 tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
1268 {
1269  node_t *node = NULL;
1270  resource_t *rsc = tuple->child;
1271 
1272  int offset = 0;
1273  char buffer[LINE_MAX];
1274 
1275  if(rsc == NULL) {
1276  rsc = tuple->docker;
1277  }
1278 
1279  if(tuple->remote) {
1280  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote));
1281  } else {
1282  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker));
1283  }
1284  if(tuple->ipaddr) {
1285  offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
1286  }
1287 
1288  node = pe__current_node(tuple->docker);
1289  common_print(rsc, pre_text, buffer, node, options, print_data);
1290 }
1291 
1292 void
1293 container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
1294 {
1295  container_variant_data_t *container_data = NULL;
1296  char *child_text = NULL;
1297  CRM_CHECK(rsc != NULL, return);
1298 
1299  if (options & pe_print_xml) {
1300  container_print_xml(rsc, pre_text, options, print_data);
1301  return;
1302  }
1303 
1304  get_container_variant_data(container_data, rsc);
1305 
1306  if (pre_text == NULL) {
1307  pre_text = " ";
1308  }
1309 
1310  status_print("%s%s container%s: %s [%s]%s%s\n",
1311  pre_text, container_type_as_string(container_data->type),
1312  container_data->replicas>1?" set":"", rsc->id, container_data->image,
1313  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
1314  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
1315  if (options & pe_print_html) {
1316  status_print("<br />\n<ul>\n");
1317  }
1318 
1319 
1320  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1321  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1322 
1323  CRM_ASSERT(tuple);
1324  if (options & pe_print_html) {
1325  status_print("<li>");
1326  }
1327 
1328  if (is_set(options, pe_print_implicit)) {
1329  child_text = crm_strdup_printf(" %s", pre_text);
1330  if(g_list_length(container_data->tuples) > 1) {
1331  status_print(" %sReplica[%d]\n", pre_text, tuple->offset);
1332  }
1333  if (options & pe_print_html) {
1334  status_print("<br />\n<ul>\n");
1335  }
1336  print_rsc_in_list(tuple->ip, child_text, options, print_data);
1337  print_rsc_in_list(tuple->docker, child_text, options, print_data);
1338  print_rsc_in_list(tuple->remote, child_text, options, print_data);
1339  print_rsc_in_list(tuple->child, child_text, options, print_data);
1340  if (options & pe_print_html) {
1341  status_print("</ul>\n");
1342  }
1343  } else {
1344  child_text = crm_strdup_printf("%s ", pre_text);
1345  tuple_print(tuple, child_text, options, print_data);
1346  }
1347  free(child_text);
1348 
1349  if (options & pe_print_html) {
1350  status_print("</li>\n");
1351  }
1352  }
1353  if (options & pe_print_html) {
1354  status_print("</ul>\n");
1355  }
1356 }
1357 
1358 void
1359 tuple_free(container_grouping_t *tuple)
1360 {
1361  if(tuple == NULL) {
1362  return;
1363  }
1364 
1365  if(tuple->node) {
1366  free(tuple->node);
1367  tuple->node = NULL;
1368  }
1369 
1370  if(tuple->ip) {
1371  free_xml(tuple->ip->xml);
1372  tuple->ip->xml = NULL;
1373  tuple->ip->fns->free(tuple->ip);
1374  tuple->ip = NULL;
1375  }
1376  if(tuple->docker) {
1377  free_xml(tuple->docker->xml);
1378  tuple->docker->xml = NULL;
1379  tuple->docker->fns->free(tuple->docker);
1380  tuple->docker = NULL;
1381  }
1382  if(tuple->remote) {
1383  free_xml(tuple->remote->xml);
1384  tuple->remote->xml = NULL;
1385  tuple->remote->fns->free(tuple->remote);
1386  tuple->remote = NULL;
1387  }
1388  free(tuple->ipaddr);
1389  free(tuple);
1390 }
1391 
1392 void
1394 {
1395  container_variant_data_t *container_data = NULL;
1396  CRM_CHECK(rsc != NULL, return);
1397 
1398  get_container_variant_data(container_data, rsc);
1399  pe_rsc_trace(rsc, "Freeing %s", rsc->id);
1400 
1401  free(container_data->prefix);
1402  free(container_data->image);
1403  free(container_data->control_port);
1404  free(container_data->host_network);
1405  free(container_data->host_netmask);
1406  free(container_data->ip_range_start);
1407  free(container_data->docker_network);
1408  free(container_data->docker_run_options);
1409  free(container_data->docker_run_command);
1410  free(container_data->docker_host_options);
1411 
1412  g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
1413  g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
1414  g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
1415  g_list_free(rsc->children);
1416 
1417  if(container_data->child) {
1418  free_xml(container_data->child->xml);
1419  container_data->child->xml = NULL;
1420  container_data->child->fns->free(container_data->child);
1421  }
1422  common_free(rsc);
1423 }
1424 
1425 enum rsc_role_e
1426 container_resource_state(const resource_t * rsc, gboolean current)
1427 {
1428  enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
1429  return container_role;
1430 }
1431 
1439 int
1441 {
1442  if ((rsc == NULL) || (rsc->variant != pe_container)) {
1443  return 0;
1444  } else {
1445  container_variant_data_t *container_data = NULL;
1446 
1447  get_container_variant_data(container_data, rsc);
1448  return container_data->replicas;
1449  }
1450 }
1451 
1452 void
1454 {
1455  container_variant_data_t *bundle_data = NULL;
1456 
1457  get_container_variant_data(bundle_data, rsc);
1458  for (GList *item = bundle_data->tuples; item != NULL; item = item->next) {
1459  container_grouping_t *replica = item->data;
1460 
1461  if (replica->ip) {
1462  replica->ip->fns->count(replica->ip);
1463  }
1464  if (replica->child) {
1465  replica->child->fns->count(replica->child);
1466  }
1467  if (replica->docker) {
1468  replica->docker->fns->count(replica->docker);
1469  }
1470  if (replica->remote) {
1471  replica->remote->fns->count(replica->remote);
1472  }
1473  }
1474 }
bool remote_id_conflict(const char *remote_name, pe_working_set_t *data)
Definition: unpack.c:437
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:190
GListPtr nodes
Definition: status.h:125
const char * uname
Definition: status.h:173
xmlNode * xml
Definition: status.h:294
gboolean safe_str_neq(const char *a, const char *b)
Definition: strings.c:182
#define INFINITY
Definition: crm.h:73
int pe_bundle_replicas(const resource_t *rsc)
Get the number of configured replicas in a bundle.
Definition: container.c:1440
#define CRM_ATTR_KIND
Definition: crm.h:90
node_t * node_copy(const node_t *this_node)
Definition: utils.c:141
int weight
Definition: status.h:210
node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t *data_set)
Definition: unpack.c:375
#define XML_ATTR_TYPE
Definition: msg_xml.h:105
void(* free)(resource_t *)
Definition: complex.h:41
#define XML_BOOLEAN_FALSE
Definition: msg_xml.h:118
gboolean common_unpack(xmlNode *xml_obj, resource_t **rsc, resource_t *parent, pe_working_set_t *data_set)
Definition: complex.c:463
enum pe_obj_types variant
Definition: status.h:300
void common_free(resource_t *rsc)
Definition: complex.c:918
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:216
#define status_print(fmt, args...)
Definition: unpack.h:79
int crm_parse_int(const char *text, const char *default_text)
Parse an integer value from a string.
Definition: strings.c:157
GListPtr resources
Definition: status.h:126
node_t * pe_find_node(GListPtr node_list, const char *uname)
Definition: status.c:444
#define CRM_LOG_ASSERT(expr)
Definition: logging.h:176
#define clear_bit(word, bit)
Definition: crm_internal.h:211
#define XML_RSC_ATTR_INCARNATION_MAX
Definition: msg_xml.h:213
GListPtr children
Definition: status.h:337
#define XML_RSC_ATTR_TARGET
Definition: msg_xml.h:204
#define pe_rsc_allow_remote_remotes
Definition: status.h:235
void crm_xml_sanitize_id(char *id)
Sanitize a string so it is usable as an XML ID.
Definition: xml.c:2416
char * id
Definition: status.h:292
GHashTable * parameters
Definition: status.h:334
#define DEFAULT_REMOTE_PORT
Definition: lrmd.h:54
#define DEFAULT_REMOTE_KEY_LOCATION
Definition: lrmd.h:52
#define CRM_LOG_DIR
Definition: config.h:59
#define XML_TAG_ATTR_SETS
Definition: msg_xml.h:185
char uname[MAX_NAME]
Definition: internal.h:81
gboolean is_remote_node(node_t *node)
Definition: remote.c:52
struct node_shared_s * details
Definition: status.h:213
#define set_bit(word, bit)
Definition: crm_internal.h:210
#define PCMK_RESOURCE_CLASS_OCF
Definition: services.h:57
xmlNode * pe_create_remote_xml(xmlNode *parent, const char *uname, const char *container_id, const char *migrateable, const char *is_managed, const char *interval, const char *monitor_timeout, const char *start_timeout, const char *server, const char *port)
Definition: remote.c:148
char * crm_element_value_copy(const xmlNode *data, const char *name)
Retrieve a copy of the value of an XML attribute.
Definition: nvpair.c:570
#define XML_ATTR_ID
Definition: msg_xml.h:102
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:393
#define XML_CIB_TAG_RESOURCE
Definition: msg_xml.h:196
#define XML_BOOLEAN_TRUE
Definition: msg_xml.h:117
#define pe_rsc_failed
Definition: status.h:237
resource_object_functions_t * fns
Definition: status.h:301
resource_t * find_container_child(const resource_t *bundle, const node_t *node)
Definition: container.c:1175
GHashTable * allowed_nodes
Definition: status.h:328
void * variant_opaque
Definition: status.h:299
#define crm_trace(fmt, args...)
Definition: logging.h:280
xmlNode * add_node_copy(xmlNode *new_parent, xmlNode *xml_node)
Definition: xml.c:1955
xmlNode * crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task, const char *interval, const char *timeout)
Create a CIB XML element for an operation.
Definition: operations.c:455
#define XML_AGENT_ATTR_PROVIDER
Definition: msg_xml.h:258
#define XML_RSC_ATTR_ORDERED
Definition: msg_xml.h:210
#define XML_TAG_META_SETS
Definition: msg_xml.h:186
xmlNode * create_xml_node(xmlNode *parent, const char *name)
Definition: xml.c:1977
unsigned long long flags
Definition: status.h:316
#define XML_RSC_ATTR_INCARNATION_NODEMAX
Definition: msg_xml.h:215
resource_t * parent
Definition: status.h:298
void free_xml(xmlNode *child)
Definition: xml.c:2108
bool container_fix_remote_addr(resource_t *rsc)
Definition: container.c:753
#define XML_RSC_ATTR_UNIQUE
Definition: msg_xml.h:221
gboolean(* active)(resource_t *, gboolean)
Definition: complex.h:38
void common_print(resource_t *rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data)
Definition: native.c:554
#define XML_RSC_ATTR_MASTER_MAX
Definition: msg_xml.h:216
void(* print)(resource_t *, const char *, long, void *)
Definition: complex.h:37
#define pe_rsc_unique
Definition: status.h:225
gboolean container_unpack(resource_t *rsc, pe_working_set_t *data_set)
Definition: container.c:827
#define SBIN_DIR
Definition: config.h:697
GHashTable * meta
Definition: status.h:333
enum rsc_role_e container_resource_state(const resource_t *rsc, gboolean current)
Definition: container.c:1426
Cluster status and scheduling.
void tuple_free(container_grouping_t *tuple)
Definition: container.c:1359
#define XML_CIB_TAG_INCARNATION
Definition: msg_xml.h:198
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:442
void pe__count_bundle(pe_resource_t *rsc)
Definition: container.c:1453
void crm_xml_set_id(xmlNode *xml, const char *format,...) __attribute__((__format__(__printf__
gboolean container_active(resource_t *rsc, gboolean all)
Definition: container.c:1127
#define DIMOF(a)
Definition: crm.h:29
#define pe_rsc_managed
Definition: status.h:220
#define crm_str_hash
Definition: util.h:75
#define CRM_ASSERT(expr)
Definition: error.h:20
char data[0]
Definition: internal.h:86
rsc_role_e
Definition: common.h:81
#define XML_CIB_TAG_MASTER
Definition: msg_xml.h:199
int rsc_discover_mode
Definition: status.h:214
xmlNode * first_named_child(xmlNode *parent, const char *name)
Definition: xml.c:4241
Definition: status.h:209
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:16
xmlNode * crm_create_nvpair_xml(xmlNode *parent, const char *id, const char *name, const char *value)
Create an XML name/value pair.
Definition: nvpair.c:692
char * crm_concat(const char *prefix, const char *suffix, char join)
Definition: strings.c:33
#define ID(x)
Definition: msg_xml.h:452
#define pe_err(fmt...)
Definition: internal.h:18
char * crm_itoa(int an_int)
Definition: strings.c:61
#define safe_str_eq(a, b)
Definition: util.h:74
char * crm_strdup_printf(char const *format,...) __attribute__((__format__(__printf__
const char * container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
Definition: container.c:788
void container_print(resource_t *rsc, const char *pre_text, long options, void *print_data)
Definition: container.c:1293
GList * GListPtr
Definition: crm.h:210
#define pe_rsc_notify
Definition: status.h:224
void g_hash_destroy_str(gpointer data)
Definition: strings.c:75
const char * rsc_printable_id(resource_t *rsc)
Definition: utils.c:2291
uint64_t flags
Definition: remote.c:156
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:257
void container_free(resource_t *rsc)
Definition: container.c:1393