#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
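/* Lookup by node number: presumably from o2nm_get_node_by_num(), which
 * indexes the single cluster's cl_nodes[] array. */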
        node = o2nm_single_cluster->cl_nodes[node_num];
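/* In o2nm_node_ip_tree_lookup() the rb-tree parent found during the walk is
 * handed back through the optional ret_parent out-parameter. */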
        if (ret_parent != NULL)
                *ret_parent = parent;
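/* Callers that only need the node itself pass NULL for both out-parameters. */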
        node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
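/* The "local node" check, presumably from o2nm_this_node(): it is only
 * meaningful once a cluster exists and one of its nodes is marked local. */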
        if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
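/* Registered below as the configfs ->release handler for node items. */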
static void o2nm_node_release(struct config_item *item)
{
        struct o2nm_node *node = to_o2nm_node(item);
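/* to_o2nm_cluster_from_node(): a node's configfs parent is the node group,
 * whose parent in turn is the cluster. */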
        return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
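/* Store-side parsing, presumably from o2nm_node_num_write(): input is
 * rejected unless the number is followed by nothing or a single newline. */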
        struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
        char *p = (char *)page;

        if (!p || (*p && (*p != '\n')))
static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
                                         const char *page, size_t count)
        char *p = (char *)page;

        if (!p || (*p && (*p != '\n')))
static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
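/* Presumably from o2nm_node_ipv4_address_write(): parse a dotted quad and
 * probe the cluster's IP rb-tree for a duplicate before inserting. */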
        struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
        unsigned int octets[4];

        ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
                     &octets[1], &octets[0]);

                be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));

        if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
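/* Marking a node "local" is what makes this host start o2net listening
 * (presumably via o2net_start_listening() later in this handler). */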
static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
                                     size_t count)
        struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
        char *p = (char *)page;

        if (!p || (*p && (*p != '\n')))
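/* configfs attribute definitions: each show/store pair is exposed as a file
 * named by ca_name in the node's configfs directory. */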
        .show = o2nm_node_num_read,
        .store = o2nm_node_num_write,

            .ca_name = "ipv4_port",
        .show = o2nm_node_ipv4_port_read,
        .store = o2nm_node_ipv4_port_write,

            .ca_name = "ipv4_address",
        .show = o2nm_node_ipv4_address_read,
        .store = o2nm_node_ipv4_address_write,

        .show = o2nm_node_local_read,
        .store = o2nm_node_local_write,
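/* o2nm_attr_index() (used by the store path below) maps a configfs_attribute
 * back to its slot in o2nm_node_attrs[]. */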
        for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
                if (attr == o2nm_node_attrs[i])
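/* o2nm_node_show()/o2nm_node_store() dispatch through the per-attribute
 * handlers; an attribute without a handler is simply skipped. */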
        struct o2nm_node *node = to_o2nm_node(item);

        if (o2nm_node_attr->show)
                ret = o2nm_node_attr->show(node, page);
static ssize_t o2nm_node_store(struct config_item *item,
                               struct configfs_attribute *attr,
                               const char *page, size_t count)
        struct o2nm_node *node = to_o2nm_node(item);
        int attr_index = o2nm_attr_index(attr);

        ret = o2nm_node_attr->store(node, page, count);
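/* The item_ops below use the older configfs show_attribute/store_attribute
 * hooks rather than per-attribute show/store operations. */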
        .release = o2nm_node_release,
        .show_attribute = o2nm_node_show,
        .store_attribute = o2nm_node_store,

        .ct_item_ops = &o2nm_node_item_ops,
        .ct_attrs = o2nm_node_attrs,
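/* Shared numeric parser used by the cluster attribute store handlers below. */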
static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
                                       unsigned int *val)
        char *p = (char *)page;

        if (!p || (*p && (*p != '\n')))
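/* idle_timeout_ms and keepalive_delay_ms validate against each other and
 * refuse changes once a peer connection exists (see the messages below). */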
static ssize_t o2nm_cluster_attr_idle_timeout_ms_read(
        struct o2nm_cluster *cluster, char *page)

static ssize_t o2nm_cluster_attr_idle_timeout_ms_write(
        struct o2nm_cluster *cluster, const char *page, size_t count)
        ret = o2nm_cluster_attr_write(page, count, &val);

                        mlog(ML_NOTICE,
                             "o2net: cannot change idle timeout after "
                             "the first peer has agreed to it."
                             " %d connected peers\n",

                        mlog(ML_NOTICE, "o2net: idle timeout must be larger "
                             "than keepalive delay\n");
static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read(
        struct o2nm_cluster *cluster, char *page)

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write(
        struct o2nm_cluster *cluster, const char *page, size_t count)
        ret = o2nm_cluster_attr_write(page, count, &val);

                        mlog(ML_NOTICE,
                             "o2net: cannot change keepalive delay after"
                             " the first peer has agreed to it."
                             " %d connected peers\n",

                        mlog(ML_NOTICE, "o2net: keepalive delay must be "
                             "smaller than idle timeout\n");
static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read(
        struct o2nm_cluster *cluster, char *page)

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
        struct o2nm_cluster *cluster, const char *page, size_t count)
{
        return o2nm_cluster_attr_write(page, count,
                                       &cluster->cl_reconnect_delay_ms);
static ssize_t o2nm_cluster_attr_fence_method_read(
        struct o2nm_cluster *cluster, char *page)

static ssize_t o2nm_cluster_attr_fence_method_write(
        struct o2nm_cluster *cluster, const char *page, size_t count)
        if (page[count - 1] != '\n')
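/* Cluster-level attribute definitions: each tunable becomes a file under the
 * cluster's configfs directory (typically /sys/kernel/config/cluster/<name>/). */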
            .ca_name = "idle_timeout_ms",
        .show = o2nm_cluster_attr_idle_timeout_ms_read,
        .store = o2nm_cluster_attr_idle_timeout_ms_write,

            .ca_name = "keepalive_delay_ms",
        .show = o2nm_cluster_attr_keepalive_delay_ms_read,
        .store = o2nm_cluster_attr_keepalive_delay_ms_write,

            .ca_name = "reconnect_delay_ms",
        .show = o2nm_cluster_attr_reconnect_delay_ms_read,
        .store = o2nm_cluster_attr_reconnect_delay_ms_write,

            .ca_name = "fence_method",
        .show = o2nm_cluster_attr_fence_method_read,
        .store = o2nm_cluster_attr_fence_method_write,
        &o2nm_cluster_attr_idle_timeout_ms.attr,
        &o2nm_cluster_attr_keepalive_delay_ms.attr,
        &o2nm_cluster_attr_reconnect_delay_ms.attr,
        &o2nm_cluster_attr_fence_method.attr,
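/* Cluster show/store dispatch mirrors the node-attribute dispatch above. */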
        if (o2nm_cluster_attr->show)
                ret = o2nm_cluster_attr->show(cluster, page);
static ssize_t o2nm_cluster_store(struct config_item *item,
                                  struct configfs_attribute *attr,
                                  const char *page, size_t count)

        ret = o2nm_cluster_attr->store(cluster, page, count);
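/* o2nm_node_group_drop_item() runs when a node directory is removed from
 * the cluster's "node" group in configfs. */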
static void o2nm_node_group_drop_item(struct config_group *group,
                                      struct config_item *item)
        struct o2nm_node *node = to_o2nm_node(item);

        /* tail of a call that reports the node's config_item name */
             config_item_name(&node->nd_item));
        .make_item = o2nm_node_group_make_item,
        .drop_item = o2nm_node_group_drop_item,

        .ct_group_ops = &o2nm_node_group_group_ops,
static void o2nm_cluster_release(struct config_item *item)
        .release = o2nm_cluster_release,
        .show_attribute = o2nm_cluster_show,
        .store_attribute = o2nm_cluster_store,

        .ct_item_ops = &o2nm_cluster_item_ops,
        .ct_attrs = o2nm_cluster_attrs,
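/* Presumably from o2nm_cluster_group_make_group(): only one cluster may exist
 * at a time, and its default_groups are the node group and the o2hb
 * heartbeat group. */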
        if (o2nm_single_cluster)

        config_group_init_type_name(&ns->ns_group, "node",
                                    &o2nm_node_group_type);
        cluster->cl_group.default_groups = defs;
        cluster->cl_group.default_groups[1] = o2hb_group;
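/* Cluster teardown clears o2nm_single_cluster and walks default_groups,
 * collecting each child group's cg_item (presumably to drop its reference). */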
        BUG_ON(o2nm_single_cluster != cluster);
        o2nm_single_cluster = NULL;

        for (i = 0; cluster->cl_group.default_groups[i]; i++) {
                killme = &cluster->cl_group.default_groups[i]->cg_item;
        .make_group = o2nm_cluster_group_make_group,
        .drop_item = o2nm_cluster_group_drop_item,

        .ct_group_ops = &o2nm_cluster_group_group_ops,
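/* The configfs subsystem root: the "cluster" directory that the o2cb tools
 * populate with a cluster and its nodes. */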
                .ci_namebuf = "cluster",
                .ci_type = &o2nm_cluster_group_type,
static void __exit exit_o2nm(void)

static int __init init_o2nm(void)