#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
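/*
 * iWARP connection manager (iwcm). These routines implement the
 * iw_cm_id API for ULPs on top of each device's iwcm verbs (connect,
 * accept, reject, create_listen, destroy_listen). put_work() below
 * returns a work element to the per-id free list.
 */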
static void put_work(struct iwcm_work *work)
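/*
 * alloc_work_entries(): preallocate 'count' iwcm_work elements onto the
 * cm_id's work_free_list (via put_work()) so that events arriving in
 * atomic context need no allocation; on kmalloc failure, roll back with
 * dealloc_work_entries().
 */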
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));

	dealloc_work_entries(cm_id_priv);

	work->cm_id = cm_id_priv;
	INIT_LIST_HEAD(&work->list);
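/*
 * copy_private_data(): duplicate the event's private data with
 * GFP_ATOMIC so the event can outlive the provider's buffer.
 */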
	event->private_data = p;
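/*
 * free_cm_id(): release the preallocated work entries, then free the
 * iwcm_id_private structure itself.
 */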
	dealloc_work_entries(cm_id_priv);
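/*
 * rem_ref(): drop a reference on the cm_id; if this was the last
 * reference and the id was flagged for destruction from a callback,
 * free it here.
 */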
	if (iwcm_deref_id(cm_id_priv) &&
	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
		free_cm_id(cm_id_priv);
	}
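/*
 * iw_create_cm_id(): allocate and initialize an iwcm_id_private and
 * hand the embedded iw_cm_id back to the caller.
 */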
	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;

	return &cm_id_priv->id;
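/*
 * iwcm_modify_qp_err()/iwcm_modify_qp_sqd(): move the QP to the ERR or
 * SQD state via ib_modify_qp(); used by iw_cm_disconnect() for
 * abortive vs. graceful disconnect.
 */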
static int iwcm_modify_qp_err(struct ib_qp *qp)
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
	switch (cm_id_priv->state) {

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iwcm_modify_qp_err(qp);

	ret = iwcm_modify_qp_sqd(qp);
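/*
 * destroy_cm_id(): tear down an id according to its current state:
 * stop a listen via destroy_listen, abort an established connection by
 * moving the QP to ERR, drop any QP reference, and release the
 * creation reference.
 */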
static void destroy_cm_id(struct iw_cm_id *cm_id)
	switch (cm_id_priv->state) {

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id->device->iwcm->destroy_listen(cm_id);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	(void)iwcm_modify_qp_err(cm_id_priv->qp);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
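/*
 * iw_destroy_cm_id(): client-visible teardown; run destroy_cm_id(),
 * wait for all outstanding references to drop, then free the id.
 */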
	destroy_cm_id(cm_id);

	free_cm_id(cm_id_priv);
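/*
 * iw_cm_listen(): preallocate one work element per backlog slot, move
 * the id from IDLE to LISTEN, and call the provider's create_listen.
 */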
	ret = alloc_work_entries(cm_id_priv, backlog);

	switch (cm_id_priv->state) {

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id->device->iwcm->create_listen(cm_id, backlog);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
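/*
 * iw_cm_reject(): only legal in CONN_RECV; return the id to IDLE and
 * pass the private data to the provider's reject verb.
 */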
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);
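/*
 * iw_cm_accept(): only legal in CONN_RECV; look up the QP named by
 * iw_param->qpn, take a provider reference on it, and call the
 * provider's accept verb; on failure drop the reference and return the
 * id to IDLE.
 */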
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id->device->iwcm->add_ref(qp);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (cm_id_priv->qp) {
		cm_id->device->iwcm->rem_ref(qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
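/*
 * iw_cm_connect(): preallocate work elements, move IDLE -> CONN_SENT,
 * reference the QP, and call the provider's connect verb; the error
 * path mirrors iw_cm_accept().
 */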
	ret = alloc_work_entries(cm_id_priv, 4);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id->device->iwcm->add_ref(qp);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (cm_id_priv->qp) {
		cm_id->device->iwcm->rem_ref(qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
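/*
 * cm_conn_req_handler(): a connection request arrived on a listening
 * id; create a child cm_id inheriting the listener's handler and
 * context, and pass the request up. On any failure the child is
 * destroyed and freed here.
 */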
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);

	destroy_cm_id(cm_id);

	free_cm_id(cm_id_priv);
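/*
 * cm_conn_est_handler(): the passive connection became established;
 * move CONN_RECV -> ESTABLISHED and notify the user's cm_handler with
 * the lock dropped.
 */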
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
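/*
 * cm_conn_rep_handler(): the active side's connect completed; on
 * success move to ESTABLISHED, otherwise drop the QP reference and
 * fall back to IDLE before invoking the user's handler.
 */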
	if (iw_event->status == 0) {
		/* ... */
	} else {
		/* REJECTED or RESET: release the QP */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
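/*
 * cm_disconnect_handler(): an ESTABLISHED connection saw a DISCONNECT;
 * mark it CLOSING and inform the user's handler.
 */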
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
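/*
 * cm_close_handler(): the QP closed; release the QP reference, reset
 * the id toward IDLE according to its state, and invoke the user's
 * handler.
 */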
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
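/*
 * process_event(): dispatch one provider event to the matching handler
 * (IW_CM_EVENT_CONNECT_REQUEST, _CONNECT_REPLY, _ESTABLISHED,
 * _DISCONNECT, _CLOSE).
 */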
	switch (iw_event->event) {

	cm_conn_req_handler(cm_id_priv, iw_event);

	ret = cm_conn_rep_handler(cm_id_priv, iw_event);

	ret = cm_conn_est_handler(cm_id_priv, iw_event);

	cm_disconnect_handler(cm_id_priv, iw_event);

	ret = cm_close_handler(cm_id_priv, iw_event);
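/*
 * cm_work_handler(): workqueue routine; drain the id's work_list one
 * event at a time, dropping the lock across each process_event() call,
 * honoring a destroy flagged from a user callback, and releasing the
 * event's reference when done.
 */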
static void cm_work_handler(struct work_struct *_work)

	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	empty = list_empty(&cm_id_priv->work_list);

	work = list_entry(cm_id_priv->work_list.next,
			  struct iwcm_work, list);
	list_del_init(&work->list);
	empty = list_empty(&cm_id_priv->work_list);
	levent = work->event;

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = process_event(cm_id_priv, &levent);

	destroy_cm_id(&cm_id_priv->id);

	if (iwcm_deref_id(cm_id_priv)) {
		/* ... */
		free_cm_id(cm_id_priv);
	}

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
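/*
 * cm_event_handler(): the callback the provider invokes, possibly from
 * interrupt context; snapshot the event (and its private data, if any)
 * into a preallocated work element and queue cm_work_handler() on the
 * iwcm workqueue.
 */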
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)

	struct iwcm_work *work;
	work = get_work(cm_id_priv);

	work->cm_id = cm_id_priv;
	work->event = *iw_event;
	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
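/*
 * iwcm_init_qp_init_attr()/iwcm_init_qp_rts_attr() fill in the QP
 * attributes a ULP needs for the INIT and RTS transitions;
 * iw_cm_init_qp_attr() selects between them on qp_attr->qp_state.
 */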
	switch (cm_id_priv->state) {

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	switch (cm_id_priv->state) {

	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = iwcm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);

	ret = iwcm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
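/*
 * Module init/exit: create and destroy the single-threaded "iw_cm_wq"
 * workqueue that cm_event_handler() queues onto.
 */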
static int __init iw_cm_init(void)
static void __exit iw_cm_cleanup(void)