Linux Kernel  3.7.1
hpimsgx.c
/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2011  AudioScience Inc. <[email protected]>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

 Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];

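/* The PCI device table built from hpipcida.h carries, in driver_data, the
 * HPI handler function for each supported adapter family.  Look up the
 * handler for a probed PCI device by matching vendor/device/subsystem IDs,
 * treating PCI_ANY_ID table entries as wildcards.
 */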
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{
	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

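/* Dispatch a message to the adapter-specific HPI handler registered for
 * phm->adapter_index; if no handler is registered, fill in an error
 * response instead of touching hardware.
 */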
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

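/* Byte-packed structures holding the cached open responses and the
 * per-adapter stream counts used by this layer.
 */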
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

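/* Handle HPI_OBJ_SUBSYSTEM messages.  Version queries and subsystem
 * open/close are answered here without being passed down the chain;
 * driver load/unload and adapter enumeration go through HPI_COMMON, and
 * adapter creation is routed to HPIMSGX__init.
 */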
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}

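/* Handle HPI_OBJ_ADAPTER messages.  Open and close are answered from the
 * response cache; HPI_ADAPTER_DELETE first releases any stream opens held
 * by this owner and closes the adapter at the hardware level before the
 * delete itself is passed down.
 */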
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

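/* Stream messages are bounds-checked against the stream counts reported by
 * HPI_ADAPTER_GET_INFO (cached in aDAPTER_INFO) before being handled; only
 * open and close are intercepted here, everything else goes straight to
 * the adapter handler.
 */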
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

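/* hpi_send_recv_ex() is the single entry point for this layer: it validates
 * the message type and adapter index, then routes the message to the
 * object-specific handler above.  h_owner identifies the calling client so
 * that stream opens can be tracked and cleaned up per owner.
 */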
/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
	HPI_DEBUG_RESPONSE(phr);
}

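/* adapter_open() and mixer_open() never touch the hardware: they return the
 * response cached by adapter_prepare() when the adapter was created.  The
 * matching close calls simply return a success response.
 */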
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

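/* Stream open: if the stream is already marked open by another owner, fail;
 * if the cached open response recorded an error, return that cached error;
 * otherwise mark the stream open, issue a reset to the hardware, record the
 * owner and return the cached open response.
 */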
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
			[phm->obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index]
			[phm->obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index]
		[phm->obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO, "closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
			[phm->obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index]
			[phm->obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index]
		[phm->obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO, "closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

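/* adapter_prepare() runs once after an adapter has been created: it opens
 * the adapter, queries its stream counts via HPI_ADAPTER_GET_INFO, then
 * opens every outstream, instream and the mixer once and caches each open
 * response so later opens can be answered without touching the hardware.
 */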
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}

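/* HPIMSGX__reset() marks the cached open responses for one adapter (or for
 * all adapters) as errors, so opens fail until adapter_prepare() has
 * repopulated the cache for a real adapter.
 */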
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN,
				HPI_ERROR_BAD_ADAPTER_NUMBER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER,
				HPI_MIXER_OPEN, HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER_NUMBER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}

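/* Handle HPI_SUBSYS_CREATE_ADAPTER: find the handler matching the new
 * adapter's PCI identity, let it create the adapter, record the handler in
 * hpi_entry_points[] and pre-open the adapter's objects via
 * adapter_prepare().
 */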
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}

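/* HPIMSGX__cleanup() is called with an owner handle when that owner is
 * finished (subsystem close, driver unload or adapter delete).  Any streams
 * still recorded as open by that owner are reset, their host buffers freed
 * and their groups reset, and the open-tracking state is cleared.
 */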
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n", wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}