From f1bdc9a47fe7e740a3976099a12792cb3284a6ba Mon Sep 17 00:00:00 2001
From: eeb <eeb>
Date: Wed, 17 Aug 2005 18:35:20 +0000
Subject: [PATCH] * running on 2 GM nodes @ HP

---
 lnet/klnds/gmlnd/gmlnd.h        |  89 ++---------------
 lnet/klnds/gmlnd/gmlnd_api.c    |  73 ++++++++------
 lnet/klnds/gmlnd/gmlnd_cb.c     |  12 +--
 lnet/klnds/gmlnd/gmlnd_comm.c   | 140 +++++++++++++-------------
 lnet/klnds/gmlnd/gmlnd_module.c |  10 +-
 lnet/klnds/gmlnd/gmlnd_utils.c  | 171 +++++++++++++------------------
 6 files changed, 202 insertions(+), 293 deletions(-)

diff --git a/lnet/klnds/gmlnd/gmlnd.h b/lnet/klnds/gmlnd/gmlnd.h
index 2193a21ba2..6076d14b2b 100644
--- a/lnet/klnds/gmlnd/gmlnd.h
+++ b/lnet/klnds/gmlnd/gmlnd.h
@@ -71,6 +71,13 @@
 #include "portals/nal.h"
 #include "portals/lib-p30.h"
 
+/* undefine these before including the GM headers which clash */
+#undef PACKAGE_BUGREPORT
+#undef PACKAGE_NAME
+#undef PACKAGE_STRING
+#undef PACKAGE_TARNAME
+#undef PACKAGE_VERSION
+
 #define GM_STRONG_TYPES 1
 #ifdef VERSION
 #undef VERSION
@@ -89,23 +96,17 @@
  * insmod can set small_msg_size
  * which is used to populate nal_data.small_msg_size
  */
+#define GMNAL_MAGIC 0x1234abcd
+
 #define GMNAL_SMALL_MESSAGE 1078
 #define GMNAL_LARGE_MESSAGE_INIT 1079
-#define GMNAL_LARGE_MESSAGE_ACK 1080
+#define GMNAL_LARGE_MESSAGE_ACK 1080
 #define GMNAL_LARGE_MESSAGE_FINI 1081
 
 extern int gmnal_small_msg_size;
 extern int num_rx_threads;
 extern int num_stxds;
 extern int gm_port_id;
-#define GMNAL_SMALL_MSG_SIZE(a) a->small_msg_size
-#define GMNAL_IS_SMALL_MESSAGE(n,a,b,c) gmnal_is_small_msg(n, a, b, c)
-#define GMNAL_MAGIC 0x1234abcd
-/*
- * The gm_port to use for gmnal
- */
-#define GMNAL_GM_PORT_ID gm_port_id
-
 
 /*
  * Small Transmit Descriptor
@@ -178,7 +179,6 @@ typedef struct _gmnal_msghdr {
         __s32           niov;
         gm_remote_ptr_t stxd_remote_ptr;        /* 64 bits */
 } WIRE_ATTR gmnal_msghdr_t;
-#define GMNAL_MSGHDR_SIZE sizeof(gmnal_msghdr_t)
 
 /*
  * the caretaker thread (ct_thread) gets receive events
@@ -274,55 +274,6 @@ extern gmnal_data_t *global_nal_data;
  * FUNCTION PROTOTYPES
  */
 
-/*
- * Locking macros
- */
-
-/*
- * For the Small tx and rx descriptor lists
- */
-#define GMNAL_TXD_LOCK_INIT(a) spin_lock_init(&a->stxd_lock);
-#define GMNAL_TXD_LOCK(a) spin_lock(&a->stxd_lock);
-#define GMNAL_TXD_UNLOCK(a) spin_unlock(&a->stxd_lock);
-#define GMNAL_TXD_TOKEN_INIT(a, n) sema_init(&a->stxd_token, n);
-#define GMNAL_TXD_GETTOKEN(a) down(&a->stxd_token);
-#define GMNAL_TXD_TRYGETTOKEN(a) down_trylock(&a->stxd_token)
-#define GMNAL_TXD_RETURNTOKEN(a) up(&a->stxd_token);
-
-#define GMNAL_RXT_TXD_LOCK_INIT(a) spin_lock_init(&a->rxt_stxd_lock);
-#define GMNAL_RXT_TXD_LOCK(a) spin_lock(&a->rxt_stxd_lock);
-#define GMNAL_RXT_TXD_UNLOCK(a) spin_unlock(&a->rxt_stxd_lock);
-#define GMNAL_RXT_TXD_TOKEN_INIT(a, n) sema_init(&a->rxt_stxd_token, n);
-#define GMNAL_RXT_TXD_GETTOKEN(a) down(&a->rxt_stxd_token);
-#define GMNAL_RXT_TXD_TRYGETTOKEN(a) down_trylock(&a->rxt_stxd_token)
-#define GMNAL_RXT_TXD_RETURNTOKEN(a) up(&a->rxt_stxd_token);
-
-#define GMNAL_LTXD_LOCK_INIT(a) spin_lock_init(&a->ltxd_lock);
-#define GMNAL_LTXD_LOCK(a) spin_lock(&a->ltxd_lock);
-#define GMNAL_LTXD_UNLOCK(a) spin_unlock(&a->ltxd_lock);
-#define GMNAL_LTXD_TOKEN_INIT(a, n) sema_init(&a->ltxd_token, n);
-#define GMNAL_LTXD_GETTOKEN(a) down(&a->ltxd_token);
-#define GMNAL_LTXD_TRYGETTOKEN(a) down_trylock(&a->ltxd_token)
-#define GMNAL_LTXD_RETURNTOKEN(a) up(&a->ltxd_token);
-
-#define GMNAL_RXD_LOCK_INIT(a) spin_lock_init(&a->srxd_lock);
-#define GMNAL_RXD_LOCK(a) spin_lock(&a->srxd_lock);
-#define GMNAL_RXD_UNLOCK(a) spin_unlock(&a->srxd_lock);
-#define GMNAL_RXD_TOKEN_INIT(a, n) sema_init(&a->srxd_token, n);
-#define GMNAL_RXD_GETTOKEN(a) down(&a->srxd_token);
-#define GMNAL_RXD_TRYGETTOKEN(a) down_trylock(&a->srxd_token)
-#define GMNAL_RXD_RETURNTOKEN(a) up(&a->srxd_token);
-
-#define GMNAL_GM_LOCK_INIT(a) spin_lock_init(&a->gm_lock);
-#define GMNAL_GM_LOCK(a) spin_lock(&a->gm_lock);
-#define GMNAL_GM_UNLOCK(a) spin_unlock(&a->gm_lock);
-#define GMNAL_CB_LOCK_INIT(a) spin_lock_init(&a->cb_lock);
-
-
-/*
- * Memory Allocator
- */
-
 /*
  * API NAL
  */
@@ -342,13 +293,6 @@ void gmnal_api_lock(nal_t *, unsigned long *);
 void gmnal_api_unlock(nal_t *, unsigned long *);
 
 
-#define GMNAL_INIT_NAL(a) do {  \
-        (a)->nal_ni_init = gmnal_api_startup; \
-        (a)->nal_ni_fini = gmnal_api_shutdown; \
-        (a)->nal_data = NULL; \
-        } while (0)
-
-
 /*
  * CB NAL
  */
@@ -372,19 +316,6 @@ int gmnal_init(void);
 void gmnal_fini(void);
 
 
-
-#define GMNAL_INIT_NAL_CB(a) do {       \
-        a->libnal_send = gmnal_cb_send;                 \
-        a->libnal_send_pages = gmnal_cb_send_pages;     \
-        a->libnal_recv = gmnal_cb_recv;                 \
-        a->libnal_recv_pages = gmnal_cb_recv_pages;     \
-        a->libnal_map = NULL;                           \
-        a->libnal_unmap = NULL;                         \
-        a->libnal_dist = gmnal_cb_dist;                 \
-        a->libnal_data = NULL;                          \
-        } while (0)
-
-
 /*
  * Small and Large Transmit and Receive Descriptor Functions
  */
diff --git a/lnet/klnds/gmlnd/gmlnd_api.c b/lnet/klnds/gmlnd/gmlnd_api.c
index a382471893..12efc6321e 100644
--- a/lnet/klnds/gmlnd/gmlnd_api.c
+++ b/lnet/klnds/gmlnd/gmlnd_api.c
@@ -90,10 +90,10 @@ gmnal_api_shutdown(nal_t *nal)
         gmnal_stop_ctthread(nal_data);
         gmnal_free_txd(nal_data);
         gmnal_free_srxd(nal_data);
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_close(nal_data->gm_port);
         gm_finalize();
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (nal_data->sysctl)
                 unregister_sysctl_table (nal_data->sysctl);
         /* Don't free 'nal'; it's a static struct */
@@ -150,10 +150,19 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 return(PTL_NO_SPACE);
         }
+        memset(libnal, 0, sizeof(lib_nal_t));
+        libnal->libnal_send = gmnal_cb_send;
+        libnal->libnal_send_pages = gmnal_cb_send_pages;
+        libnal->libnal_recv = gmnal_cb_recv;
+        libnal->libnal_recv_pages = gmnal_cb_recv_pages;
+        libnal->libnal_map = NULL;
+        libnal->libnal_unmap = NULL;
+        libnal->libnal_dist = gmnal_cb_dist;
+        libnal->libnal_data = NULL;
+
         CDEBUG(D_INFO, "Allocd and reset libnal[%p]\n", libnal);
 
-        GMNAL_INIT_NAL_CB(libnal);
         /*
          * String them all together
          */
@@ -161,7 +170,7 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
         nal_data->nal = nal;
         nal_data->libnal = libnal;
 
-        GMNAL_GM_LOCK_INIT(nal_data);
+        spin_lock_init(&nal_data->gm_lock);
 
 
         /*
@@ -177,13 +186,13 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
 
 
         CDEBUG(D_NET, "Calling gm_open with port [%d], "
-               "name [%s], version [%d]\n", GMNAL_GM_PORT_ID,
+               "name [%s], version [%d]\n", gm_port_id,
                "gmnal", GM_API_VERSION);
 
-        GMNAL_GM_LOCK(nal_data);
-        gm_status = gm_open(&nal_data->gm_port, 0, GMNAL_GM_PORT_ID, "gmnal",
+        spin_lock(&nal_data->gm_lock);
+        gm_status = gm_open(&nal_data->gm_port, 0, gm_port_id, "gmnal",
                             GM_API_VERSION);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
 
         CDEBUG(D_INFO, "gm_open returned [%d]\n", gm_status);
         if (gm_status == GM_SUCCESS) {
@@ -210,9 +219,9 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
                                gm_status);
                         break;
                 }
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_finalize();
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 PORTAL_FREE(libnal, sizeof(lib_nal_t));
                 return(PTL_FAIL);
@@ -225,10 +234,10 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
         if (gmnal_alloc_srxd(nal_data) != GMNAL_STATUS_OK) {
                 CERROR("Failed to allocate small rx descriptors\n");
                 gmnal_free_txd(nal_data);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_close(nal_data->gm_port);
                 gm_finalize();
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 PORTAL_FREE(libnal, sizeof(lib_nal_t));
                 return(PTL_FAIL);
@@ -242,11 +251,11 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
         while((srxd = gmnal_get_srxd(nal_data, 0))) {
                 CDEBUG(D_NET, "giving [%p] to gm_provide_recvive_buffer\n",
                        srxd->buffer);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_provide_receive_buffer_with_tag(nal_data->gm_port,
                                                    srxd->buffer, srxd->gmsize,
                                                    GM_LOW_PRIORITY, 0);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
         }
 
         /*
@@ -254,10 +263,10 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
          */
         if (gmnal_alloc_txd(nal_data) != GMNAL_STATUS_OK) {
                 CERROR("Failed to allocate small tx descriptors\n");
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_close(nal_data->gm_port);
                 gm_finalize();
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 PORTAL_FREE(libnal, sizeof(lib_nal_t));
                 return(PTL_FAIL);
@@ -267,19 +276,19 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
          * Initialise the portals library
          */
         CDEBUG(D_NET, "Getting node id\n");
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_status = gm_get_node_id(nal_data->gm_port, &local_nid);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (gm_status != GM_SUCCESS) {
                 gmnal_stop_rxthread(nal_data);
                 gmnal_stop_ctthread(nal_data);
                 CERROR("can't determine node id\n");
                 gmnal_free_txd(nal_data);
                 gmnal_free_srxd(nal_data);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_close(nal_data->gm_port);
                 gm_finalize();
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 PORTAL_FREE(libnal, sizeof(lib_nal_t));
                 return(PTL_FAIL);
@@ -288,20 +297,20 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
         nal_data->gm_local_nid = local_nid;
         CDEBUG(D_INFO, "Local node id is [%u]\n", local_nid);
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_status = gm_node_id_to_global_id(nal_data->gm_port, local_nid,
                                             &global_nid);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (gm_status != GM_SUCCESS) {
                 CERROR("failed to obtain global id\n");
                 gmnal_stop_rxthread(nal_data);
                 gmnal_stop_ctthread(nal_data);
                 gmnal_free_txd(nal_data);
                 gmnal_free_srxd(nal_data);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_close(nal_data->gm_port);
                 gm_finalize();
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 PORTAL_FREE(libnal, sizeof(lib_nal_t));
                 return(PTL_FAIL);
@@ -327,10 +336,10 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
                 gmnal_stop_ctthread(nal_data);
                 gmnal_free_txd(nal_data);
                 gmnal_free_srxd(nal_data);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_close(nal_data->gm_port);
                 gm_finalize();
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 PORTAL_FREE(libnal, sizeof(lib_nal_t));
                 return(PTL_FAIL);
@@ -360,10 +369,10 @@ gmnal_api_startup(nal_t *nal, ptl_pid_t requested_pid,
                 gmnal_stop_ctthread(nal_data);
                 gmnal_free_txd(nal_data);
                 gmnal_free_srxd(nal_data);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_close(nal_data->gm_port);
                 gm_finalize();
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(nal_data, sizeof(gmnal_data_t));
                 PORTAL_FREE(libnal, sizeof(lib_nal_t));
                 return(PTL_FAIL);
@@ -390,9 +399,13 @@ int gmnal_init(void)
 {
         int rc;
 
-        memset(&the_gm_nal, 0, sizeof(nal_t));
         CDEBUG(D_INFO, "reset nal[%p]\n", &the_gm_nal);
-        GMNAL_INIT_NAL(&the_gm_nal);
+
+        the_gm_nal = (nal_t) {
+                .nal_ni_init = gmnal_api_startup,
+                .nal_ni_fini = gmnal_api_shutdown,
+                .nal_data = NULL,
+        };
 
         rc = ptl_register_nal(GMNAL, &the_gm_nal);
         if (rc != PTL_OK)
diff --git a/lnet/klnds/gmlnd/gmlnd_cb.c b/lnet/klnds/gmlnd/gmlnd_cb.c
index a96f6e6aa0..d94bb88869 100644
--- a/lnet/klnds/gmlnd/gmlnd_cb.c
+++ b/lnet/klnds/gmlnd/gmlnd_cb.c
@@ -48,7 +48,7 @@ ptl_err_t gmnal_cb_recv(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
          * side occurrence of filling pkmap_count[].
          */
         buffer = srxd->buffer;
-        buffer += GMNAL_MSGHDR_SIZE;
+        buffer += sizeof(gmnal_msghdr_t);
         buffer += sizeof(ptl_hdr_t);
 
         while(niov--) {
@@ -99,7 +99,7 @@ ptl_err_t gmnal_cb_recv_pages(lib_nal_t *libnal, void *private,
         size_t  nob;
 
         buffer = srxd->buffer;
-        buffer += GMNAL_MSGHDR_SIZE;
+        buffer += sizeof(gmnal_msghdr_t);
         buffer += sizeof(ptl_hdr_t);
 
         /*
@@ -163,7 +163,7 @@ ptl_err_t gmnal_cb_send(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
                 CDEBUG(D_INFO, "nal_data [%p]\n", nal_data);
         }
 
-        if (GMNAL_IS_SMALL_MESSAGE(nal_data, niov, iov, len)) {
+        if (gmnal_is_small_msg(nal_data, niov, iov, len)) {
                 size_t msglen = len;
                 size_t nob;
 
@@ -175,7 +175,7 @@ ptl_err_t gmnal_cb_send(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
                 stxd = gmnal_get_stxd(nal_data, 1);
                 CDEBUG(D_INFO, "stxd [%p]\n", stxd);
                 /* Set the offset of the data to copy into the buffer */
-                buffer = stxd->buffer + GMNAL_MSGHDR_SIZE + sizeof(ptl_hdr_t);
+                buffer = stxd->buffer + sizeof(gmnal_msghdr_t) + sizeof(ptl_hdr_t);
                 while(niov--) {
                         if (offset >= iov->iov_len) {
                                 offset -= iov->iov_len;
@@ -235,9 +235,9 @@ ptl_err_t gmnal_cb_send_pages(lib_nal_t *libnal, void *private,
         stxd = gmnal_get_stxd(nal_data, 1);
         CDEBUG(D_INFO, "stxd [%p]\n", stxd);
         /* Set the offset of the data to copy into the buffer */
-        buffer = stxd->buffer + GMNAL_MSGHDR_SIZE + sizeof(ptl_hdr_t);
+        buffer = stxd->buffer + sizeof(gmnal_msghdr_t) + sizeof(ptl_hdr_t);
 
-        if (GMNAL_IS_SMALL_MESSAGE(nal_data, 0, NULL, len)) {
+        if (gmnal_is_small_msg(nal_data, 0, NULL, len)) {
                 size_t msglen = len;
                 size_t nob;
 
diff --git a/lnet/klnds/gmlnd/gmlnd_comm.c b/lnet/klnds/gmlnd/gmlnd_comm.c
index e66915309b..c618680026 100644
--- a/lnet/klnds/gmlnd/gmlnd_comm.c
+++ b/lnet/klnds/gmlnd/gmlnd_comm.c
@@ -54,7 +54,7 @@ gmnal_ct_thread(void *arg)
         nal_data->ctthread_flag = GMNAL_CTTHREAD_STARTED;
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         while(nal_data->ctthread_flag == GMNAL_CTTHREAD_STARTED) {
                 CDEBUG(D_NET, "waiting\n");
                 rxevent = gm_blocking_receive_no_spin(nal_data->gm_port);
@@ -68,9 +68,9 @@ gmnal_ct_thread(void *arg)
                 case(GM_RECV_EVENT):
                         CDEBUG(D_NET, "CTTHREAD:: GM_RECV_EVENT\n");
                         recv = (gm_recv_t*)&rxevent->recv;
-                        GMNAL_GM_UNLOCK(nal_data);
+                        spin_unlock(&nal_data->gm_lock);
                         gmnal_add_rxtwe(nal_data, recv);
-                        GMNAL_GM_LOCK(nal_data);
+                        spin_lock(&nal_data->gm_lock);
                         CDEBUG(D_NET, "CTTHREAD:: Added event to Q\n");
                         break;
                 case(_GM_SLEEP_EVENT):
@@ -80,9 +80,9 @@ gmnal_ct_thread(void *arg)
                          * Don't know what this is
                          */
                         CDEBUG(D_NET, "Sleeping in gm_unknown\n");
-                        GMNAL_GM_UNLOCK(nal_data);
+                        spin_unlock(&nal_data->gm_lock);
                         gm_unknown(nal_data->gm_port, rxevent);
-                        GMNAL_GM_LOCK(nal_data);
+                        spin_lock(&nal_data->gm_lock);
                         CDEBUG(D_INFO, "Awake from gm_unknown\n");
                         break;
 
@@ -94,13 +94,13 @@ gmnal_ct_thread(void *arg)
                          * FAST_RECV_EVENTS here.
                          */
                         CDEBUG(D_NET, "Passing event to gm_unknown\n");
-                        GMNAL_GM_UNLOCK(nal_data);
+                        spin_unlock(&nal_data->gm_lock);
                         gm_unknown(nal_data->gm_port, rxevent);
-                        GMNAL_GM_LOCK(nal_data);
+                        spin_lock(&nal_data->gm_lock);
                         CDEBUG(D_INFO, "Processed unknown event\n");
                 }
         }
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         nal_data->ctthread_flag = GMNAL_THREAD_RESET;
         CDEBUG(D_INFO, "thread nal_data [%p] is exiting\n", nal_data);
         return(GMNAL_STATUS_OK);
@@ -209,7 +209,7 @@ gmnal_pre_receive(gmnal_data_t *nal_data, gmnal_rxtwe_t *we, int gmnal_type)
         length = we->length;
 
         gmnal_msghdr = (gmnal_msghdr_t*)buffer;
-        portals_hdr = (ptl_hdr_t*)(buffer+GMNAL_MSGHDR_SIZE);
+        portals_hdr = (ptl_hdr_t*)(buffer+sizeof(gmnal_msghdr_t));
 
         CDEBUG(D_INFO, "rx_event:: Sender node [%d], Sender Port [%d], "
                "type [%d], length [%d], buffer [%p]\n",
@@ -249,7 +249,7 @@ gmnal_pre_receive(gmnal_data_t *nal_data, gmnal_rxtwe_t *we, int gmnal_type)
         srxd->gm_source_node = gmnal_msghdr->sender_node_id;
 
         CDEBUG(D_PORTALS, "Calling lib_parse buffer is [%p]\n",
-               buffer+GMNAL_MSGHDR_SIZE);
+               buffer+sizeof(gmnal_msghdr_t));
         /*
          * control passes to lib, which calls cb_recv
          * cb_recv is responsible for returning the buffer
@@ -281,10 +281,10 @@ gmnal_rx_requeue_buffer(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
         CDEBUG(D_NET, "requeueing srxd[%p] nal_data[%p]\n", srxd, nal_data);
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_provide_receive_buffer_with_tag(nal_data->gm_port, srxd->buffer,
                                            srxd->gmsize, GM_LOW_PRIORITY, 0 );
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
 
         return(GMNAL_STATUS_OK);
 }
@@ -349,10 +349,10 @@ gmnal_small_rx(lib_nal_t *libnal, void *private, lib_msg_t *cookie)
          * return buffer so it can be used again
          */
         CDEBUG(D_NET, "calling gm_provide_receive_buffer\n");
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_provide_receive_buffer_with_tag(nal_data->gm_port, srxd->buffer,
                                            srxd->gmsize, GM_LOW_PRIORITY, 0);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
 
         return(PTL_OK);
 }
@@ -391,10 +391,10 @@ gmnal_small_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
                 CDEBUG(D_INFO, "nal_data [%p]\n", nal_data);
         }
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_status = gm_global_id_to_node_id(nal_data->gm_port, global_nid,
                                             &local_nid);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (gm_status != GM_SUCCESS) {
                 CERROR("Failed to obtain local id\n");
                 return(PTL_FAIL);
@@ -417,7 +417,7 @@ gmnal_small_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
         msghdr->sender_node_id = nal_data->gm_global_nid;
         CDEBUG(D_INFO, "processing msghdr at [%p]\n", buffer);
 
-        buffer += GMNAL_MSGHDR_SIZE;
+        buffer += sizeof(gmnal_msghdr_t);
 
         CDEBUG(D_INFO, "processing portals hdr at [%p]\n", buffer);
         gm_bcopy(hdr, buffer, sizeof(ptl_hdr_t));
@@ -425,7 +425,7 @@ gmnal_small_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
         buffer += sizeof(ptl_hdr_t);
 
         CDEBUG(D_INFO, "sending\n");
-        tot_size = size+sizeof(ptl_hdr_t)+GMNAL_MSGHDR_SIZE;
+        tot_size = size+sizeof(ptl_hdr_t)+sizeof(gmnal_msghdr_t);
         stxd->msg_size = tot_size;
 
 
@@ -434,14 +434,14 @@ gmnal_small_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
                "stxd [%p]\n", nal_data->gm_port, stxd->buffer, stxd->gm_size,
                stxd->msg_size, global_nid, local_nid, stxd);
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         stxd->gm_priority = GM_LOW_PRIORITY;
         stxd->gm_target_node = local_nid;
         gm_send_to_peer_with_callback(nal_data->gm_port, stxd->buffer,
                                       stxd->gm_size, stxd->msg_size,
                                       GM_LOW_PRIORITY, local_nid,
                                       gmnal_small_tx_callback, (void*)stxd);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         CDEBUG(D_INFO, "done\n");
 
         return(PTL_OK);
@@ -470,10 +470,10 @@ gmnal_small_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
                 return;
         }
         if (status != GM_SUCCESS) {
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_status = gm_node_id_to_global_id(nal_data->gm_port,
                                                     stxd->gm_target_node,&gnid);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 if (gm_status != GM_SUCCESS) {
                         CDEBUG(D_INFO, "gm_node_id_to_global_id failed[%d]\n",
                                gm_status);
@@ -494,7 +494,7 @@ gmnal_small_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
                  * do a resend on the dropped ones
                  */
                 CERROR("send stxd [%p] dropped, resending\n", context);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_send_to_peer_with_callback(nal_data->gm_port,
                                               stxd->buffer,
                                               stxd->gm_size,
@@ -503,7 +503,7 @@ gmnal_small_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
                                               stxd->gm_target_node,
                                               gmnal_small_tx_callback,
                                               context);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 return;
         case(GM_TIMED_OUT):
         case(GM_SEND_TIMED_OUT):
@@ -511,11 +511,11 @@ gmnal_small_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
                  * drop these ones
                  */
                 CDEBUG(D_INFO, "calling gm_drop_sends\n");
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_drop_sends(nal_data->gm_port, stxd->gm_priority,
-                              stxd->gm_target_node, GMNAL_GM_PORT_ID,
+                              stxd->gm_target_node, gm_port_id,
                               gmnal_drop_sends_callback, context);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
 
                 return;
 
@@ -567,7 +567,7 @@ gmnal_small_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
         case(GM_YP_NO_MATCH):
         default:
                 gm_resume_sending(nal_data->gm_port, stxd->gm_priority,
-                                  stxd->gm_target_node, GMNAL_GM_PORT_ID,
+                                  stxd->gm_target_node, gm_port_id,
                                   gmnal_resume_sending_callback, context);
                 return;
 
@@ -617,14 +617,14 @@ void gmnal_drop_sends_callback(struct gm_port *gm_port, void *context,
         CDEBUG(D_TRACE, "status is [%d] context is [%p]\n", status, context);
         if (status == GM_SUCCESS) {
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_send_to_peer_with_callback(gm_port, stxd->buffer,
                                               stxd->gm_size,
                                               stxd->msg_size,
                                               stxd->gm_priority,
                                               stxd->gm_target_node,
                                               gmnal_small_tx_callback,
                                               context);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
         } else {
                 CERROR("send_to_peer status for stxd [%p] is "
                        "[%d][%s]\n", stxd, status, gmnal_gm_error(status));
@@ -700,8 +700,8 @@ gmnal_large_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
         msghdr->sender_node_id = nal_data->gm_global_nid;
         msghdr->stxd_remote_ptr = (gm_remote_ptr_t)stxd;
         msghdr->niov = niov ;
-        buffer += GMNAL_MSGHDR_SIZE;
-        mlen = GMNAL_MSGHDR_SIZE;
+        buffer += sizeof(gmnal_msghdr_t);
+        mlen = sizeof(gmnal_msghdr_t);
         CDEBUG(D_INFO, "mlen is [%d]\n", mlen);
 
 
@@ -750,28 +750,28 @@ gmnal_large_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
         while(niov--) {
                 CDEBUG(D_INFO, "Registering memory [%p] len ["LPSZ"] \n",
                        iov->iov_base, iov->iov_len);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_status = gm_register_memory(nal_data->gm_port,
                                                iov->iov_base, iov->iov_len);
                 if (gm_status != GM_SUCCESS) {
-                        GMNAL_GM_UNLOCK(nal_data);
+                        spin_unlock(&nal_data->gm_lock);
                         CERROR("gm_register_memory returns [%d][%s] "
                                "for memory [%p] len ["LPSZ"]\n",
                                gm_status, gmnal_gm_error(gm_status),
                                iov->iov_base, iov->iov_len);
-                        GMNAL_GM_LOCK(nal_data);
+                        spin_lock(&nal_data->gm_lock);
                         while (iov_dup != iov) {
                                 gm_deregister_memory(nal_data->gm_port,
                                                      iov_dup->iov_base,
                                                      iov_dup->iov_len);
                                 iov_dup++;
                         }
-                        GMNAL_GM_UNLOCK(nal_data);
+                        spin_unlock(&nal_data->gm_lock);
                         gmnal_return_stxd(nal_data, stxd);
                         return(PTL_FAIL);
                 }
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
 
                 iov++;
         }
@@ -779,11 +779,11 @@ gmnal_large_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
          * Send the init message to the target
          */
         CDEBUG(D_INFO, "sending mlen [%d]\n", mlen);
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_status = gm_global_id_to_node_id(nal_data->gm_port, global_nid,
                                             &local_nid);
         if (gm_status != GM_SUCCESS) {
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 CERROR("Failed to obtain local id\n");
                 gmnal_return_stxd(nal_data, stxd);
                 /* TO DO deregister memory on failure */
@@ -794,7 +794,7 @@ gmnal_large_tx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
                                       stxd->gm_size, mlen, GM_LOW_PRIORITY,
                                       local_nid, gmnal_large_tx_callback,
                                       (void*)stxd);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
 
         CDEBUG(D_INFO, "done\n");
 
@@ -844,7 +844,7 @@ gmnal_large_rx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
         buffer = srxd->buffer;
         msghdr = (gmnal_msghdr_t*)buffer;
-        buffer += GMNAL_MSGHDR_SIZE;
+        buffer += sizeof(gmnal_msghdr_t);
         buffer += sizeof(ptl_hdr_t);
 
         /*
@@ -887,30 +887,30 @@ gmnal_large_rx(lib_nal_t *libnal, void *private, lib_msg_t *cookie,
         while(nriov--) {
                 CDEBUG(D_INFO, "Registering memory [%p] len ["LPSZ"] \n",
                        riov->iov_base, riov->iov_len);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_status = gm_register_memory(nal_data->gm_port,
                                                riov->iov_base, riov->iov_len);
                 if (gm_status != GM_SUCCESS) {
-                        GMNAL_GM_UNLOCK(nal_data);
+                        spin_unlock(&nal_data->gm_lock);
                         CERROR("gm_register_memory returns [%d][%s] "
                                "for memory [%p] len ["LPSZ"]\n",
                                gm_status, gmnal_gm_error(gm_status),
                                riov->iov_base, riov->iov_len);
-                        GMNAL_GM_LOCK(nal_data);
+                        spin_lock(&nal_data->gm_lock);
                         while (riov_dup != riov) {
                                 gm_deregister_memory(nal_data->gm_port,
                                                      riov_dup->iov_base,
                                                      riov_dup->iov_len);
                                 riov_dup++;
                         }
-                        GMNAL_GM_LOCK(nal_data);
+                        spin_lock(&nal_data->gm_lock);
                         /*
                          * give back srxd and buffer. Send NACK to sender
                          */
                         PORTAL_FREE(srxd->riov, nriov_dup*(sizeof(struct iovec)));
                         return(PTL_FAIL);
                 }
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
 
                 riov++;
         }
@@ -994,17 +994,17 @@ gmnal_copyiov(int do_copy, gmnal_srxd_t *srxd, int nsiov,
                         CERROR("Bad args No nal_data\n");
                         return(GMNAL_STATUS_FAIL);
                 }
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 if (gm_global_id_to_node_id(nal_data->gm_port,
                                             srxd->gm_source_node,
                                             &source_node) != GM_SUCCESS) {
 
                         CERROR("cannot resolve global_id [%u] "
                                "to local node_id\n", srxd->gm_source_node);
-                        GMNAL_GM_UNLOCK(nal_data);
+                        spin_unlock(&nal_data->gm_lock);
                         return(GMNAL_STATUS_FAIL);
                 }
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 /*
                  * We need a send token to use gm_get
                  * getting an stxd gets us a send token.
@@ -1026,7 +1026,7 @@ gmnal_copyiov(int do_copy, gmnal_srxd_t *srxd, int nsiov,
                                 CDEBUG(D_INFO, "slen>rlen\n");
                                 ltxd = gmnal_get_ltxd(nal_data);
                                 ltxd->srxd = srxd;
-                                GMNAL_GM_LOCK(nal_data);
+                                spin_lock(&nal_data->gm_lock);
                                 /*
                                  * funny business to get rid
                                  * of compiler warning
@@ -1035,9 +1035,9 @@ gmnal_copyiov(int do_copy, gmnal_srxd_t *srxd, int nsiov,
                                 remote_ptr = (gm_remote_ptr_t)sbuf_long;
                                 gm_get(nal_data->gm_port, remote_ptr, rbuf,
                                        rlen, GM_LOW_PRIORITY, source_node,
-                                       GMNAL_GM_PORT_ID,
+                                       gm_port_id,
                                        gmnal_remote_get_callback, ltxd);
-                                GMNAL_GM_UNLOCK(nal_data);
+                                spin_unlock(&nal_data->gm_lock);
                         }
                         /*
                          * at the end of 1 iov element
@@ -1054,14 +1054,14 @@ gmnal_copyiov(int do_copy, gmnal_srxd_t *srxd, int nsiov,
                                 CDEBUG(D_INFO, "slen<rlen\n");
                                 ltxd = gmnal_get_ltxd(nal_data);
                                 ltxd->srxd = srxd;
-                                GMNAL_GM_LOCK(nal_data);
+                                spin_lock(&nal_data->gm_lock);
                                 sbuf_long = (unsigned long) sbuf;
                                 remote_ptr = (gm_remote_ptr_t)sbuf_long;
                                 gm_get(nal_data->gm_port, remote_ptr, rbuf,
                                        slen, GM_LOW_PRIORITY, source_node,
-                                       GMNAL_GM_PORT_ID,
+                                       gm_port_id,
                                        gmnal_remote_get_callback, ltxd);
-                                GMNAL_GM_UNLOCK(nal_data);
+                                spin_unlock(&nal_data->gm_lock);
                         }
                         /*
                          * at end of siov element
@@ -1077,14 +1077,14 @@ gmnal_copyiov(int do_copy, gmnal_srxd_t *srxd, int nsiov,
                                 CDEBUG(D_INFO, "rlen=slen\n");
                                 ltxd = gmnal_get_ltxd(nal_data);
                                 ltxd->srxd = srxd;
-                                GMNAL_GM_LOCK(nal_data);
+                                spin_lock(&nal_data->gm_lock);
                                 sbuf_long = (unsigned long) sbuf;
                                 remote_ptr = (gm_remote_ptr_t)sbuf_long;
                                 gm_get(nal_data->gm_port, remote_ptr, rbuf,
                                        rlen, GM_LOW_PRIORITY, source_node,
-                                       GMNAL_GM_PORT_ID,
+                                       gm_port_id,
                                        gmnal_remote_get_callback, ltxd);
-                                GMNAL_GM_UNLOCK(nal_data);
+                                spin_unlock(&nal_data->gm_lock);
                         }
                         /*
                          * at end of siov and riov element
@@ -1161,7 +1161,7 @@ gmnal_remote_get_callback(gm_port_t *gm_port, void *context,
          */
         nriov = srxd->nriov;
         riov = srxd->riov;
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         while (nriov--) {
                 CERROR("deregister memory [%p]\n", riov->iov_base);
                 if (gm_deregister_memory(srxd->nal_data->gm_port,
@@ -1171,16 +1171,16 @@ gmnal_remote_get_callback(gm_port_t *gm_port, void *context,
                 }
                 riov++;
         }
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         PORTAL_FREE(srxd->riov, sizeof(struct iovec)*nriov);
 
         /*
          * repost the receive buffer (return receive token)
          */
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_provide_receive_buffer_with_tag(nal_data->gm_port, srxd->buffer,
                                            srxd->gmsize, GM_LOW_PRIORITY, 0);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
 
         return;
 }
@@ -1204,10 +1204,10 @@ gmnal_large_tx_ack(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
         CDEBUG(D_TRACE, "srxd[%p] target_node [%u]\n", srxd,
                srxd->gm_source_node);
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_status = gm_global_id_to_node_id(nal_data->gm_port,
                                             srxd->gm_source_node, &local_nid);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (gm_status != GM_SUCCESS) {
                 CERROR("Failed to obtain local id\n");
                 return;
@@ -1238,14 +1238,14 @@ gmnal_large_tx_ack(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
         CDEBUG(D_INFO, "processing msghdr at [%p]\n", buffer);
 
         CDEBUG(D_INFO, "sending\n");
-        stxd->msg_size= GMNAL_MSGHDR_SIZE;
+        stxd->msg_size= sizeof(gmnal_msghdr_t);
 
         CDEBUG(D_NET, "Calling gm_send_to_peer port [%p] buffer [%p] "
                "gmsize [%lu] msize [%d] global_nid [%u] local_nid[%d] "
                "stxd [%p]\n", nal_data->gm_port, stxd->buffer, stxd->gm_size,
                stxd->msg_size, srxd->gm_source_node, local_nid, stxd);
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         stxd->gm_priority = GM_LOW_PRIORITY;
         stxd->gm_target_node = local_nid;
         gm_send_to_peer_with_callback(nal_data->gm_port, stxd->buffer,
@@ -1254,7 +1254,7 @@ gmnal_large_tx_ack(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
                                       gmnal_large_tx_ack_callback,
                                       (void*)stxd);
 
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         CDEBUG(D_INFO, "gmnal_large_tx_ack :: done\n");
 
         return;
@@ -1283,7 +1283,7 @@ gmnal_large_tx_ack_callback(gm_port_t *gm_port, void *context,
                        stxd, status);
 
         gmnal_return_stxd(stxd->nal_data, stxd);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         return;
 }
 
@@ -1323,10 +1323,10 @@ gmnal_large_tx_ack_received(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
         while(stxd->niov--) {
                 CDEBUG(D_INFO, "deregister memory [%p] size ["LPSZ"]\n",
                        iov->iov_base, iov->iov_len);
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_deregister_memory(nal_data->gm_port, iov->iov_base,
                                      iov->iov_len);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 iov++;
         }
 
diff --git a/lnet/klnds/gmlnd/gmlnd_module.c b/lnet/klnds/gmlnd/gmlnd_module.c
index e2549ef26a..9fa2ea518d 100644
--- a/lnet/klnds/gmlnd/gmlnd_module.c
+++ b/lnet/klnds/gmlnd/gmlnd_module.c
@@ -22,7 +22,7 @@
 
 #include "gmnal.h"
 
-int gmnal_small_msg_size = GMNAL_MSGHDR_SIZE + sizeof(ptl_hdr_t) + PTL_MTU + 928;
+int gmnal_small_msg_size = sizeof(gmnal_msghdr_t) + sizeof(ptl_hdr_t) + PTL_MTU + 928;
 /*
  * -1 indicates default value.
  * This is 1 thread per cpu
@@ -54,21 +54,21 @@ gmnal_cmd(struct portals_cfg *pcfg, void *private)
         PORTAL_ALLOC(name, pcfg->pcfg_plen1);
         copy_from_user(name, PCFG_PBUF(pcfg, 1), pcfg->pcfg_plen1);
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         //nid = gm_host_name_to_node_id(nal_data->gm_port, name);
         gm_status = gm_host_name_to_node_id_ex(nal_data->gm_port, 0, name,
                                                &nid);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (gm_status != GM_SUCCESS) {
                 CDEBUG(D_INFO, "gm_host_name_to_node_id_ex(...host %s) "
                        "failed[%d]\n", name, gm_status);
                 return (-1);
         } else
                 CDEBUG(D_INFO, "Local node %s id is [%d]\n", name, nid);
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_status = gm_node_id_to_global_id(nal_data->gm_port, nid, &gnid);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (gm_status != GM_SUCCESS) {
                 CDEBUG(D_INFO, "gm_node_id_to_global_id failed[%d]\n",
                        gm_status);
diff --git a/lnet/klnds/gmlnd/gmlnd_utils.c b/lnet/klnds/gmlnd/gmlnd_utils.c
index a725088608..1cbb728b8d 100644
--- a/lnet/klnds/gmlnd/gmlnd_utils.c
+++ b/lnet/klnds/gmlnd/gmlnd_utils.c
@@ -61,12 +61,12 @@ gmnal_alloc_txd(gmnal_data_t *nal_data)
 
         CDEBUG(D_TRACE, "gmnal_alloc_small tx\n");
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         /*
          * total number of transmit tokens
          */
         ntx = gm_num_send_tokens(nal_data->gm_port);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         CDEBUG(D_INFO, "total number of send tokens available is [%d]\n", ntx);
 
         /*
@@ -99,12 +99,12 @@ gmnal_alloc_txd(gmnal_data_t *nal_data)
          * When token is obtained acquire the spinlock
          * to manipulate the list
          */
-        GMNAL_TXD_TOKEN_INIT(nal_data, nstx);
-        GMNAL_TXD_LOCK_INIT(nal_data);
-        GMNAL_RXT_TXD_TOKEN_INIT(nal_data, nrxt_stx);
-        GMNAL_RXT_TXD_LOCK_INIT(nal_data);
-        GMNAL_LTXD_TOKEN_INIT(nal_data, nltx);
-        GMNAL_LTXD_LOCK_INIT(nal_data);
+        sema_init(&nal_data->stxd_token, nstx);
+        spin_lock_init(&nal_data->stxd_lock);
+        sema_init(&nal_data->rxt_stxd_token, nrxt_stx);
+        spin_lock_init(&nal_data->rxt_stxd_lock);
+        sema_init(&nal_data->ltxd_token, nltx);
+        spin_lock_init(&nal_data->ltxd_lock);
 
         for (i=0; i<=nstx; i++) {
                 PORTAL_ALLOC(txd, sizeof(gmnal_stxd_t));
@@ -112,18 +112,18 @@ gmnal_alloc_txd(gmnal_data_t *nal_data)
                         CERROR("Failed to malloc txd [%d]\n", i);
                         return(GMNAL_STATUS_NOMEM);
                 }
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 txbuffer = gm_dma_malloc(nal_data->gm_port,
-                                         GMNAL_SMALL_MSG_SIZE(nal_data));
-                GMNAL_GM_UNLOCK(nal_data);
+                                         nal_data->small_msg_size);
+                spin_unlock(&nal_data->gm_lock);
                 if (!txbuffer) {
                         CERROR("Failed to gm_dma_malloc txbuffer [%d], "
-                               "size [%d]\n", i,GMNAL_SMALL_MSG_SIZE(nal_data));
+                               "size [%d]\n", i, nal_data->small_msg_size);
                         PORTAL_FREE(txd, sizeof(gmnal_stxd_t));
                         return(GMNAL_STATUS_FAIL);
                 }
                 txd->buffer = txbuffer;
-                txd->buffer_size = GMNAL_SMALL_MSG_SIZE(nal_data);
+                txd->buffer_size = nal_data->small_msg_size;
                 txd->gm_size = gm_min_size_for_length(txd->buffer_size);
                 txd->nal_data = (struct _gmnal_data_t*)nal_data;
                 txd->rxt = 0;
@@ -140,18 +140,18 @@ gmnal_alloc_txd(gmnal_data_t *nal_data)
                         CERROR("Failed to malloc txd [%d]\n", i);
                         return(GMNAL_STATUS_NOMEM);
                 }
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 txbuffer = gm_dma_malloc(nal_data->gm_port,
-                                         GMNAL_SMALL_MSG_SIZE(nal_data));
-                GMNAL_GM_UNLOCK(nal_data);
+                                         nal_data->small_msg_size);
+                spin_unlock(&nal_data->gm_lock);
                 if (!txbuffer) {
                         CERROR("Failed to gm_dma_malloc txbuffer [%d],"
-                               " size [%d]\n",i,GMNAL_SMALL_MSG_SIZE(nal_data));
+                               " size [%d]\n",i, nal_data->small_msg_size);
                         PORTAL_FREE(txd, sizeof(gmnal_stxd_t));
                         return(GMNAL_STATUS_FAIL);
                 }
                 txd->buffer = txbuffer;
-                txd->buffer_size = GMNAL_SMALL_MSG_SIZE(nal_data);
+                txd->buffer_size = nal_data->small_msg_size;
                 txd->gm_size = gm_min_size_for_length(txd->buffer_size);
                 txd->nal_data = (struct _gmnal_data_t*)nal_data;
                 txd->rxt = 1;
@@ -189,9 +189,9 @@ gmnal_free_txd(gmnal_data_t *nal_data)
                        "size [%d]\n", txd, txd->buffer, txd->buffer_size);
                 _txd = txd;
                 txd = txd->next;
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_dma_free(nal_data->gm_port, _txd->buffer);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(_txd, sizeof(gmnal_stxd_t));
         }
         txd = nal_data->rxt_stxd;
@@ -200,9 +200,9 @@ gmnal_free_txd(gmnal_data_t *nal_data)
                        "size [%d]\n", txd, txd->buffer, txd->buffer_size);
                 _txd = txd;
                 txd = txd->next;
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_dma_free(nal_data->gm_port, _txd->buffer);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(_txd, sizeof(gmnal_stxd_t));
         }
         ltxd = nal_data->ltxd;
@@ -234,11 +234,11 @@ gmnal_get_stxd(gmnal_data_t *nal_data, int block)
 
         if (gmnal_is_rxthread(nal_data)) {
                 CDEBUG(D_INFO, "RXTHREAD Attempting to get token\n");
-                GMNAL_RXT_TXD_GETTOKEN(nal_data);
-                GMNAL_RXT_TXD_LOCK(nal_data);
+                down(&nal_data->rxt_stxd_token);
+                spin_lock(&nal_data->rxt_stxd_lock);
                 txd = nal_data->rxt_stxd;
                 nal_data->rxt_stxd = txd->next;
-                GMNAL_RXT_TXD_UNLOCK(nal_data);
+                spin_unlock(&nal_data->rxt_stxd_lock);
                 CDEBUG(D_INFO, "RXTHREAD got [%p], head is [%p]\n",
                        txd, nal_data->rxt_stxd);
                 txd->kniov = 0;
@@ -246,18 +246,18 @@ gmnal_get_stxd(gmnal_data_t *nal_data, int block)
         } else {
                 if (block) {
                         CDEBUG(D_INFO, "Attempting to get token\n");
-                        GMNAL_TXD_GETTOKEN(nal_data);
+                        down(&nal_data->stxd_token);
                         CDEBUG(D_PORTALS, "Got token\n");
                 } else {
-                        if (GMNAL_TXD_TRYGETTOKEN(nal_data)) {
+                        if (down_trylock(&nal_data->stxd_token)) {
                                 CERROR("can't get token\n");
                                 return(NULL);
                         }
                 }
-                GMNAL_TXD_LOCK(nal_data);
+                spin_lock(&nal_data->stxd_lock);
                 txd = nal_data->stxd;
                 nal_data->stxd = txd->next;
-                GMNAL_TXD_UNLOCK(nal_data);
+                spin_unlock(&nal_data->stxd_lock);
                 CDEBUG(D_INFO, "got [%p], head is [%p]\n", txd,
                        nal_data->stxd);
                 txd->kniov = 0;
@@ -279,18 +279,18 @@ gmnal_return_stxd(gmnal_data_t *nal_data, gmnal_stxd_t *txd)
          * for the rxthread
          */
         if (txd->rxt) {
-                GMNAL_RXT_TXD_LOCK(nal_data);
+                spin_lock(&nal_data->rxt_stxd_lock);
                 txd->next = nal_data->rxt_stxd;
                 nal_data->rxt_stxd = txd;
-                GMNAL_RXT_TXD_UNLOCK(nal_data);
-                GMNAL_RXT_TXD_RETURNTOKEN(nal_data);
+                spin_unlock(&nal_data->rxt_stxd_lock);
+                up(&nal_data->rxt_stxd_token);
                 CDEBUG(D_INFO, "Returned stxd to rxthread list\n");
         } else {
-                GMNAL_TXD_LOCK(nal_data);
+                spin_lock(&nal_data->stxd_lock);
                 txd->next = nal_data->stxd;
                 nal_data->stxd = txd;
-                GMNAL_TXD_UNLOCK(nal_data);
-                GMNAL_TXD_RETURNTOKEN(nal_data);
+                spin_unlock(&nal_data->stxd_lock);
+                up(&nal_data->stxd_token);
                 CDEBUG(D_INFO, "Returned stxd to general list\n");
         }
         return;
@@ -310,11 +310,11 @@ gmnal_get_ltxd(gmnal_data_t *nal_data)
 
         CDEBUG(D_TRACE, "nal_data [%p]\n", nal_data);
 
-        GMNAL_LTXD_GETTOKEN(nal_data);
-        GMNAL_LTXD_LOCK(nal_data);
+        down(&nal_data->ltxd_token);
+        spin_lock(&nal_data->ltxd_lock);
         ltxd = nal_data->ltxd;
         nal_data->ltxd = ltxd->next;
-        GMNAL_LTXD_UNLOCK(nal_data);
+        spin_unlock(&nal_data->ltxd_lock);
         CDEBUG(D_INFO, "got [%p], head is [%p]\n", ltxd, nal_data->ltxd);
         return(ltxd);
 }
@@ -327,11 +327,11 @@ gmnal_return_ltxd(gmnal_data_t *nal_data, gmnal_ltxd_t *ltxd)
 {
         CDEBUG(D_TRACE, "nal_data [%p], ltxd[%p]\n", nal_data, ltxd);
 
-        GMNAL_LTXD_LOCK(nal_data);
+        spin_lock(&nal_data->ltxd_lock);
         ltxd->next = nal_data->ltxd;
         nal_data->ltxd = ltxd;
-        GMNAL_LTXD_UNLOCK(nal_data);
-        GMNAL_LTXD_RETURNTOKEN(nal_data);
+        spin_unlock(&nal_data->ltxd_lock);
+        up(&nal_data->ltxd_token);
         return;
 }
 /*
@@ -351,9 +351,9 @@ gmnal_alloc_srxd(gmnal_data_t *nal_data)
 
         CDEBUG(D_TRACE, "gmnal_alloc_small rx\n");
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         nrx = gm_num_receive_tokens(nal_data->gm_port);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         CDEBUG(D_INFO, "total number of receive tokens available is [%d]\n",
                nrx);
 
@@ -369,17 +369,17 @@ gmnal_alloc_srxd(gmnal_data_t *nal_data)
                nsrx);
 
 
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         nal_data->srxd_hash = gm_create_hash(gm_hash_compare_ptrs,
                                              gm_hash_hash_ptr, 0, 0, nsrx, 0);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
         if (!nal_data->srxd_hash) {
                 CERROR("Failed to create hash table\n");
                 return(GMNAL_STATUS_NOMEM);
         }
 
-        GMNAL_RXD_TOKEN_INIT(nal_data, nsrx);
-        GMNAL_RXD_LOCK_INIT(nal_data);
+        sema_init(&nal_data->srxd_token, nsrx);
+        spin_lock_init(&nal_data->srxd_lock);
 
         for (i=0; i<=nsrx; i++) {
                 PORTAL_ALLOC(rxd, sizeof(gmnal_srxd_t));
@@ -387,55 +387,20 @@ gmnal_alloc_srxd(gmnal_data_t *nal_data)
                         CERROR("Failed to malloc rxd [%d]\n", i);
                         return(GMNAL_STATUS_NOMEM);
                 }
-#if 0
-                PORTAL_ALLOC(rxbuffer, GMNAL_SMALL_MSG_SIZE(nal_data));
-                if (!rxbuffer) {
-                        CERROR("Failed to malloc rxbuffer [%d], "
-                               "size [%d]\n", i,GMNAL_SMALL_MSG_SIZE(nal_data));
-                        PORTAL_FREE(rxd, sizeof(gmnal_srxd_t));
-                        return(GMNAL_STATUS_FAIL);
-                }
-                CDEBUG(D_NET, "Calling gm_register_memory with port [%p] "
-                       "rxbuffer [%p], size [%d]\n", nal_data->gm_port,
-                       rxbuffer, GMNAL_SMALL_MSG_SIZE(nal_data));
-                GMNAL_GM_LOCK(nal_data);
-                gm_status = gm_register_memory(nal_data->gm_port, rxbuffer,
-                                               GMNAL_SMALL_MSG_SIZE(nal_data));
-                GMNAL_GM_UNLOCK(nal_data);
-                if (gm_status != GM_SUCCESS) {
-                        CERROR("gm_register_memory failed buffer [%p],"
-                               " index [%d]\n", rxbuffer, i);
-                        switch(gm_status) {
-                        case(GM_FAILURE):
-                                CERROR("GM_FAILURE\n");
-                                break;
-                        case(GM_PERMISSION_DENIED):
-                                CERROR("PERMISSION_DENIED\n");
-                                break;
-                        case(GM_INVALID_PARAMETER):
-                                CERROR("INVALID_PARAMETER\n");
-                                break;
-                        default:
-                                CERROR("Unknown error[%d]\n",gm_status);
-                                break;
-                        }
-                        return(GMNAL_STATUS_FAIL);
-                }
-#else
-                GMNAL_GM_LOCK(nal_data);
+
+                spin_lock(&nal_data->gm_lock);
                 rxbuffer = gm_dma_malloc(nal_data->gm_port,
-                                         GMNAL_SMALL_MSG_SIZE(nal_data));
-                GMNAL_GM_UNLOCK(nal_data);
+                                         nal_data->small_msg_size);
+                spin_unlock(&nal_data->gm_lock);
                 if (!rxbuffer) {
                         CERROR("Failed to gm_dma_malloc rxbuffer [%d], "
-                               "size [%d]\n",i ,GMNAL_SMALL_MSG_SIZE(nal_data));
+                               "size [%d]\n",i ,nal_data->small_msg_size);
                         PORTAL_FREE(rxd, sizeof(gmnal_srxd_t));
                         return(GMNAL_STATUS_FAIL);
                 }
-#endif
 
                 rxd->buffer = rxbuffer;
-                rxd->size = GMNAL_SMALL_MSG_SIZE(nal_data);
+                rxd->size = nal_data->small_msg_size;
                 rxd->gmsize = gm_min_size_for_length(rxd->size);
 
                 if (gm_hash_insert(nal_data->srxd_hash,
@@ -474,15 +439,15 @@ gmnal_free_srxd(gmnal_data_t *nal_data)
                 rxd = rxd->next;
 
 #if 0
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_deregister_memory(nal_data->gm_port, _rxd->buffer,
                                      _rxd->size);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
                 PORTAL_FREE(_rxd->buffer, GMNAL_SMALL_RXBUFFER_SIZE);
 #else
-                GMNAL_GM_LOCK(nal_data);
+                spin_lock(&nal_data->gm_lock);
                 gm_dma_free(nal_data->gm_port, _rxd->buffer);
-                GMNAL_GM_UNLOCK(nal_data);
+                spin_unlock(&nal_data->gm_lock);
 #endif
                 PORTAL_FREE(_rxd, sizeof(gmnal_srxd_t));
         }
@@ -503,18 +468,18 @@ gmnal_get_srxd(gmnal_data_t *nal_data, int block)
         CDEBUG(D_TRACE, "nal_data [%p] block [%d]\n", nal_data, block);
 
         if (block) {
-                GMNAL_RXD_GETTOKEN(nal_data);
+                down(&nal_data->srxd_token);
         } else {
-                if (GMNAL_RXD_TRYGETTOKEN(nal_data)) {
+                if (down_trylock(&nal_data->srxd_token)) {
                         CDEBUG(D_INFO, "gmnal_get_srxd Can't get token\n");
                         return(NULL);
                 }
         }
-        GMNAL_RXD_LOCK(nal_data);
+        spin_lock(&nal_data->srxd_lock);
         rxd = nal_data->srxd;
         if (rxd)
                 nal_data->srxd = rxd->next;
-        GMNAL_RXD_UNLOCK(nal_data);
+        spin_unlock(&nal_data->srxd_lock);
         CDEBUG(D_INFO, "got [%p], head is [%p]\n", rxd, nal_data->srxd);
         return(rxd);
 }
@@ -527,11 +492,11 @@ gmnal_return_srxd(gmnal_data_t *nal_data, gmnal_srxd_t *rxd)
 {
         CDEBUG(D_TRACE, "nal_data [%p], rxd[%p]\n", nal_data, rxd);
 
-        GMNAL_RXD_LOCK(nal_data);
+        spin_lock(&nal_data->srxd_lock);
         rxd->next = nal_data->srxd;
         nal_data->srxd = rxd;
-        GMNAL_RXD_UNLOCK(nal_data);
-        GMNAL_RXD_RETURNTOKEN(nal_data);
+        spin_unlock(&nal_data->srxd_lock);
+        up(&nal_data->srxd_token);
 
         return;
 }
@@ -595,10 +560,10 @@ gmnal_stop_ctthread(gmnal_data_t *nal_data)
                nal_data);
 
         nal_data->ctthread_flag = GMNAL_THREAD_STOP;
-        GMNAL_GM_LOCK(nal_data);
+        spin_lock(&nal_data->gm_lock);
         gm_set_alarm(nal_data->gm_port, &nal_data->ctthread_alarm, 10,
                      NULL, NULL);
-        GMNAL_GM_UNLOCK(nal_data);
+        spin_unlock(&nal_data->gm_lock);
 
         while(nal_data->ctthread_flag == GMNAL_THREAD_STOP && delay--) {
                 CDEBUG(D_INFO, "gmnal_stop_ctthread sleeping\n");
@@ -875,10 +840,10 @@ gmnal_is_small_msg(gmnal_data_t *nal_data, int niov, struct iovec *iov,
 {
 
         CDEBUG(D_TRACE, "len [%d] limit[%d]\n", len,
-               GMNAL_SMALL_MSG_SIZE(nal_data));
+               nal_data->small_msg_size);
 
         if ((len + sizeof(ptl_hdr_t) + sizeof(gmnal_msghdr_t))
-            < GMNAL_SMALL_MSG_SIZE(nal_data)) {
+            < nal_data->small_msg_size) {
 
                 CDEBUG(D_INFO, "Yep, small message\n");
                 return(1);
-- 
GitLab