diff --git a/META b/META
index 3424f97294f45fad409b97589500afa971be2725..d3d40bb759a13dcce13c0da47575e1abb5543894 100644
--- a/META
+++ b/META
@@ -9,8 +9,8 @@
   Name:		slurm
   Major:	14
   Minor:	03
-  Micro:	2
-  Version:	14.03.2
+  Micro:	3
+  Version:	14.03.3
   Release:	1
 
 ##
diff --git a/NEWS b/NEWS
index 57af14e2f59339070295f17f943172611e2f7929..238e220614bcf5a1799655b48a16ac70eb50bd6b 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,13 @@
 This file describes changes in recent versions of Slurm. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in Slurm 14.03.3
+==========================
+ -- Correction to default batch output file name. Version 14.03.2 was using
+    "slurm-<jobid>_4294967294.out" due to an error in the job array logic.
+ -- In slurm.spec file, replace "Requires cray-MySQL-devel-enterprise" with
+    "Requires mysql-devel".
+
 * Changes in Slurm 14.03.2
 ==========================
  -- Fix race condition if PrologFlags=Alloc,NoHold is used.
@@ -425,6 +432,11 @@ documents those changes that are of interest to users and admins.
     tasks per compute node.
  -- Fix issue where user is requesting --acctg-freq=0 and no memory limits.
  -- BGQ - Temp fix issue where job could be left on job_list after it finished.
+ -- BGQ - Fix issue where limits were checked on midplane counts instead of
+    cnode counts.
+ -- BGQ - Move code to only start job on a block after limits are checked.
+ -- Handle node ranges better when dealing with accounting max node limits.
+ -- Fix perlapi to compile correctly with perl 5.18.
 
 * Changes in Slurm 2.6.9
 ========================
diff --git a/auxdir/x_ac_lua.m4 b/auxdir/x_ac_lua.m4
index d5a7a2b7fc0cb20d8dd5c99421d1180bba02334d..61648b5939652b8bcd53fe55a84c52c2c7a91529 100644
--- a/auxdir/x_ac_lua.m4
+++ b/auxdir/x_ac_lua.m4
@@ -31,7 +31,7 @@ AC_DEFUN([X_AC_LUA],
                  #include <lauxlib.h>
 		 #include <lualib.h>
 		],
-		[lua_State *L = luaL_newstate ();
+		[lua_State *L = luaL_newstate (); luaL_openlibs(L);
 		],
 		[], [x_ac_have_lua="no"])
 
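Note: the configure probe above previously proved only that luaL_newstate() could be linked; adding luaL_openlibs() references a second liblua symbol, so a partial or mismatched Lua installation now fails at configure time rather than when the lua plugins first load. A minimal sketch of the complete probe as a standalone program (build flags are an assumption and vary by distribution):

    /* Roughly the program the configure check builds; a sketch,
     * assuming typical link flags (they vary by distribution):
     *   cc probe.c -llua -lm -ldl
     */
    #include <lua.h>
    #include <lauxlib.h>
    #include <lualib.h>

    int main(void)
    {
        lua_State *L = luaL_newstate();  /* first liblua symbol */
        luaL_openlibs(L);                /* second symbol; catches stripped installs */
        lua_close(L);
        return 0;
    }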
diff --git a/configure b/configure
index e57fe49b13970f58c1aa30ef2429a65544fa897b..ed2349ccdbc8c274d6cf7eb296a11959ed1b2fb6 100755
--- a/configure
+++ b/configure
@@ -22846,7 +22846,7 @@ $as_echo_n "checking for whether we can link to liblua... " >&6; }
 int
 main ()
 {
-lua_State *L = luaL_newstate ();
+lua_State *L = luaL_newstate (); luaL_openlibs(L);
 
   ;
   return 0;
diff --git a/contribs/lua/job_submit.license.lua b/contribs/lua/job_submit.license.lua
index 991c31b81af5b73ba7b9cef0a6634153f82383b1..c0d6c31b4629d0ea71bb4e3ae56492319f42d4b2 100644
--- a/contribs/lua/job_submit.license.lua
+++ b/contribs/lua/job_submit.license.lua
@@ -41,8 +41,7 @@ function slurm_job_submit ( job_desc, part_list, submit_uid )
 	if bad_license_count > 0 then
 		log_info("slurm_job_submit: for user %d, invalid licenses value: %s",
 			 job_desc.user_id, job_desc.licenses)
---		ESLURM_INVALID_LICENSES is 2048
-		return 2048
+		return slurm.ESLURM_INVALID_LICENSES
 	end
 
 	return 0
@@ -60,8 +59,7 @@ function slurm_job_modify ( job_desc, job_rec, part_list, modify_uid )
 	if bad_license_count > 0 then
 		log_info("slurm_job_modify: for job %u, invalid licenses value: %s",
 			 job_rec.job_id, job_desc.licenses)
---		ESLURM_INVALID_LICENSES is 2048
-		return 2048
+		return slurm.ESLURM_INVALID_LICENSES
 	end
 
 	return 0
diff --git a/contribs/perlapi/libslurm/perl/classmap b/contribs/perlapi/libslurm/perl/classmap
index 8a7fd5694b10a1adee220a9b23208db88d2e7e7d..5dc29930767f057d25c266744e68b88abad05b98 100644
--- a/contribs/perlapi/libslurm/perl/classmap
+++ b/contribs/perlapi/libslurm/perl/classmap
@@ -4,7 +4,7 @@
 # XXX: DO NOT use $class or other variables used in xsubpp, or there will be
 # trouble with xsubpp v1.9508 as in RHEL5.3
 
-$class_map = {
+$slurm_perl_api::class_map = {
 	"slurm_t" 			=> "Slurm",
 	"bitstr_tPtr"		 	=> "Slurm::Bitstr",
 	"hostlist_t" 			=> "Slurm::Hostlist",
diff --git a/contribs/perlapi/libslurm/perl/typemap b/contribs/perlapi/libslurm/perl/typemap
index d42cad7178b4d856cbb00bda8daf00dc6e5470a2..2ad8cece12f8a206719b316e023fb8fe12330443 100644
--- a/contribs/perlapi/libslurm/perl/typemap
+++ b/contribs/perlapi/libslurm/perl/typemap
@@ -33,7 +33,8 @@ T_SLURM
 
 
 T_PTROBJ_SLURM
-	sv_setref_pv( $arg, \"$ntype\", (void*)$var );
+	sv_setref_pv( $arg, \"${eval(`cat classmap`);\$slurm_perl_api::class_map->{$ntype}}\", (void*)$var );
+
 #####################################
 INPUT
 
@@ -48,12 +49,12 @@ T_SLURM
 	}
 
 T_PTROBJ_SLURM
-	if (sv_isobject($arg) && (SvTYPE(SvRV($arg)) == SVt_PVMG)) {
+	if (sv_isobject($arg) && (SvTYPE(SvRV($arg)) == SVt_PVMG) && sv_derived_from($arg, \"${eval(`cat classmap`);\$slurm_perl_api::class_map->{$ntype}}\")) {
 		IV tmp = SvIV((SV*)SvRV($arg));
 		$var = INT2PTR($type,tmp);
 	} else {
 		Perl_croak(aTHX_ \"%s: %s is not of type %s\",
 			${$ALIAS?\q[GvNAME(CvGV(cv))]:\qq[\"$pname\"]},
-			\"$var\", \"$ntype\");
+			\"$var\", \"${eval(`cat classmap`);\$slurm_perl_api::class_map->{$ntype}}\");
 	}
 
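Note on the typemap change: the ${eval(`cat classmap`);...} expression is evaluated while xsubpp expands the typemap, so the generated glue blesses each pointer into its mapped Perl package (e.g. "Slurm::Hostlist") instead of the raw C type name, and the input map can now reject objects of the wrong class via sv_derived_from(). A hedged sketch of roughly what the expanded maps compile to for a hostlist_t (not literal xsubpp output; the helper names are illustrative):

    #include <EXTERN.h>
    #include <perl.h>

    typedef struct hostlist *hostlist_t;  /* opaque Slurm handle */

    /* OUTPUT map: bless the C pointer into the mapped Perl class
     * ("Slurm::Hostlist") instead of the raw C type name. */
    static void output_example(pTHX_ SV *arg, hostlist_t hl)
    {
        sv_setref_pv(arg, "Slurm::Hostlist", (void *) hl);
    }

    /* INPUT map: unwrap only SVs that are (or inherit from) the
     * mapped class, rather than accepting any magic scalar. */
    static hostlist_t input_example(pTHX_ SV *arg)
    {
        if (sv_isobject(arg) && (SvTYPE(SvRV(arg)) == SVt_PVMG) &&
            sv_derived_from(arg, "Slurm::Hostlist")) {
            IV tmp = SvIV((SV *) SvRV(arg));
            return INT2PTR(hostlist_t, tmp);
        }
        Perl_croak(aTHX_ "argument is not of type Slurm::Hostlist");
        /* not reached */
    }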
diff --git a/doc/html/cray.shtml b/doc/html/cray.shtml
index 8804d63198f96d8bed4d3300d0d0fec279bfdb0c..563fa2a6bf9cda6630b50e48e5c14c8ec3b89478 100644
--- a/doc/html/cray.shtml
+++ b/doc/html/cray.shtml
@@ -94,6 +94,9 @@
   by using the <b><i>SelectTypeParameters=other_cons_res</i></b>,
   doing this will allow you to run multiple jobs on a Cray node just
   like on a normal Linux cluster.
+  Use additional <b><i>SelectTypeParameters</i></b> to identify the resources
+  to be allocated (e.g. cores, sockets, memory, etc.). See the slurm.conf man
+  page for details.
 </p>
 <li>Switch</li>
 <p>
@@ -140,6 +143,6 @@
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 25 March 2014</p>
+<p style="text-align:center;">Last modified 5 April 2014</p>
 
 <!--#include virtual="footer.txt"-->
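A hedged slurm.conf fragment illustrating the paragraph added above (the parameter combination is an example only; consult the slurm.conf man page for the supported values):

    # Allocate individual cores and memory on Cray nodes instead of
    # whole nodes, as on a normal Linux cluster.
    SelectType=select/cray
    SelectTypeParameters=other_cons_res,CR_Core_Memory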
diff --git a/doc/html/gres.shtml b/doc/html/gres.shtml
index 8436b9fdd0b8a5499e178486d3836cf21f4976a4..ac024c813bcb32989fbdf45012102a18b17ea857 100644
--- a/doc/html/gres.shtml
+++ b/doc/html/gres.shtml
 The name can include a numeric range suffix to be interpreted by SLURM.
 This field is generally required if enforcement of generic resource
 allocations is to be supported (i.e. prevents users from making
 use of resources allocated to a different user).
+Enforcement of the file allocation relies upon Linux Control Groups (cgroups)
+and Slurm's task/cgroup plugin, which will place the allocated files into
+the job's cgroup and prevent use of other files.
+Please see Slurm's <a href="cgroups.html">Cgroups Guide</a> for more
+information.<br>
 If File is specified then Count must be either set to the number
 of file names specified or not set (the default value is the number of files
 specified).
@@ -171,6 +176,6 @@ to a physical device</pre>
 explicitly defined in the offload pragmas.</P>
 <!-------------------------------------------------------------------------->
 
-<p style="text-align: center;">Last modified 25 October 2012</p>
+<p style="text-align: center;">Last modified 5 May 2014</p>
 
 </body></html>
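The confinement described in the text added above rests, on Linux, on the cgroup-v1 devices controller: the task/cgroup plugin whitelists only the device files allocated to the job, so opening any other file is denied by the kernel. An illustrative sketch of that kernel interface (the cgroup path and device numbers are examples, not Slurm source):

    /* Sketch of the cgroup-v1 devices controller mechanism that
     * task/cgroup relies on; illustrative only, not Slurm code. */
    #include <stdio.h>

    int main(void)
    {
        const char *cg = "/sys/fs/cgroup/devices/slurm/job_1234";
        char path[256];
        FILE *fp;

        /* Deny all device access for tasks in the job's cgroup... */
        snprintf(path, sizeof(path), "%s/devices.deny", cg);
        if ((fp = fopen(path, "w"))) {
            fputs("a *:* rwm\n", fp);
            fclose(fp);
        }

        /* ...then allow only the allocated file, e.g. /dev/nvidia0
         * (char major 195, minor 0 on typical NVIDIA systems). */
        snprintf(path, sizeof(path), "%s/devices.allow", cg);
        if ((fp = fopen(path, "w"))) {
            fputs("c 195:0 rwm\n", fp);
            fclose(fp);
        }
        return 0;
    }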
diff --git a/slurm.spec b/slurm.spec
index a769c29ebcbda6f163c6c6b29bf2be6c3393bda4..7b78438148bf39c28bd384fd338e4b199a06ce43 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -148,12 +148,10 @@ BuildRequires: mysql-devel >= 5.0.0
 %endif
 
 %if %{slurm_with cray_alps}
-BuildRequires: cray-MySQL-devel-enterprise
-Requires: cray-MySQL-devel-enterprise
+BuildRequires: mysql-devel
 %endif
 
 %if %{slurm_with cray}
-BuildRequires: cray-MySQL-devel-enterprise
 BuildRequires: cray-libalpscomm_cn-devel
 BuildRequires: cray-libalpscomm_sn-devel
 BuildRequires: libnuma-devel
diff --git a/src/plugins/job_submit/lua/job_submit_lua.c b/src/plugins/job_submit/lua/job_submit_lua.c
index 09f6344a9ee81bbceecdf29c1d7bc9b68ad1d2ec..1c7b1456c986fd18e7b634f8014bc232b1f17dd5 100644
--- a/src/plugins/job_submit/lua/job_submit_lua.c
+++ b/src/plugins/job_submit/lua/job_submit_lua.c
@@ -245,7 +245,7 @@ static void _register_lua_slurm_output_functions (void)
 	lua_setfield (L, -2, "log_user");
 
 	/*
-	 * slurm.SUCCESS, slurm.FAILURE and slurm.ERROR
+	 * Error codes: slurm.SUCCESS, slurm.FAILURE, slurm.ERROR, etc.
 	 */
 	lua_pushnumber (L, SLURM_FAILURE);
 	lua_setfield (L, -2, "FAILURE");
@@ -253,6 +253,8 @@ static void _register_lua_slurm_output_functions (void)
 	lua_setfield (L, -2, "ERROR");
 	lua_pushnumber (L, SLURM_SUCCESS);
 	lua_setfield (L, -2, "SUCCESS");
+	lua_pushnumber (L, ESLURM_INVALID_LICENSES);
+	lua_setfield (L, -2, "ESLURM_INVALID_LICENSES");
 
 
 	/*
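With ESLURM_INVALID_LICENSES exported above, the contribs/lua script earlier in this patch can return the symbolic name instead of the literal 2048. The same two-call pattern extends to any other errno worth exposing to scripts; a sketch (the helper name is hypothetical, the ESLURM_* values come from slurm/slurm_errno.h, and the "slurm" table is assumed to be at the top of the Lua stack, as in the function patched above):

    /* Sketch only: generalizing the export pattern above to a table
     * of error codes. The helper name is hypothetical. */
    #include <stddef.h>
    #include <lua.h>
    #include <slurm/slurm_errno.h>

    static void _register_errno_codes(lua_State *L)
    {
        static const struct { const char *name; int code; } codes[] = {
            { "ESLURM_INVALID_LICENSES",   ESLURM_INVALID_LICENSES },
            { "ESLURM_INVALID_TIME_LIMIT", ESLURM_INVALID_TIME_LIMIT },
        };
        size_t i;

        /* Assumes the "slurm" table is at the top of the stack. */
        for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
            lua_pushnumber(L, codes[i].code);
            lua_setfield(L, -2, codes[i].name);
        }
    }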
diff --git a/src/plugins/select/bluegene/bg_job_place.c b/src/plugins/select/bluegene/bg_job_place.c
index 2ec463dc8ebed5f4fe94d9bf4297df650dd7292f..ef989ea2b146c9f340c65a987dc31d85fa4917d1 100644
--- a/src/plugins/select/bluegene/bg_job_place.c
+++ b/src/plugins/select/bluegene/bg_job_place.c
@@ -2107,53 +2107,9 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 						   SELECT_JOBDATA_BLOCK_PTR,
 						   bg_record);
 
-				if ((jobinfo->conn_type[0] != SELECT_NAV)
-				    && (jobinfo->conn_type[0]
-					< SELECT_SMALL)) {
-					for (dim=0; dim<SYSTEM_DIMENSIONS;
-					     dim++)
-						jobinfo->conn_type[dim] =
-							bg_record->conn_type[
-								dim];
-				}
-
-				/* If it isn't 0 then it was setup
-				   previous (sub-block)
-				*/
-				if (jobinfo->geometry[dim] == 0)
-					memcpy(jobinfo->geometry,
-					       bg_record->geo,
-					       sizeof(bg_record->geo));
-
 				_build_job_resources_struct(job_ptr,
 							    slurm_block_bitmap,
 							    bg_record);
-				if (job_ptr) {
-					if (bg_record->job_list) {
-						/* Mark the ba_mp
-						 * cnodes as used now.
-						 */
-						ba_mp_t *ba_mp = list_peek(
-							bg_record->ba_mp_list);
-						xassert(ba_mp);
-						xassert(ba_mp->cnode_bitmap);
-						bit_or(ba_mp->cnode_bitmap,
-						       jobinfo->units_avail);
-						if (!find_job_in_bg_record(
-							    bg_record,
-							    job_ptr->job_id))
-							list_append(bg_record->
-								    job_list,
-								    job_ptr);
-					} else {
-						bg_record->job_running =
-							job_ptr->job_id;
-						bg_record->job_ptr = job_ptr;
-					}
-
-					job_ptr->job_state |= JOB_CONFIGURING;
-					last_bg_update = time(NULL);
-				}
 			} else {
 				set_select_jobinfo(
 					job_ptr->select_jobinfo->data,
diff --git a/src/plugins/select/bluegene/bg_job_run.c b/src/plugins/select/bluegene/bg_job_run.c
index 9f2b46b57fe989dce29cb954633aa5b2fd5abe53..3487984fe000a877c55f8652b70b9f8b7c3ff109 100644
--- a/src/plugins/select/bluegene/bg_job_run.c
+++ b/src/plugins/select/bluegene/bg_job_run.c
@@ -656,15 +656,56 @@ int term_jobs_on_block(char *bg_block_id)
  */
 extern int start_job(struct job_record *job_ptr)
 {
-	int rc = SLURM_SUCCESS;
+	int rc = SLURM_SUCCESS, dim;
 	bg_record_t *bg_record = NULL;
-
 	bg_action_t *bg_action_ptr = NULL;
+	select_jobinfo_t *jobinfo = job_ptr->select_jobinfo->data;
+
+	slurm_mutex_lock(&block_state_mutex);
+	bg_record = jobinfo->bg_record;
+
+	if (!bg_record || !block_ptr_exist_in_list(bg_lists->main, bg_record)) {
+		slurm_mutex_unlock(&block_state_mutex);
+		error("bg_record %s doesn't exist, requested for job (%d)",
+		      jobinfo->bg_block_id, job_ptr->job_id);
+		_destroy_bg_action(bg_action_ptr);
+		return SLURM_ERROR;
+	}
+
+	if ((jobinfo->conn_type[0] != SELECT_NAV)
+	    && (jobinfo->conn_type[0] < SELECT_SMALL)) {
+		for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+			jobinfo->conn_type[dim] = bg_record->conn_type[dim];
+	}
+
+	/* If it isn't 0 then it was set up previously (sub-block).
+	 */
+	if (jobinfo->geometry[dim] == 0)
+		memcpy(jobinfo->geometry, bg_record->geo,
+		       sizeof(bg_record->geo));
+
+	if (bg_record->job_list) {
+		/* Mark the ba_mp cnodes as used now. */
+		ba_mp_t *ba_mp = list_peek(bg_record->ba_mp_list);
+		xassert(ba_mp);
+		xassert(ba_mp->cnode_bitmap);
+		bit_or(ba_mp->cnode_bitmap, jobinfo->units_avail);
+		if (!find_job_in_bg_record(bg_record, job_ptr->job_id))
+			list_append(bg_record->job_list, job_ptr);
+	} else {
+		bg_record->job_running = job_ptr->job_id;
+		bg_record->job_ptr = job_ptr;
+	}
+
+	job_ptr->job_state |= JOB_CONFIGURING;
 
 	bg_action_ptr = xmalloc(sizeof(bg_action_t));
 	bg_action_ptr->op = START_OP;
 	bg_action_ptr->job_ptr = job_ptr;
 
+	/* FIXME: The below get_select_jobinfo calls could be avoided
+	 * by just using the jobinfo as we do above.
+	 */
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_BLOCK_ID,
 			   &(bg_action_ptr->bg_block_id));
@@ -725,26 +766,6 @@ extern int start_job(struct job_record *job_ptr)
 				   bg_action_ptr->mloaderimage);
 	}
 
-	slurm_mutex_lock(&block_state_mutex);
-	bg_record = find_bg_record_in_list(bg_lists->main,
-					   bg_action_ptr->bg_block_id);
-	if (!bg_record) {
-		slurm_mutex_unlock(&block_state_mutex);
-		error("bg_record %s doesn't exist, requested for job (%d)",
-		      bg_action_ptr->bg_block_id, job_ptr->job_id);
-		_destroy_bg_action(bg_action_ptr);
-		return SLURM_ERROR;
-	}
-
-	last_bg_update = time(NULL);
-
-	if (bg_record->job_list) {
-		if (!find_job_in_bg_record(bg_record, job_ptr->job_id))
-			list_append(bg_record->job_list, job_ptr);
-	} else {
-		bg_record->job_running = bg_action_ptr->job_ptr->job_id;
-		bg_record->job_ptr = bg_action_ptr->job_ptr;
-	}
 	num_unused_cpus -= job_ptr->total_cpus;
 
 	if (!block_ptr_exist_in_list(bg_lists->job_running, bg_record))
@@ -757,6 +778,8 @@ extern int start_job(struct job_record *job_ptr)
 	   away.
 	*/
 	bg_record->modifying = 1;
+	last_bg_update = time(NULL);
+
 	slurm_mutex_unlock(&block_state_mutex);
 
 	info("Queue start of job %u in BG block %s",
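Because start_job() now does the bookkeeping formerly done at submit time, it must revalidate jobinfo->bg_record under block_state_mutex first: the block could have been freed between job placement and job start. A generic sketch of that lock/validate/mutate pattern (all names here are hypothetical):

    /* Generic lock-validate-mutate pattern used above; all names
     * are hypothetical, for illustration only. */
    #include <pthread.h>
    #include <stdbool.h>

    extern pthread_mutex_t state_mutex;
    extern bool record_in_list(void *list, void *rec);
    extern void *main_list;

    static int use_record(void *rec)
    {
        pthread_mutex_lock(&state_mutex);
        /* The pointer may be stale: the record could have been freed
         * since the lock was last held, so prove it is still live. */
        if (!rec || !record_in_list(main_list, rec)) {
            pthread_mutex_unlock(&state_mutex);
            return -1;  /* caller reports the error */
        }
        /* ... safe to read and update the record here ... */
        pthread_mutex_unlock(&state_mutex);
        return 0;
    }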
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index 1465601b2469f311f0004f83a9873d40a909a3d5..6a1099645812aa8e69094c7407bc19cab429b131 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -949,7 +949,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 	uint32_t time_limit;
 	bool rc = true;
 	uint32_t wall_mins;
-	int parent = 0; /*flag to tell us if we are looking at the
+	int parent = 0; /* flag to tell us if we are looking at the
 			 * parent or not
 			 */
 	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
@@ -1227,7 +1227,7 @@ extern bool acct_policy_job_runnable_post_select(
 	uint32_t job_memory = 0;
 	bool admin_set_memory_limit = false;
 	bool safe_limits = false;
-	int parent = 0; /*flag to tell us if we are looking at the
+	int parent = 0; /* flag to tell us if we are looking at the
 			 * parent or not
 			 */
 	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
@@ -1949,9 +1949,9 @@ extern uint32_t acct_policy_get_max_nodes(struct job_record *job_ptr)
 
 	if (max_nodes_limit == INFINITE) {
 		slurmdb_association_rec_t *assoc_ptr = job_ptr->assoc_ptr;
-		bool parent = 0; /*flag to tell us if we are looking at the
-				 * parent or not
-				 */
+		bool parent = 0; /* flag to tell us if we are looking at the
+				  * parent or not
+				  */
 		bool grp_set = 0;
 
 		while (assoc_ptr) {
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 35d7c25c4dcbda88aa023e82e535f10e61f29b12..8f82cb89645e6d1d45f4ce20c83efa1e1e9112a9 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -4385,7 +4385,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	List part_ptr_list = NULL;
 	bitstr_t *req_bitmap = NULL, *exc_bitmap = NULL;
 	struct job_record *job_ptr = NULL;
-	slurmdb_association_rec_t assoc_rec, *assoc_ptr;
+	slurmdb_association_rec_t assoc_rec, *assoc_ptr = NULL;
 	List license_list = NULL;
 	bool valid;
 	slurmdb_qos_rec_t qos_rec, *qos_ptr;
@@ -11781,15 +11781,14 @@ extern void build_cg_bitmap(struct job_record *job_ptr)
 
 /* job_hold_requeue()
  *
- * Requeue the job either in JOB_SPECIAL_EXIT state
- * in which is put on hold or if JOB_REQUEUE_HOLD is
- * specified don't change its state. The requeue
- * can happen directly from job_requeue() or from
- * job_epilog_complete() after the last component
- * has finished.
+ * Requeue the job based upon its current state.
+ * If JOB_SPECIAL_EXIT then requeue and hold with JOB_SPECIAL_EXIT state.
+ * If JOB_REQUEUE_HOLD then requeue and hold.
+ * If JOB_REQUEUE then requeue and let it run again.
+ * The requeue can happen directly from job_requeue() or from
+ * job_epilog_complete() after the last component has finished.
  */
-void
-job_hold_requeue(struct job_record *job_ptr)
+extern void job_hold_requeue(struct job_record *job_ptr)
 {
 	uint32_t state;
 	uint32_t flags;
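The rewritten comment distinguishes three requeue flavors. A condensed, approximate sketch of the dispatch it describes (the real function carries more bookkeeping, e.g. accounting and epilog handling):

    /* Condensed sketch of the dispatch the comment above documents;
     * approximate, assumes slurmctld.h context. */
    static void _hold_requeue_sketch(struct job_record *job_ptr)
    {
        uint32_t flags = job_ptr->job_state & JOB_STATE_FLAGS;

        if (!(flags & (JOB_SPECIAL_EXIT | JOB_REQUEUE_HOLD | JOB_REQUEUE)))
            return;                     /* no requeue requested */

        /* All three flavors put the job back in the pending state. */
        job_ptr->job_state = JOB_PENDING | flags;

        if (flags & (JOB_SPECIAL_EXIT | JOB_REQUEUE_HOLD)) {
            /* Hold: zero priority so the scheduler skips the job;
             * JOB_SPECIAL_EXIT additionally keeps its state flag so
             * the special-exit condition remains visible. */
            job_ptr->state_reason = WAIT_HELD_USER;
            job_ptr->priority = 0;
        }
        /* Plain JOB_REQUEUE: left eligible to run again. */
    }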
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index e84dc1ebc6bf1afbbe4c1bea50f08bcf041be73f..bab084fc345549dbc5cc092eae2d6d7626e96bcd 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -1637,7 +1637,20 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	}
 
 	if ((error_code == SLURM_SUCCESS) && select_bitmap) {
-		uint32_t node_cnt = bit_set_count(select_bitmap);
+		uint32_t node_cnt = NO_VAL;
+#ifdef HAVE_BG
+		xassert(job_ptr->select_jobinfo);
+		select_g_select_jobinfo_get(job_ptr->select_jobinfo,
+					    SELECT_JOBDATA_NODE_CNT, &node_cnt);
+		if (node_cnt == NO_VAL) {
+			/* This should never happen */
+			node_cnt = bit_set_count(select_bitmap);
+			error("node_cnt not available at %s:%d\n",
+			      __FILE__, __LINE__);
+		}
+#else
+		node_cnt = bit_set_count(select_bitmap);
+#endif
 		if (!acct_policy_job_runnable_post_select(
 			    job_ptr, node_cnt, job_ptr->total_cpus,
 			    job_ptr->details->pn_min_memory)) {
@@ -1823,6 +1836,11 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		xfree(node_set_ptr);
 	}
 
+#ifdef HAVE_BG
+	if (error_code != SLURM_SUCCESS)
+		free_job_resources(&job_ptr->job_resrcs);
+#endif
+
 	return error_code;
 }
 
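On BlueGene systems the node bitmap tracks midplanes rather than compute nodes, so bit_set_count() undercounts and accounting limits were effectively compared against midplane counts (see the 14.03.2 NEWS entry above). On BG/Q a midplane holds 512 cnodes, so a 2-midplane allocation must be charged as 1024 nodes; a toy illustration:

    /* Toy illustration of the midplane/cnode undercount the
     * HAVE_BG branch above corrects (512 cnodes per BG/Q midplane). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t midplanes = 2;            /* what bit_set_count() returns */
        uint32_t cnodes_per_midplane = 512;

        /* A MaxNodes=100 limit wrongly passes at 2, but fails at 1024. */
        printf("midplanes=%u cnodes=%u\n",
               midplanes, midplanes * cnodes_per_midplane);
        return 0;
    }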
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index c6fad768486fe290c18b7e8b393f5afdb0a63c3c..16d166d4e38360686b47d31edd4d81136ed1fb6e 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -1112,6 +1112,18 @@ extern void job_fini (void);
  */
 extern int job_fail(uint32_t job_id, uint16_t job_state);
 
+
+/* job_hold_requeue()
+ *
+ * Requeue the job based upon its current state.
+ * If JOB_SPECIAL_EXIT then requeue and hold with JOB_SPECIAL_EXIT state.
+ * If JOB_REQUEUE_HOLD then requeue and hold.
+ * If JOB_REQUEUE then requeue and let it run again.
+ * The requeue can happen directly from job_requeue() or from
+ * job_epilog_complete() after the last component has finished.
+ */
+extern void job_hold_requeue(struct job_record *job_ptr);
+
 /*
  * determine if job is ready to execute per the node select plugin
  * IN job_id - job to test
@@ -1870,7 +1882,6 @@ extern int sync_job_files(void);
 /* After recovering job state, if using priority/basic then we increment the
  * priorities of all jobs to avoid decrementing the base down to zero */
 extern void sync_job_priorities(void);
-
 /*
  * update_job - update a job's parameters per the supplied specifications
  * IN job_specs - a job's specification
@@ -2024,11 +2035,4 @@ extern bool validate_super_user(uid_t uid);
  */
 extern bool validate_operator(uid_t uid);
 
-/* job_hold_requeue() - requeue a job in hold or requeue_exit
- *                      state.
- *
- * IN - job record
- */
-extern void job_hold_requeue(struct job_record *job_ptr);
-
 #endif /* !_HAVE_SLURMCTLD_H */
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c
index 2fb824fea6049b62988b208b8a8367f62dc99be7..f43d2d770295636b41b5d8b3118fc09081a80b63 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.c
+++ b/src/slurmd/slurmstepd/slurmstepd_job.c
@@ -148,7 +148,7 @@ static char *
 _batchfilename(stepd_step_rec_t *job, const char *name)
 {
 	if (name == NULL) {
-		if (job->array_task_id == (uint16_t) NO_VAL)
+		if (job->array_task_id == NO_VAL)
 			return fname_create(job, "slurm-%J.out", 0);
 		else
 			return fname_create(job, "slurm-%A_%a.out", 0);
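The removed cast is the entire 14.03.2 file-name bug from the NEWS above: NO_VAL is 0xfffffffe (4294967294), and truncating it to 16 bits gives 65534, so the comparison never matched for a regular batch job and every job took the "%A_%a" array branch, yielding "slurm-<jobid>_4294967294.out". A standalone demonstration of the truncation:

    /* Demonstrates the 16-bit truncation behind the 14.03.2 bug. */
    #include <stdint.h>
    #include <stdio.h>

    #define NO_VAL (0xfffffffe)   /* Slurm's "unset" marker: 4294967294 */

    int main(void)
    {
        uint32_t array_task_id = NO_VAL;  /* job is not an array task */

        /* (uint16_t) NO_VAL is 65534, which can never equal the 32-bit
         * marker, so the old test was always false for regular jobs and
         * they fell through to the array-style file name. */
        printf("buggy: %d\n", array_task_id == (uint16_t) NO_VAL);  /* 0 */
        printf("fixed: %d\n", array_task_id == NO_VAL);             /* 1 */
        return 0;
    }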