Skip to content

Commit bce9038

Browse files
committed
workqueue: add wq_numa_tbl_len and wq_numa_possible_cpumask[]
Unbound workqueues are going to be NUMA-affine. Add wq_numa_tbl_len and wq_numa_possible_cpumask[] in preparation. The former is the highest NUMA node ID + 1 and the latter is masks of possible CPUs for each NUMA node. This patch only introduces these. Future patches will make use of them. v2: NUMA initialization moved into wq_numa_init(). Also, the possible cpumask array is not created if there aren't multiple nodes on the system. wq_numa_enabled bool added. Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
1 parent a892cac commit bce9038

1 file changed

Lines changed: 46 additions & 0 deletions

File tree

kernel/workqueue.c

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@
4444
#include <linux/jhash.h>
4545
#include <linux/hashtable.h>
4646
#include <linux/rculist.h>
47+
#include <linux/nodemask.h>
4748

4849
#include "workqueue_internal.h"
4950

@@ -253,6 +254,12 @@ struct workqueue_struct {
253254

254255
static struct kmem_cache *pwq_cache;
255256

257+
/* highest possible NUMA node id + 1; sized so any node id can index the table */
static int wq_numa_tbl_len;
/* per-node masks of possible CPUs; NULL until wq_numa_init() builds the table */
static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

/* set by wq_numa_init() iff the table above was successfully built */
static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
262+
256263
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
257264
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
258265

@@ -4407,6 +4414,43 @@ void thaw_workqueues(void)
44074414
}
44084415
#endif /* CONFIG_FREEZER */
44094416

4417+
/*
 * wq_numa_init - set up NUMA support for unbound workqueues
 *
 * Determines wq_numa_tbl_len (highest possible NUMA node id + 1) and, on
 * systems with more than one possible node, builds a table of per-node
 * possible-CPU masks derived from cpu_to_node().  On success the table is
 * published through wq_numa_possible_cpumask and wq_numa_enabled is set.
 *
 * NOTE(review): if cpu_to_node() reports NUMA_NO_NODE for any CPU, the
 * function bails out leaving the partially built table allocated; this is
 * a deliberate one-time-init "just proceed" path, not a reusable teardown.
 */
static void __init wq_numa_init(void)
{
	cpumask_var_t *masks;
	int nid, cpu;

	/* the pwq table must be indexable by every possible node id */
	for_each_node(nid) {
		wq_numa_tbl_len = max(wq_numa_tbl_len, nid + 1);
	}

	/* single-node system - NUMA affinity buys nothing, leave disabled */
	if (num_possible_nodes() <= 1)
		return;

	/*
	 * There is no ready-made mask of possible CPUs per node, so derive
	 * one from cpu_to_node(), which is fully initialized by this point.
	 */
	masks = kzalloc(wq_numa_tbl_len * sizeof(masks[0]), GFP_KERNEL);
	BUG_ON(!masks);

	for_each_node(nid) {
		BUG_ON(!alloc_cpumask_var_node(&masks[nid], GFP_KERNEL, nid));
	}

	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		if (WARN_ON(nid == NUMA_NO_NODE)) {
			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
			/* happens iff arch is bonkers, let's just proceed */
			return;
		}
		cpumask_set_cpu(cpu, masks[nid]);
	}

	wq_numa_possible_cpumask = masks;
	wq_numa_enabled = true;
}
4453+
44104454
static int __init init_workqueues(void)
44114455
{
44124456
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
@@ -4423,6 +4467,8 @@ static int __init init_workqueues(void)
44234467
cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
44244468
hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
44254469

4470+
wq_numa_init();
4471+
44264472
/* initialize CPU pools */
44274473
for_each_possible_cpu(cpu) {
44284474
struct worker_pool *pool;

0 commit comments

Comments
 (0)