summaryrefslogtreecommitdiff
blob: 6fc8f682b4898b783d8d7502dffa8d18d86e7286 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
--- linux-2.6.8/include/linux/sched.h.ia64	2005-12-01 15:41:24.000000000 -0500
+++ linux-2.6.8/include/linux/sched.h	2005-12-05 09:43:29.757723671 -0500
@@ -176,6 +176,8 @@
 extern void show_regs(struct pt_regs *);
 extern void smp_show_regs(struct pt_regs *, void *);
 extern void show_vsched(void);
+extern int vsched_init_default(int cpu);
+extern void vsched_fini_default(int cpu);
 
 /*
  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
--- linux-2.6.8/kernel/sched.c.ia64	2005-12-01 15:41:24.000000000 -0500
+++ linux-2.6.8/kernel/sched.c	2005-12-05 10:03:52.078997760 -0500
@@ -4333,9 +4333,6 @@
 	if (__add_vcpu(&idle_vsched, cpu))
 		panic("Can't create idle vcpu %d\n", cpu);
 
-	/* Also create vcpu for default_vsched */
-	if (cpu > 0 && __add_vcpu(&default_vsched, cpu) != 0)
-		panic("Can't create default vcpu %d\n", cpu);
 	cpu_set(cpu, idle_vsched.pcpu_running_map);
 #endif
 	vsched = &idle_vsched;
@@ -5250,6 +5250,28 @@
 	goto out_up;
 }
 
+static inline void offline_vcpu(struct vcpu_scheduler *vsched, int cpu,
+		runqueue_t *rq)
+{
+	spin_lock_irq(&rq->lock);
+	spin_lock(&fairsched_lock);
+	cpu_clear(cpu, vsched->vcpu_online_map);
+	vsched->num_online_vcpus--;
+	spin_unlock(&fairsched_lock);
+	spin_unlock_irq(&rq->lock);
+}
+
+static inline void del_vcpu(struct vcpu_scheduler *vsched, int cpu,
+		vcpu_t vcpu)
+{
+	spin_lock_irq(&fairsched_lock);
+	list_del(&vcpu->list);
+	vsched_vcpu(vsched, cpu) = NULL;
+	spin_unlock_irq(&fairsched_lock);
+
+	kfree(vcpu);
+}
+
 static void vsched_del_vcpu(vcpu_t vcpu)
 {
 	struct vcpu_scheduler *vsched;
@@ -5258,12 +5280,7 @@
 	vsched = vcpu_vsched(vcpu);
 	rq = vcpu_rq(vcpu);
 
-	spin_lock_irq(&rq->lock);
-	spin_lock(&fairsched_lock);
-	cpu_clear(vcpu->id, vsched->vcpu_online_map);
-	vsched->num_online_vcpus--;
-	spin_unlock(&fairsched_lock);
-	spin_unlock_irq(&rq->lock);
+	offline_vcpu(vsched, vcpu->id, rq);
 
 	/*
 	 * all tasks should migrate from this VCPU somewhere,
@@ -5280,12 +5297,7 @@
 
 	BUG_ON(vcpu->active);	/* should be in idle_list */
 
-	spin_lock_irq(&fairsched_lock);
-	list_del(&vcpu->list);
-	vsched_vcpu(vsched, vcpu->id) = NULL;
-	spin_unlock_irq(&fairsched_lock);
-
-	kfree(vcpu);
+	del_vcpu(vsched, vcpu->id, vcpu);
 }
 
 int vsched_mvpr(struct task_struct *p, struct vcpu_scheduler *vsched)
@@ -5623,6 +5635,29 @@
 }
 #endif /* CONFIG_SCHED_VCPU */
 
+int __devinit vsched_init_default(int cpu)
+{
+	if (cpu > 0)
+		return __add_vcpu(&default_vsched, cpu);
+	return 0;
+}
+
+void __devinit vsched_fini_default(int cpu)
+{
+	vcpu_t vcpu;
+	runqueue_t *rq;
+	unsigned long flags;
+
+	if (cpu == 0)
+		return;
+
+	vcpu = vsched_vcpu(&default_vsched, cpu);
+	rq = vcpu_rq(vcpu);
+
+	offline_vcpu(&default_vsched, cpu, rq);
+	del_vcpu(&default_vsched, cpu, vcpu);
+}
+
 void __init sched_init(void)
 {
 	runqueue_t *rq;
--- linux-2.6.8/kernel/cpu.c.ia64	2005-12-01 15:41:24.000000000 -0500
+++ linux-2.6.8/kernel/cpu.c	2005-12-05 09:48:23.973540379 -0500
@@ -196,6 +196,11 @@
 		ret = -EINVAL;
 		goto out;
 	}
+
+	ret = vsched_init_default(cpu);
+	if (ret)
+		goto out;
+
 	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -215,8 +220,10 @@
 	notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
 
 out_notify:
-	if (ret != 0)
+	if (ret != 0) {
 		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
+		vsched_fini_default(cpu);
+	}
 out:
 	up(&cpucontrol);
 	return ret;