| Revision | 46552e68acf71752c2330929cb97c644e4560155 (tree) |
|---|---|
| Date | 2016-01-30 08:40:59 |
| Author | Linus Torvalds <torvalds@linu...> |
| Committer | Linus Torvalds |
Merge tag 'pm+acpi-4.5-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management and ACPI fixes from Rafael Wysocki:
* tag 'pm+acpi-4.5-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
```diff
@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
 		},
 	},
-	{
-	 .callback = video_detect_force_vendor,
-	 .ident = "Dell Inspiron 5737",
-	 .matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
-		},
-	},
 
 	/*
 	 * These models have a working acpi_video backlight control, and using
```
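This hunk drops one entry from the ACPI video driver's DMI quirk table. For readers unfamiliar with the mechanism, below is a minimal userspace model of how such a table is consumed: the kernel's dmi_check_system() walks an array like video_detect_dmi_table and invokes the callback of every entry whose match fields equal the firmware-reported strings. All names here (struct quirk, fake_vendor, etc.) are illustrative, not kernel API.

```c
#include <stdio.h>
#include <string.h>

struct quirk {
	int (*callback)(const struct quirk *q);
	const char *ident;
	const char *sys_vendor;	/* stands in for DMI_SYS_VENDOR */
	const char *product;	/* stands in for DMI_PRODUCT_NAME */
};

static int force_vendor(const struct quirk *q)
{
	printf("forcing vendor backlight for %s\n", q->ident);
	return 1;
}

/* Pretend values read from the firmware's DMI/SMBIOS tables. */
static const char *fake_vendor = "Dell Inc.";
static const char *fake_product = "Inspiron 5737";

static const struct quirk table[] = {
	{ force_vendor, "Dell Inspiron 5737", "Dell Inc.", "Inspiron 5737" },
	{ 0 }	/* terminator, like the empty entry ending DMI tables */
};

int main(void)
{
	/* run the callback of every entry matching the reported strings */
	for (const struct quirk *q = table; q->callback; q++)
		if (!strcmp(q->sys_vendor, fake_vendor) &&
		    !strcmp(q->product, fake_product))
			q->callback(q);
	return 0;
}
```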
```diff
@@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 
 /**
  * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
- * @genpd: PM domait to power off.
+ * @genpd: PM domain to power off.
  *
  * Queue up the execution of genpd_poweroff() unless it's already been done
  * before.
```
```diff
@@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 	queue_work(pm_wq, &genpd->power_off_work);
 }
 
-static int genpd_poweron(struct generic_pm_domain *genpd);
-
 /**
  * __genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int __genpd_poweron(struct generic_pm_domain *genpd)
+static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 {
 	struct gpd_link *link;
 	int ret = 0;
@@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
 	 * with it.
 	 */
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sd_counter_inc(link->master);
+		struct generic_pm_domain *master = link->master;
+
+		genpd_sd_counter_inc(master);
+
+		mutex_lock_nested(&master->lock, depth + 1);
+		ret = __genpd_poweron(master, depth + 1);
+		mutex_unlock(&master->lock);
 
-		ret = genpd_poweron(link->master);
 		if (ret) {
-			genpd_sd_counter_dec(link->master);
+			genpd_sd_counter_dec(master);
 			goto err;
 		}
 	}
```
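The new @depth parameter exists purely for lockdep: powering on a domain recurses up the master chain while holding each slave's lock, and mutex_lock_nested(&master->lock, depth + 1) tells lockdep that each hierarchy level is a distinct nesting level rather than a self-deadlock. Below is a userspace sketch of the same recursion pattern; pthreads has no nesting annotation, so the depth argument here only documents the idea, and all names are illustrative.

```c
#include <pthread.h>
#include <stdio.h>

struct domain {
	const char *name;
	pthread_mutex_t lock;
	struct domain *master;	/* single master, for simplicity */
	int powered;
};

static int poweron_locked(struct domain *d, unsigned int depth)
{
	/* Power on the master chain first, exactly as __genpd_poweron()
	 * walks genpd->slave_links. */
	if (d->master) {
		struct domain *m = d->master;

		/* kernel equivalent: mutex_lock_nested(&m->lock, depth + 1); */
		pthread_mutex_lock(&m->lock);
		int ret = poweron_locked(m, depth + 1);
		pthread_mutex_unlock(&m->lock);
		if (ret)
			return ret;
	}
	d->powered = 1;
	printf("powered on %s (depth %u)\n", d->name, depth);
	return 0;
}

int main(void)
{
	struct domain top = { "top", PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	struct domain sub = { "sub", PTHREAD_MUTEX_INITIALIZER, &top, 0 };

	pthread_mutex_lock(&sub.lock);
	poweron_locked(&sub, 0);	/* like __genpd_poweron(genpd, 0) */
	pthread_mutex_unlock(&sub.lock);
	return 0;
}
```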
```diff
@@ -232,11 +236,12 @@ static int genpd_poweron(struct generic_pm_domain *genpd)
 	int ret;
 
 	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd);
+	ret = __genpd_poweron(genpd, 0);
 	mutex_unlock(&genpd->lock);
 	return ret;
 }
 
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
```
```diff
@@ -484,7 +489,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	}
 
 	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd);
+	ret = __genpd_poweron(genpd, 0);
 	mutex_unlock(&genpd->lock);
 
 	if (ret)
```
```diff
@@ -1339,8 +1344,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	if (!link)
 		return -ENOMEM;
 
-	mutex_lock(&genpd->lock);
-	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock(&subdomain->lock);
+	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
 	if (genpd->status == GPD_STATE_POWER_OFF
 	    && subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1363,8 +1368,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&subdomain->lock);
 	mutex_unlock(&genpd->lock);
+	mutex_unlock(&subdomain->lock);
 	if (ret)
 		kfree(link);
 	return ret;
```
```diff
@@ -1385,7 +1390,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+	mutex_lock(&subdomain->lock);
+	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
 	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1398,22 +1404,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 		if (link->slave != subdomain)
 			continue;
 
-		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
 		list_del(&link->master_node);
 		list_del(&link->slave_node);
 		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
-		mutex_unlock(&subdomain->lock);
-
 		ret = 0;
 		break;
 	}
 
  out:
 	mutex_unlock(&genpd->lock);
+	mutex_unlock(&subdomain->lock);
 
 	return ret;
 }
```
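The two subdomain hunks flip the lock order so that every path in the generic PM domains core agrees on one rule: a subdomain's (slave's) lock is taken before its master's, with the inner acquisition annotated via mutex_lock_nested(). Since __genpd_poweron() above already locks slave-then-master, keeping the old master-then-slave order here would have allowed a classic ABBA deadlock between power-on and (un)linking. An illustrative userspace sketch of the convention, under that reading of the change:

```c
#include <pthread.h>

static pthread_mutex_t master_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t slave_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every code path agrees: slave first, then master. Two threads can
 * then never hold one lock each while waiting for the other. */
static void link_domains(void)
{
	pthread_mutex_lock(&slave_lock);
	pthread_mutex_lock(&master_lock);	/* kernel: mutex_lock_nested() */
	/* ... splice the gpd_link, update counters ... */
	pthread_mutex_unlock(&master_lock);
	pthread_mutex_unlock(&slave_lock);	/* release in reverse order */
}

int main(void)
{
	link_domains();
	return 0;
}
```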
```diff
@@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev,
 
 try_again:
 	cpu_reg = regulator_get_optional(cpu_dev, reg);
-	if (IS_ERR(cpu_reg)) {
+	ret = PTR_ERR_OR_ZERO(cpu_reg);
+	if (ret) {
 		/*
 		 * If cpu's regulator supply node is present, but regulator is
 		 * not yet registered, we should try defering probe.
 		 */
-		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+		if (ret == -EPROBE_DEFER) {
 			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
 				cpu);
-			return -EPROBE_DEFER;
+			return ret;
 		}
 
 		/* Try with "cpu-supply" */
@@ -159,18 +160,16 @@ try_again:
 		goto try_again;
 	}
 
-	dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
-		cpu, PTR_ERR(cpu_reg));
+	dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
 }
 
 	cpu_clk = clk_get(cpu_dev, NULL);
-	if (IS_ERR(cpu_clk)) {
+	ret = PTR_ERR_OR_ZERO(cpu_clk);
+	if (ret) {
 		/* put regulator */
 		if (!IS_ERR(cpu_reg))
 			regulator_put(cpu_reg);
 
-		ret = PTR_ERR(cpu_clk);
-
 		/*
 		 * If cpu's clk node is present, but clock is not yet
 		 * registered, we should try defering probe.
```
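These two hunks in the cpufreq-dt driver replace the IS_ERR()/PTR_ERR() pair with PTR_ERR_OR_ZERO() from <linux/err.h>, which collapses "is this an error pointer, and if so which errno?" into a single assignment. Kernel APIs encode small negative errno values at the top of the pointer range, so one helper can yield either 0 (valid pointer) or the errno. A userspace model of the idiom — the constants mirror the kernel's, but this is a sketch, not the kernel implementation (EPROBE_DEFER is a kernel-internal errno):

```c
#include <stdio.h>

#define EPROBE_DEFER	517	/* kernel-internal errno value */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* 0 for a valid pointer, the encoded errno for an error pointer. */
static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR_VALUE(ptr) ? (long)ptr : 0;
}

int main(void)
{
	void *ok = &(int){ 0 };
	void *bad = (void *)(long)-EPROBE_DEFER;	/* error-encoded pointer */

	printf("ok:  %ld\n", PTR_ERR_OR_ZERO(ok));	/* prints 0 */
	printf("bad: %ld\n", PTR_ERR_OR_ZERO(bad));	/* prints -517 */
	return 0;
}
```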
```diff
@@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
 					  bool active)
 {
 	do {
-		policy = list_next_entry(policy, policy_list);
-
 		/* No more policies in the list */
-		if (&policy->policy_list == &cpufreq_policy_list)
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
 			return NULL;
+
+		policy = list_next_entry(policy, policy_list);
 	} while (!suitable_policy(policy, active));
 
 	return policy;
```
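The reordering matters because of how intrusive lists work: list_next_entry() on the final entry manufactures a struct pointer out of the bare list head, which the old code then had to recognize by address comparison. Testing list_is_last() before stepping means the iterator never materializes such a pseudo-entry. A self-contained model with simplified re-implementations of the <linux/list.h> helpers (not the kernel headers):

```c
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* True when @node is the entry just before @head, i.e. the last one. */
static int list_is_last(const struct list_head *node,
			const struct list_head *head)
{
	return node->next == head;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct policy { int cpu; struct list_head node; };

int main(void)
{
	struct list_head head;
	struct policy a = { 0 }, b = { 1 };

	/* head <-> a <-> b <-> head */
	head.next = &a.node; a.node.prev = &head;
	a.node.next = &b.node; b.node.prev = &a.node;
	b.node.next = &head; head.prev = &b.node;

	for (struct policy *p = &a; ; ) {
		printf("policy for cpu %d\n", p->cpu);
		if (list_is_last(&p->node, &head))
			break;		/* stop before stepping onto the head */
		p = container_of(p->node.next, struct policy, node);
	}
	return 0;
}
```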
```diff
@@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
 	if (!have_governor_per_policy())
 		cdata->gdbs_data = dbs_data;
 
+	policy->governor_data = dbs_data;
+
 	ret = sysfs_create_group(get_governor_parent_kobj(policy),
 				 get_sysfs_attr(dbs_data));
 	if (ret)
 		goto reset_gdbs_data;
 
-	policy->governor_data = dbs_data;
-
 	return 0;
 
 reset_gdbs_data:
+	policy->governor_data = NULL;
+
 	if (!have_governor_per_policy())
 		cdata->gdbs_data = NULL;
 	cdata->exit(dbs_data, !policy->governor->initialized);
@@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
 	if (!cdbs->shared || cdbs->shared->policy)
 		return -EBUSY;
 
-	policy->governor_data = NULL;
 	if (!--dbs_data->usage_count) {
 		sysfs_remove_group(get_governor_parent_kobj(policy),
 				   get_sysfs_attr(dbs_data));
 
+		policy->governor_data = NULL;
+
 		if (!have_governor_per_policy())
 			cdata->gdbs_data = NULL;
 
 		cdata->exit(dbs_data, policy->governor->initialized == 1);
 		kfree(dbs_data);
+	} else {
+		policy->governor_data = NULL;
 	}
 
 	free_common_dbs_info(policy, cdata);
```
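The apparent intent of both governor hunks is lifetime ordering: creating the sysfs group publishes attribute files whose show/store handlers reach policy->governor_data, so the pointer must already be valid when the group appears, and on teardown it must be retired only after the files are gone and before dbs_data is freed. A compressed userspace model of that publish/retire discipline, with illustrative names:

```c
#include <stdlib.h>

struct dbs_data { int usage_count; };
struct policy { struct dbs_data *governor_data; };

static int expose_sysfs(struct policy *p) { (void)p; return 0; }
static void remove_sysfs(struct policy *p) { (void)p; }

static int governor_init(struct policy *p)
{
	struct dbs_data *d = calloc(1, sizeof(*d));
	if (!d)
		return -1;

	p->governor_data = d;		/* 1. publish the data... */
	if (expose_sysfs(p)) {		/* 2. ...then expose the files */
		p->governor_data = NULL;/* unwind in reverse on failure */
		free(d);
		return -1;
	}
	return 0;
}

static void governor_exit(struct policy *p)
{
	remove_sysfs(p);		/* 1. no new readers... */
	struct dbs_data *d = p->governor_data;
	p->governor_data = NULL;	/* 2. ...then retire the pointer */
	free(d);			/* 3. ...and only now free it */
}

int main(void)
{
	struct policy p = { 0 };
	if (!governor_init(&p))
		governor_exit(&p);
	return 0;
}
```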
```diff
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
 	}
 }
 #else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 {
 	return 0;
 }
```
```diff
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
 
 #define CPUIDLE_COUPLED_NOT_IDLE	(-1)
 
-static DEFINE_MUTEX(cpuidle_coupled_lock);
 static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
 
 /*
```
```diff
@@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * be frozen safely.
 	 */
 	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
-	if (index >= 0)
+	if (index > 0)
 		enter_freeze_proper(drv, dev, index);
 
 	return index;
```
```diff
@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 {
 	pci_lock_rescan_remove();
 
-	if (slot->flags & SLOT_IS_GOING_AWAY)
+	if (slot->flags & SLOT_IS_GOING_AWAY) {
+		pci_unlock_rescan_remove();
 		return -ENODEV;
+	}
 
 	/* configure all functions */
 	if (!(slot->flags & SLOT_ENABLED))
```
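The acpiphp hunk closes a classic lock leak: an early return taken while pci_lock_rescan_remove() was still held, leaving every later rescan caller blocked. One common kernel shape that makes this class of bug hard to write is the single-exit goto form, sketched here as illustrative userspace code rather than the driver's actual fix:

```c
#include <pthread.h>

static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;

static int enable_slot(int going_away)
{
	int ret = 0;

	pthread_mutex_lock(&rescan_lock);
	if (going_away) {
		ret = -19;	/* -ENODEV */
		goto out;	/* every path funnels through the unlock */
	}
	/* ... configure all functions ... */
out:
	pthread_mutex_unlock(&rescan_lock);
	return ret;
}

int main(void)
{
	return enable_slot(0);
}
```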
```diff
@@ -235,7 +235,7 @@ config PM_TRACE_RTC
 
 config APM_EMULATION
 	tristate "Advanced Power Management Emulation"
-	depends on PM && SYS_SUPPORTS_APM_EMULATION
+	depends on SYS_SUPPORTS_APM_EMULATION
 	help
 	  APM is a BIOS specification for saving power using several different
 	  techniques. This is mostly useful for battery powered laptops with
```
```diff
@@ -162,7 +162,7 @@ static void cpuidle_idle_call(void)
 	 */
 	if (idle_should_freeze()) {
 		entered_state = cpuidle_enter_freeze(drv, dev);
-		if (entered_state >= 0) {
+		if (entered_state > 0) {
 			local_irq_enable();
 			goto exit_idle;
 		}
```
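This check and the matching one in cpuidle_enter_freeze() above appear to share one rationale: find_deepest_state() evidently scans states from index 1 upward and returns 0 when no state suitable for suspend-to-idle exists, so only a strictly positive index names a real state to enter, and anything else must fall back to the default idle path. A standalone sketch of that sentinel convention, under that reading of the fix (not the kernel function itself):

```c
#include <stdio.h>

static int find_deepest_state(const int *usable, int count)
{
	int i, ret = 0;		/* 0 doubles as "no state found" */

	for (i = 1; i < count; i++)
		if (usable[i])
			ret = i;	/* remember the deepest usable one */
	return ret;
}

int main(void)
{
	int none[] = { 1, 0, 0 };	/* only state 0: nothing to freeze in */
	int some[] = { 1, 1, 1 };

	int idx = find_deepest_state(none, 3);
	if (idx > 0)			/* the fixed check */
		printf("enter state %d\n", idx);
	else
		printf("fall back to default idle\n");

	idx = find_deepest_state(some, 3);
	if (idx > 0)
		printf("enter state %d\n", idx);
	return 0;
}
```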