author     eisnerd <eisnerd@localhost>  2009-02-18 17:10:58 +0000
committer  eisnerd <eisnerd@localhost>  2009-02-18 17:10:58 +0000
commit     c31245e4d58ff1266955bb6a832208005328edbf (patch)
tree       a1ed601c236a1c63887dbd5ff65b0564a7cb45d9 /app-laptop
parent     Version bump to quassel-0.4.0 (diff)
download   jokey-c31245e4d58ff1266955bb6a832208005328edbf.tar.gz
           jokey-c31245e4d58ff1266955bb6a832208005328edbf.tar.bz2
           jokey-c31245e4d58ff1266955bb6a832208005328edbf.zip
app-laptop/hdapsd: hdapsd added
svn path=/trunk/; revision=607
Diffstat (limited to 'app-laptop')
-rw-r--r--  app-laptop/hdapsd/ChangeLog                         |  21
-rw-r--r--  app-laptop/hdapsd/Manifest                          |  11
-rw-r--r--  app-laptop/hdapsd/files/hdaps-Z60m.patch            |  10
-rw-r--r--  app-laptop/hdapsd/files/hdaps_protect-2.6.24.patch  | 909
-rw-r--r--  app-laptop/hdapsd/files/hdaps_protect-2.6.25.patch  | 949
-rw-r--r--  app-laptop/hdapsd/files/hdaps_protect-2.6.26.patch  | 935
-rw-r--r--  app-laptop/hdapsd/files/hdaps_protect-2.6.27.patch  | 966
-rw-r--r--  app-laptop/hdapsd/files/hdapsd.conf                 |  11
-rw-r--r--  app-laptop/hdapsd/files/hdapsd.init                 |  49
-rw-r--r--  app-laptop/hdapsd/hdapsd-20090129.ebuild            |  47
-rw-r--r--  app-laptop/hdapsd/metadata.xml                      |   7
11 files changed, 3915 insertions, 0 deletions
diff --git a/app-laptop/hdapsd/ChangeLog b/app-laptop/hdapsd/ChangeLog
new file mode 100644
index 0000000..073f01c
--- /dev/null
+++ b/app-laptop/hdapsd/ChangeLog
@@ -0,0 +1,21 @@
+
+
+ 18 Feb 2009; Florian Manschwetus <FlorianManschwetus@gmx.de>
+ +files/hdaps_protect-2.6.24.patch, +files/hdaps_protect-2.6.25.patch,
+ +files/hdaps_protect-2.6.26.patch, +files/hdaps_protect-2.6.27.patch,
+ +files/hdaps-Z60m.patch, +hdapsd-20090129.ebuild, +files/hdapsd.conf,
+ +files/hdapsd.init, +metadata.xml:
+ hdapsd added
+
+ 08 Feb 2009; Florian Manschwetus <florianmanschwetus@gmx.de>
+ -hdapsd-20070802.ebuild, -files/hdapsd-20070802.c,
+ -hdapsd-20081004-r1.ebuild, -files/hdapsd-20081004.c,
+ +hdapsd-20090129.ebuild:
+ bump to 20090129
+
+ 30 Dec 2008; Florian Manschwetus <florianmanschwetus@gmx.de> ChangeLog:
+ hdapsd-20081004-r1
+
+ 30 Dec 2008; Florian Manschwetus <florianmanschwetus@gmx.de> ChangeLog:
+ hdapsd-20081004-r1
+
diff --git a/app-laptop/hdapsd/Manifest b/app-laptop/hdapsd/Manifest
new file mode 100644
index 0000000..0bdca4d
--- /dev/null
+++ b/app-laptop/hdapsd/Manifest
@@ -0,0 +1,11 @@
+AUX hdaps-Z60m.patch 352 RMD160 963be45785af235fa3e0d74d68d2e75fad663396 SHA1 317de158fdacdd379a94b0652b54ed48525a71bc SHA256 05b0def085f637bae8561a825d7ff3ffc51bdd33609b1520e2d30aa807f00016
+AUX hdaps_protect-2.6.24.patch 27929 RMD160 630fd3e7e041a3fabf575db2330e208358323bee SHA1 621546e648c36448bfa6d19bb7fcf4c99154f878 SHA256 3a5d54041da432f4bdcf77e1b9d7f318a395112f2ddb5cd82c30bd90acf4458c
+AUX hdaps_protect-2.6.25.patch 29680 RMD160 4df227f89a7c87eddabf6a996bb602a00c64ef13 SHA1 04f27068439d65c007524e4947db6e75c89a4ac4 SHA256 f7e651ff45c7bbb59ac0ae80cb7253d3a597df8fb56dd7a17c22ebfac5cd0e1a
+AUX hdaps_protect-2.6.26.patch 29010 RMD160 81106dd4caea46303a4f5ba662372fe7cf5a88e3 SHA1 495c27aba7838c4737e0aa1c38a28710408d5f46 SHA256 c2521a34e0c28c617b65bd5bc1b43c8dc652fd7aea985dd9a09ac2304479de2d
+AUX hdaps_protect-2.6.27.patch 32590 RMD160 21a302bb89d47c94d4612a5c0f8db854b0db33fe SHA1 d135b51df5a5656b13541f9e8fb4e48f8c6fc80e SHA256 fe2689c2bc28584e50dde3234147469f48af51bfde866adcbfce17ab56c41a43
+AUX hdapsd.conf 321 RMD160 5bb760d34bca3f5a12aeb057b78de7e2a9065bc3 SHA1 ade013dab6278a385b4658aad7e871b743380689 SHA256 22e1acf385427052a12858db005fd73d42d9f6818fb253b032cd326101df2e34
+AUX hdapsd.init 1321 RMD160 5a02904b766a51c97c6974260816576cd4710beb SHA1 eee2035439fd31901634cee40c2812cb89edd42b SHA256 f55d5fbaab2ddbfd826dd2692197d4b7222065e728b1e3bc5ca8ec8863367de9
+DIST hdapsd-20090129.tar.gz 95211 RMD160 9db3bb776621d45525edc21b99f125ef54bdd5f3 SHA1 8c1a23ff8c4ef2410678340bc1477ec94afc1562 SHA256 753e2d6d8508665331046e0f49287f863bb9b4a56ad8bb9c116cc162b986f640
+EBUILD hdapsd-20090129.ebuild 1510 RMD160 d390528b8732ef696b3384bd83a31f676eb9e835 SHA1 73b1a524fecfa9046dd4a739cdcbac698242288b SHA256 25896e05b7a6ff108113dceaea046ebab6d25fec364b1ed75629e954fadb2366
+MISC ChangeLog 745 RMD160 4269da7231e5ddae33885003218c88225b9294ce SHA1 283717d6aca59bc03ac9599ccb15e1abbda745e1 SHA256 9b5f112cc19162316a414deae18926e5e160686582ce3c32d841778d0a077e7d
+MISC metadata.xml 263 RMD160 97c8ad544063bcbdf65190404934923e0a42d562 SHA1 6e276e4277eeae8ee686d0c858761aa63bf71e0b SHA256 0d5fcdbefefdc4a8d5d703b5a01a74e1bcf181cb3bd29fd82e202221795e51c2
diff --git a/app-laptop/hdapsd/files/hdaps-Z60m.patch b/app-laptop/hdapsd/files/hdaps-Z60m.patch
new file mode 100644
index 0000000..57c723c
--- /dev/null
+++ b/app-laptop/hdapsd/files/hdaps-Z60m.patch
@@ -0,0 +1,10 @@
+--- drivers/hwmon/hdaps.c 2006-06-26 09:37:31.131424200 +0100
++++ drivers/hwmon/hdaps.c 2006-06-26 09:39:49.000000000 +0100
+@@ -539,6 +539,7 @@
+ HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"),
+ HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"),
+ HDAPS_DMI_MATCH_LENOVO("ThinkPad X60"),
++ HDAPS_DMI_MATCH_NORMAL("ThinkPad Z60m"),
+ { .ident = NULL }
+ };
+
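The one-line patch above extends the hdaps driver's DMI whitelist so the accelerometer is probed on the ThinkPad Z60m. For context, a hedged sketch of the whitelist pattern being extended; the HDAPS_DMI_MATCH_NORMAL expansion and the dmi_check_system() call are reconstructed from memory of drivers/hwmon/hdaps.c of that era, not taken from this patch:

#include <linux/dmi.h>

/* Each entry matches on the DMI product version string; a NULL .ident
 * terminates the table (illustrative reconstruction). */
static struct dmi_system_id hdaps_whitelist[] = {
	{ .ident = "ThinkPad Z60m",
	  .matches = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Z60m") } },
	{ .ident = NULL }
};

static int __init hdaps_example_init(void)
{
	/* dmi_check_system() returns the number of matching entries */
	if (!dmi_check_system(hdaps_whitelist))
		return -ENXIO;	/* not a whitelisted ThinkPad */
	/* ... continue probing the embedded controller ... */
	return 0;
}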
diff --git a/app-laptop/hdapsd/files/hdaps_protect-2.6.24.patch b/app-laptop/hdapsd/files/hdaps_protect-2.6.24.patch
new file mode 100644
index 0000000..b9f36eb
--- /dev/null
+++ b/app-laptop/hdapsd/files/hdaps_protect-2.6.24.patch
@@ -0,0 +1,909 @@
+diff --git a/Documentation/block/disk-protection.txt b/Documentation/block/disk-protection.txt
+new file mode 100644
+index 0000000..508cc5b
+--- /dev/null
++++ b/Documentation/block/disk-protection.txt
+@@ -0,0 +1,79 @@
++Hard disk protection
++====================
++
++
++Intro
++-----
++ATA/ATAPI-7 specifies the IDLE IMMEDIATE command with UNLOAD FEATURE.
++Issuing this command should cause the drive to switch to idle mode and
++unload disk heads. This feature is being used in modern laptops in
++conjunction with accelerometers and appropriate software to implement
++a shock protection facility. The idea is to stop all I/O operations on
++the internal hard drive and park its heads on the ramp when critical
++situations are anticipated. The desire to have such a feature
++available on GNU/Linux systems has been the original motivation to
++implement a generic disk parking interface in the Linux kernel.
++
++
++The interface
++-------------
++The interface works as follows: Writing an integer value to
++/sys/block/*/queue/protect will park the respective drive and freeze
++the block layer queue for the specified number of seconds. When the
++timeout expires and no further disk park request has been issued in
++the meantime, the queue is unfrozen and accumulated I/O operations are
++performed.
++
++IMPORTANT NOTE:
++Not all ATA drives implement IDLE IMMEDIATE with UNLOAD FEATURE and
++quite a few of those that do so, don't report this capability as
++described in the specs. When a disk park has been requested through
++sysfs as described above, the kernel will try to determine if the
++drive supports the UNLOAD FEATURE by default. The kernel will only
++rely on the IDLE IMMEDIATE with UNLOAD FEATURE command if it is
++convinced that this command is actually supported by the disk drive;
++otherwise, it will fall back to STANDBY IMMEDIATE. Resuming from the
++latter will take much longer and it is generally more likely to have a
++negative impact on the drive's lifetime due to the increase of spin
++down and up cycles. If you want to use this interface in a shock
++protection framework and you know that your drive does indeed support
++the IDLE IMMEDIATE with UNLOAD FEATURE command despite not saying so,
++you can force the kernel to issue that command by doing the following
++on the command line:
++# echo -n unload > /sys/block/sda/queue/protect_method
++(replace sda by the drive identifier as appropriate).
++
++/sys/block/*/queue/protect_method accepts auto, unload and standby
++respectively. Reading from protect_method shows the available options
++surrounding the active one with brackets. When auto is active, this
++will change to whatever the kernel sees fit after the next disk park
++command has been issued.
++
++
++References
++----------
++
++There are several laptops from different brands featuring shock
++protection capabilities. As manufacturers have refused to support open
++source development of the required software components so far, Linux
++support for shock protection varies considerably between different
++hardware implementations. Ideally, this section should contain a list
++of pointers at different projects aiming at an implementation of shock
++protection on different systems. Unfortunately, I only know of a
++single project which, although still considered experimental, is fit
++for use. Please feel free to add projects that have been the victims
++of my ignorance.
++
++- http://www.thinkwiki.org/wiki/HDAPS
++ See this page for information about Linux support of the hard disk
++ active protection system as implemented in IBM/Lenovo Thinkpads.
++
++
++CREDITS
++-------
++
++The patch to implement the interface described in this file has
++originally been published by Jon Escombe <lists@...>.
++
++
++05 Dec 2006, Elias Oltmanns <eo@...>
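To make the interface described in disk-protection.txt concrete, a minimal userspace sketch follows; it assumes the drive is sda and that the patched kernel exposes the attributes exactly as documented above:

#include <stdio.h>

/* Write a string to a sysfs attribute; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f) == 0 ? 0 : -1;
}

int main(void)
{
	int remaining = 0;
	FILE *f;

	/* Force the unload method (only for drives known to support it). */
	sysfs_write("/sys/block/sda/queue/protect_method", "unload");

	/* Park the heads and freeze the queue for 8 seconds. */
	sysfs_write("/sys/block/sda/queue/protect", "8");

	/* Reading protect returns the seconds left until the queue thaws. */
	f = fopen("/sys/block/sda/queue/protect", "r");
	if (f && fscanf(f, "%d", &remaining) == 1)
		printf("queue frozen for another %d s\n", remaining);
	if (f)
		fclose(f);

	/* Writing 0 would thaw the queue immediately. */
	return 0;
}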
+diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
+index 3b927be..356cfe3 100644
+--- a/block/ll_rw_blk.c
++++ b/block/ll_rw_blk.c
+@@ -39,6 +39,10 @@ #include <scsi/scsi_cmnd.h>
+
+ static void blk_unplug_work(struct work_struct *work);
+ static void blk_unplug_timeout(unsigned long data);
++static void blk_unfreeze_work(struct work_struct *work);
++static void blk_unfreeze_timeout(unsigned long data);
++static int blk_protect_register(struct request_queue *q);
++static void blk_protect_unregister(struct request_queue *q);
+ static void drive_stat_acct(struct request *rq, int new_io);
+ static void init_request_from_bio(struct request *req, struct bio *bio);
+ static int __make_request(struct request_queue *q, struct bio *bio);
+@@ -231,6 +235,16 @@ void blk_queue_make_request(struct reque
+ q->unplug_timer.function = blk_unplug_timeout;
+ q->unplug_timer.data = (unsigned long)q;
+
++ q->max_unfreeze = 30;
++
++ INIT_WORK(&q->unfreeze_work, blk_unfreeze_work);
++
++ q->unfreeze_timer.function = blk_unfreeze_timeout;
++ q->unfreeze_timer.data = (unsigned long)q;
++
++ /* Set protect_method to auto detection initially */
++ q->protect_method = 2;
++
+ /*
+ * by default assume old behaviour and bounce for any highmem page
+ */
+@@ -263,6 +277,18 @@ static void rq_init(struct request_queue
+ rq->next_rq = NULL;
+ }
+
++void blk_queue_issue_protect_fn(struct request_queue *q, issue_protect_fn *ipf)
++{
++ q->issue_protect_fn = ipf;
++}
++EXPORT_SYMBOL(blk_queue_issue_protect_fn);
++
++void blk_queue_issue_unprotect_fn(struct request_queue *q, issue_unprotect_fn *iuf)
++{
++ q->issue_unprotect_fn = iuf;
++}
++EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
++
+ /**
+ * blk_queue_ordered - does this queue support ordered writes
+ * @q: the request queue
+@@ -1861,6 +1887,7 @@ struct request_queue *blk_alloc_queue_no
+ }
+
+ init_timer(&q->unplug_timer);
++ init_timer(&q->unfreeze_timer);
+
+ kobject_set_name(&q->kobj, "%s", "queue");
+ q->kobj.ktype = &queue_ktype;
+@@ -4214,13 +4241,21 @@ int blk_register_queue(struct gendisk *d
+ kobject_uevent(&q->kobj, KOBJ_ADD);
+
+ ret = elv_register_queue(q);
++ if (ret)
++ goto err;
++
++ ret = blk_protect_register(q);
+ if (ret) {
+- kobject_uevent(&q->kobj, KOBJ_REMOVE);
+- kobject_del(&q->kobj);
+- return ret;
++ elv_unregister_queue(q);
++ goto err;
+ }
+
+ return 0;
++
++err:
++ kobject_uevent(&q->kobj, KOBJ_REMOVE);
++ kobject_del(&q->kobj);
++ return ret;
+ }
+
+ void blk_unregister_queue(struct gendisk *disk)
+@@ -4228,6 +4263,7 @@ void blk_unregister_queue(struct gendisk
+ struct request_queue *q = disk->queue;
+
+ if (q && q->request_fn) {
++ blk_protect_unregister(q);
+ elv_unregister_queue(q);
+
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+@@ -4235,3 +4271,197 @@ void blk_unregister_queue(struct gendisk
+ kobject_put(&disk->kobj);
+ }
+ }
++
++/*
++ * Issue lower level unprotect function if no timers are pending.
++ */
++static void blk_unfreeze_work(struct work_struct *work)
++{
++ struct request_queue *q = container_of(work, struct request_queue, unfreeze_work);
++ int pending;
++ unsigned long flags;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ pending = timer_pending(&q->unfreeze_timer);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ if (!pending)
++ q->issue_unprotect_fn(q);
++}
++
++/*
++ * Called when the queue freeze timeout expires...
++ */
++static void blk_unfreeze_timeout(unsigned long data)
++{
++ struct request_queue *q = (struct request_queue *) data;
++
++ kblockd_schedule_work(&q->unfreeze_work);
++}
++
++/*
++ * The lower level driver parks and freezes the queue, and this block layer
++ * function sets up the freeze timeout timer on return. If the queue is
++ * already frozen then this is called to extend the timer...
++ */
++void blk_freeze_queue(struct request_queue *q, int seconds)
++{
++ /* Don't accept arbitrarily long freezes */
++ if (seconds >= q->max_unfreeze)
++ seconds = q->max_unfreeze;
++ /* set/reset the timer */
++ mod_timer(&q->unfreeze_timer, msecs_to_jiffies(seconds*1000) + jiffies);
++}
++
++/*
++ * When reading the 'protect' attribute, we return seconds remaining
++ * before unfreeze timeout expires
++ */
++static ssize_t queue_protect_show(struct request_queue *q, char *page)
++{
++ unsigned int seconds = 0;
++
++ spin_lock_irq(q->queue_lock);
++ if (blk_queue_stopped(q) && timer_pending(&q->unfreeze_timer))
++ /*
++ * Adding 1 in order to guarantee nonzero value until timer
++ * has actually expired.
++ */
++ seconds = jiffies_to_msecs(q->unfreeze_timer.expires
++ - jiffies) / 1000 + 1;
++ spin_unlock_irq(q->queue_lock);
++ return queue_var_show(seconds, (page));
++}
++
++/*
++ * When writing the 'protect' attribute, input is the number of seconds
++ * to freeze the queue for. We call a lower level helper function to
++ * park the heads and freeze/block the queue, then we make a block layer
++ * call to setup the thaw timeout. If input is 0, then we thaw the queue.
++ */
++static ssize_t queue_protect_store(struct request_queue *q,
++ const char *page, size_t count)
++{
++ unsigned long freeze = 0;
++
++ queue_var_store(&freeze, page, count);
++
++ if (freeze>0) {
++ /* Park and freeze */
++ if (!blk_queue_stopped(q))
++ q->issue_protect_fn(q);
++ /* set / reset the thaw timer */
++ spin_lock_irq(q->queue_lock);
++ blk_freeze_queue(q, freeze);
++ spin_unlock_irq(q->queue_lock);
++ } else {
++ spin_lock_irq(q->queue_lock);
++ freeze = del_timer(&q->unfreeze_timer);
++ spin_unlock_irq(q->queue_lock);
++ if (freeze)
++ q->issue_unprotect_fn(q);
++ }
++
++ return count;
++}
++
++static ssize_t
++queue_str_show(char *page, char *str, int status)
++{
++ ssize_t len;
++
++ if (status & 1)
++ len = sprintf(page, "[%s]", str);
++ else
++ len = sprintf(page, "%s", str);
++ if (status & 2)
++ len += sprintf(page+len, "\n");
++ else
++ len += sprintf(page+len, " ");
++ return len;
++}
++
++/*
++ * Returns current protect_method.
++ */
++static ssize_t queue_protect_method_show(struct request_queue *q, char *page)
++{
++ int len = 0;
++ int unload = q->protect_method;
++
++ len += queue_str_show(page+len, "auto", (unload & 2) >> 1);
++ len += queue_str_show(page+len, "unload", unload & 1);
++ len += queue_str_show(page+len, "standby", !unload ? 3 : 2);
++ return len;
++}
++
++/*
++ * Stores the device protect method.
++ */
++static ssize_t queue_protect_method_store(struct request_queue *q,
++ const char *page, size_t count)
++{
++ spin_lock_irq(q->queue_lock);
++ if (!strcmp(page, "auto") || !strcmp(page, "auto\n"))
++ q->protect_method = 2;
++ else if (!strcmp(page, "unload") || !strcmp(page, "unload\n"))
++ q->protect_method = 1;
++ else if (!strcmp(page, "standby") || !strcmp(page, "standby\n"))
++ q->protect_method = 0;
++ else {
++ spin_unlock_irq(q->queue_lock);
++ return -EINVAL;
++ }
++ spin_unlock_irq(q->queue_lock);
++ return count;
++}
++
++static struct queue_sysfs_entry queue_protect_entry = {
++ .attr = { .name = "protect", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_protect_show,
++ .store = queue_protect_store,
++};
++static struct queue_sysfs_entry queue_protect_method_entry = {
++ .attr = { .name = "protect_method", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_protect_method_show,
++ .store = queue_protect_method_store,
++};
++
++static int blk_protect_register(struct request_queue *q)
++{
++ int error = 0;
++
++ /* check that the lower level driver has a protect handler */
++ if (!q->issue_protect_fn)
++ return 0;
++
++ /* create the attributes */
++ error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
++ if (error) {
++ printk(KERN_ERR
++ "blk_protect_register(): failed to create protect queue attribute!\n");
++ return error;
++ }
++
++ error = sysfs_create_file(&q->kobj, &queue_protect_method_entry.attr);
++ if (error) {
++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
++ printk(KERN_ERR
++ "blk_protect_register(): failed to create protect_method attribute!\n");
++ return error;
++ }
++ kobject_get(&q->kobj);
++
++ return 0;
++}
++
++static void blk_protect_unregister(struct request_queue *q)
++{
++ /* check that the lower level driver has a protect handler */
++ if (!q->issue_protect_fn)
++ return;
++
++ /* remove the attributes */
++ sysfs_remove_file(&q->kobj, &queue_protect_method_entry.attr);
++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
++ kobject_put(&q->kobj);
++}
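blk_queue_issue_protect_fn() and blk_queue_issue_unprotect_fn() above are the whole driver-facing API: a low-level driver registers its park/unpark callbacks, and blk_protect_register() creates the sysfs attributes only when issue_protect_fn is non-NULL. The libata hunk below shows the real wiring; as a hypothetical minimal sketch (my_issue_protect and my_issue_unprotect are placeholders, not symbols from this patch):

/* Park the device, then stop the queue so no further requests run. */
static int my_issue_protect(struct request_queue *q)
{
	/* ... issue IDLE IMMEDIATE / STANDBY IMMEDIATE to the device ... */
	blk_stop_queue(q);	/* queue-lock handling omitted in this sketch */
	return 0;
}

/* Undo the park: restart the queue and let accumulated I/O proceed. */
static int my_issue_unprotect(struct request_queue *q)
{
	blk_start_queue(q);	/* queue-lock handling omitted in this sketch */
	return 0;
}

static void my_driver_setup_queue(struct request_queue *q)
{
	blk_queue_issue_protect_fn(q, my_issue_protect);
	blk_queue_issue_unprotect_fn(q, my_issue_unprotect);
}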
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 94144ed..393f1d5 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -853,6 +853,38 @@ static void ata_scsi_dev_config(struct s
+ }
+ }
+
++extern int scsi_protect_queue(struct request_queue *q, int unload);
++extern int scsi_unprotect_queue(struct request_queue *q);
++
++static int ata_scsi_issue_protect_fn(struct request_queue *q)
++{
++ struct scsi_device *sdev = q->queuedata;
++ struct ata_port *ap = ata_shost_to_port(sdev->host);
++ struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
++ int unload = q->protect_method;
++ unsigned long flags;
++
++ if (!dev) {
++ printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): Couldn't find ATA device to be parked.\n");
++ return -ENXIO;
++ }
++
++ if (unload == 2) {
++ unload = ata_id_has_unload(dev->id) ? 1 : 0;
++ spin_lock_irqsave(q->queue_lock, flags);
++ q->protect_method = unload;
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ }
++
++ /* call scsi_protect_queue, requesting either unload or standby */
++ return scsi_protect_queue(q, unload);
++}
++
++static int ata_scsi_issue_unprotect_fn(struct request_queue *q)
++{
++ return scsi_unprotect_queue(q);
++}
++
+ /**
+ * ata_scsi_slave_config - Set SCSI device attributes
+ * @sdev: SCSI device to examine
+@@ -876,6 +908,10 @@ int ata_scsi_slave_config(struct scsi_de
+
+ if (dev)
+ ata_scsi_dev_config(sdev, dev);
++ blk_queue_issue_protect_fn(sdev->request_queue,
++ ata_scsi_issue_protect_fn);
++ blk_queue_issue_unprotect_fn(sdev->request_queue,
++ ata_scsi_issue_unprotect_fn);
+
+ return 0; /* scsi layer doesn't check return value, sigh */
+ }
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index 00123d9..a54e2f2 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -701,6 +701,145 @@ static void idedisk_prepare_flush(struct
+ }
+
+ /*
++ * todo:
++ * - we freeze the queue regardless of success and rely on the
++ * ide_protect_queue function to thaw immediately if the command
++ * failed (to be consistent with the libata handler)... should
++ * we also inspect here?
++ */
++void ide_end_protect_rq(struct request *rq, int error)
++{
++ struct completion *waiting = rq->end_io_data;
++
++ rq->end_io_data = NULL;
++ /* spin lock already acquired */
++ if (!blk_queue_stopped(rq->q))
++ blk_stop_queue(rq->q);
++
++ complete(waiting);
++}
++
++int ide_unprotect_queue(struct request_queue *q)
++{
++ struct request rq;
++ unsigned long flags;
++ int pending = 0, rc = 0;
++ ide_drive_t *drive = q->queuedata;
++ u8 args[7], *argbuf = args;
++
++ if (!blk_queue_stopped(q))
++ return -EIO;
++
++ /* Are there any pending jobs on the queue? */
++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ blk_start_queue(q);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ /* The unload feature of the IDLE_IMMEDIATE command
++ temporarily disables HD power management from spinning down
++ the disk. Any other command will reenable HD pm, so, if
++ there are no pending jobs on the queue, another
++ CHECK_POWER_MODE1 command without the unload feature should do
++ just fine. */
++ if (!pending) {
++ printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++ memset(args, 0, sizeof(args));
++ argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */
++ ide_init_drive_cmd(&rq);
++ rq.cmd_type = REQ_TYPE_ATA_TASK;
++ rq.buffer = argbuf;
++ rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
++ }
++
++ return rc;
++}
++
++int ide_protect_queue(struct request_queue *q, int unload)
++{
++ ide_drive_t *drive = q->queuedata;
++ struct request rq;
++ u8 args[7], *argbuf = args;
++ int ret = 0;
++ DECLARE_COMPLETION(wait);
++
++ memset(&rq, 0, sizeof(rq));
++ memset(args, 0, sizeof(args));
++
++ if (blk_queue_stopped(q))
++ return -EIO;
++
++ if (unload) {
++ argbuf[0] = 0xe1;
++ argbuf[1] = 0x44;
++ argbuf[3] = 0x4c;
++ argbuf[4] = 0x4e;
++ argbuf[5] = 0x55;
++ } else
++ argbuf[0] = 0xe0;
++
++ /* Issue the park command & freeze */
++ ide_init_drive_cmd(&rq);
++
++ rq.cmd_type = REQ_TYPE_ATA_TASK;
++ rq.buffer = argbuf;
++ rq.end_io_data = &wait;
++ rq.end_io = ide_end_protect_rq;
++
++ ret = ide_do_drive_cmd(drive, &rq, ide_next);
++ wait_for_completion(&wait);
++
++ if (ret)
++ {
++ printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
++ ide_unprotect_queue(q);
++ return ret;
++ }
++
++ if (unload) {
++ if (args[3] == 0xc4)
++ printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
++ else {
++ /* error parking the head */
++ printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
++ ret = -EIO;
++ ide_unprotect_queue(q);
++ }
++ } else
++ printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
++
++ return ret;
++}
++
++int idedisk_issue_protect_fn(struct request_queue *q)
++{
++ ide_drive_t *drive = q->queuedata;
++ int unload = q->protect_method;
++ unsigned long flags;
++
++ /*
++ * Check capability of the device -
++ * - if "idle immediate with unload" is supported we use that, else
++ * we use "standby immediate" and live with spinning down the drive..
++ * (Word 84, bit 13 of IDENTIFY DEVICE data)
++ */
++ if (unload == 2) {
++ unload = drive->id->cfsse & (1 << 13) ? 1 : 0;
++ spin_lock_irqsave(q->queue_lock, flags);
++ q->protect_method = unload;
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ }
++
++ return ide_protect_queue(q, unload);
++}
++
++int idedisk_issue_unprotect_fn(struct request_queue *q)
++{
++ return ide_unprotect_queue(q);
++}
++
++/*
+ * This is tightly woven into the driver->do_special can not touch.
+ * DON'T do it again until a total personality rewrite is committed.
+ */
+@@ -972,6 +1111,9 @@ static void idedisk_setup (ide_drive_t *
+ drive->wcache = 1;
+
+ write_cache(drive, 1);
++
++ blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);
++ blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn);
+ }
+
+ static void ide_cacheflush_p(ide_drive_t *drive)
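The magic numbers in ide_protect_queue() above come straight from ATA/ATAPI-7: IDLE IMMEDIATE (0xE1) with FEATURES = 0x44 selects the unload feature, the LBA registers must carry the ASCII signature 'L' 'N' 'U', and on a successful park the drive answers with 0xC4 in LBA Low, which is what the args[3] == 0xc4 check reads back. Decoded as a sketch (the enumerator names are illustrative, not kernel identifiers):

enum park_taskfile_bytes {
	CMD_IDLE_IMMEDIATE	= 0xe1,	/* args[0]: IDLE IMMEDIATE */
	FEAT_UNLOAD		= 0x44,	/* args[1]: unload feature */
	LBAL_UNLOAD		= 0x4c,	/* args[3]: 'L' */
	LBAM_UNLOAD		= 0x4e,	/* args[4]: 'N' */
	LBAH_UNLOAD		= 0x55,	/* args[5]: 'U' */
	LBAL_PARK_COMPLETE	= 0xc4,	/* returned in LBA Low on success */
	CMD_STANDBY_IMMEDIATE	= 0xe0,	/* fallback when unload unsupported */
};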
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index db22d1f..1e54733 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -1270,6 +1270,17 @@ #endif
+ }
+
+ /*
++ * Don't accept a request when the queue is stopped (unless we
++ * are resuming from suspend). Prevents existing queue entries
++ * being processed after queue is stopped by the hard disk
++ * protection mechanism...
++ */
++ if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
++ hwgroup->busy = 0;
++ break;
++ }
++
++ /*
+ * Sanity: don't accept a request that isn't a PM request
+ * if we are currently power managed. This is very important as
+ * blk_stop_queue() doesn't prevent the elv_next_request()
+@@ -1767,6 +1778,9 @@ int ide_do_drive_cmd (ide_drive_t *drive
+ where = ELEVATOR_INSERT_FRONT;
+ rq->cmd_flags |= REQ_PREEMPT;
+ }
++ if (action == ide_next)
++ where = ELEVATOR_INSERT_FRONT;
++
+ __elv_add_request(drive->queue, rq, where, 0);
+ ide_do_request(hwgroup, IDE_NO_IRQ);
+ spin_unlock_irqrestore(&ide_lock, flags);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 0e81e4c..895b1f4 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2268,7 +2268,13 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
+ int
+ scsi_device_quiesce(struct scsi_device *sdev)
+ {
++ int i;
+ int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
++ for (i = 0; err && (sdev->sdev_state == SDEV_BLOCK) && (i < 100);
++ i++) {
++ msleep_interruptible(200);
++ err = scsi_device_set_state(sdev, SDEV_QUIESCE);
++ }
+ if (err)
+ return err;
+
+@@ -2518,3 +2524,166 @@ void scsi_kunmap_atomic_sg(void *virt)
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ }
+ EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
++
++/*
++ * Structure required for synchronous io completion after queue freezing
++ */
++struct scsi_protect_io_context_sync {
++ struct scsi_device *sdev;
++ int result;
++ char *sense;
++ struct completion *waiting;
++};
++
++/*
++ * scsi_protect_wait_done()
++ * Command completion handler for scsi_protect_queue().
++ *
++ * Unable to call scsi_internal_device_block() as
++ * scsi_end_request() already has the spinlock. So,
++ * we put the necessary functionality inline.
++ *
++ * todo:
++ * - we block the queue regardless of success and rely on the
++ * scsi_protect_queue function to unblock if the command
++ * failed... should we also inspect here?
++ */
++static void scsi_protect_wait_done(void *data, char *sense, int result, int resid)
++{
++ struct scsi_protect_io_context_sync *siocs = data;
++ struct completion *waiting = siocs->waiting;
++ struct request_queue *q = siocs->sdev->request_queue;
++
++ siocs->waiting = NULL;
++ siocs->result = result;
++ memcpy(siocs->sense, sense, SCSI_SENSE_BUFFERSIZE);
++
++ if (!scsi_device_set_state(siocs->sdev, SDEV_BLOCK))
++ blk_stop_queue(q);
++
++ complete(waiting);
++}
++
++/*
++ * scsi_unprotect_queue()
++ * - release the queue that was previously blocked
++ */
++int scsi_unprotect_queue(struct request_queue *q)
++{
++ struct scsi_device *sdev = q->queuedata;
++ int rc = 0, pending = 0;
++ u8 scsi_cmd[MAX_COMMAND_SIZE];
++ struct scsi_sense_hdr sshdr;
++
++ if (sdev->sdev_state != SDEV_BLOCK)
++ return -ENXIO;
++
++ /* Are there any pending jobs on the queue? */
++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++
++ rc = scsi_internal_device_unblock(sdev);
++ if (rc)
++ return rc;
++
++ if (!pending) {
++ printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++
++ memset(scsi_cmd, 0, sizeof(scsi_cmd));
++ scsi_cmd[0] = ATA_16;
++ scsi_cmd[1] = (3 << 1); /* Non-data */
++ /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
++ scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
++
++ /* Good values for timeout and retries? Values below
++ from scsi_ioctl_send_command() for default case... */
++ if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
++ (10*HZ), 5))
++ rc = -EIO;
++ }
++ return rc;
++}
++EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
++
++/*
++ * scsi_protect_queue()
++ * - build and issue the park/standby command..
++ * - queue is blocked during command completion handler
++ */
++int scsi_protect_queue(struct request_queue *q, int unload)
++{
++ struct scsi_protect_io_context_sync siocs;
++ struct scsi_device *sdev = q->queuedata;
++ int rc = 0;
++ u8 args[7];
++ u8 scsi_cmd[MAX_COMMAND_SIZE];
++ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
++ unsigned char *desc;
++ DECLARE_COMPLETION_ONSTACK(wait);
++
++ if (sdev->sdev_state != SDEV_RUNNING)
++ return -ENXIO;
++
++ memset(args, 0, sizeof(args));
++ memset(sense, 0, sizeof(sense));
++
++ if (unload) {
++ args[0] = 0xe1;
++ args[1] = 0x44;
++ args[3] = 0x4c;
++ args[4] = 0x4e;
++ args[5] = 0x55;
++ } else
++ args[0] = 0xe0;
++
++ memset(scsi_cmd, 0, sizeof(scsi_cmd));
++ scsi_cmd[0] = ATA_16;
++ scsi_cmd[1] = (3 << 1); /* Non-data */
++ scsi_cmd[2] = 0x20; /* no off.line, or data xfer, request cc */
++ scsi_cmd[4] = args[1];
++ scsi_cmd[6] = args[2];
++ scsi_cmd[8] = args[3];
++ scsi_cmd[10] = args[4];
++ scsi_cmd[12] = args[5];
++ scsi_cmd[14] = args[0];
++ siocs.sdev = sdev;
++ siocs.sense = sense;
++ siocs.waiting = &wait;
++
++ scsi_execute_async(sdev, scsi_cmd, COMMAND_SIZE(scsi_cmd[0]),
++ DMA_NONE, NULL, 0, 0, (10*HZ), 5,
++ &siocs, &scsi_protect_wait_done, GFP_NOWAIT);
++ wait_for_completion(&wait);
++
++ if (siocs.result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
++ scsi_unprotect_queue(q); /* just in case we still managed to block */
++ rc = -EIO;
++ goto out;
++ }
++
++ desc = sense + 8;
++
++ /* Retrieve data from check condition */
++ args[1] = desc[3];
++ args[2] = desc[5];
++ args[3] = desc[7];
++ args[4] = desc[9];
++ args[5] = desc[11];
++ args[0] = desc[13];
++
++ if (unload) {
++ if (args[3] == 0xc4)
++ printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
++ else {
++ /* error parking the head */
++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
++ rc = -EIO;
++ scsi_unprotect_queue(q);
++ }
++ } else
++ printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
++
++out:
++ return rc;
++}
++EXPORT_SYMBOL_GPL(scsi_protect_queue);
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 5c4e54a..d501029 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -380,6 +380,18 @@ #define ata_id_u64(id,n) \
+
+ #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
+
++static inline int ata_id_has_unload(const u16 *id)
++{
++ /* ATA-7 specifies two places to indicate unload feature support.
++ * Since I don't really understand the difference, I'll just check
++ * both and only return zero if none of them indicates otherwise. */
++ if ((id[84] & 0xC000) == 0x4000 && id[84] & (1 << 13))
++ return id[84] & (1 << 13);
++ if ((id[87] & 0xC000) == 0x4000)
++ return id[87] & (1 << 13);
++ return 0;
++}
++
+ static inline bool ata_id_has_hipm(const u16 *id)
+ {
+ u16 val = id[76];
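ata_id_has_unload() above encodes a subtlety of IDENTIFY DEVICE data: words 84 and 87 only carry valid feature bits when their top two bits read 01b, hence the (id[n] & 0xC000) == 0x4000 guard before testing bit 13. A small self-contained sketch of the same rule (the sample IDENTIFY words are made up):

#include <stdio.h>

/* Words 84/87 are valid only when bit 15 is clear and bit 14 is set. */
static int id_word_valid(unsigned short w)
{
	return (w & 0xC000) == 0x4000;
}

static int id_has_unload(const unsigned short *id)
{
	if (id_word_valid(id[84]) && (id[84] & (1 << 13)))
		return 1;
	if (id_word_valid(id[87]) && (id[87] & (1 << 13)))
		return 1;
	return 0;
}

int main(void)
{
	unsigned short id[256] = { 0 };

	id[84] = 0x4000 | (1 << 13);	/* hypothetical drive with unload */
	printf("unload supported: %d\n", id_has_unload(id));
	return 0;
}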
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index d18ee67..e10f40b 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -332,6 +332,8 @@ struct bio_vec;
+ typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+ typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
+ typedef void (softirq_done_fn)(struct request *);
++typedef int (issue_protect_fn) (struct request_queue *);
++typedef int (issue_unprotect_fn) (struct request_queue *);
+
+ enum blk_queue_state {
+ Queue_down,
+@@ -368,6 +370,8 @@ struct request_queue
+ merge_bvec_fn *merge_bvec_fn;
+ prepare_flush_fn *prepare_flush_fn;
+ softirq_done_fn *softirq_done_fn;
++ issue_protect_fn *issue_protect_fn;
++ issue_unprotect_fn *issue_unprotect_fn;
+
+ /*
+ * Dispatch queue sorting
+@@ -383,6 +387,14 @@ struct request_queue
+ unsigned long unplug_delay; /* After this many jiffies */
+ struct work_struct unplug_work;
+
++ /*
++ * Auto-unfreeze state
++ */
++ struct timer_list unfreeze_timer;
++ int max_unfreeze; /* At most this many seconds */
++ struct work_struct unfreeze_work;
++ int protect_method;
++
+ struct backing_dev_info backing_dev_info;
+
+ /*
+@@ -773,6 +785,8 @@ extern int blk_do_ordered(struct request
+ extern unsigned blk_ordered_cur_seq(struct request_queue *);
+ extern unsigned blk_ordered_req_seq(struct request *);
+ extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
++extern void blk_queue_issue_protect_fn(struct request_queue *, issue_protect_fn *);
++extern void blk_queue_issue_unprotect_fn(struct request_queue *, issue_unprotect_fn *);
+
+ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+ extern void blk_dump_rq_flags(struct request *, char *);
+diff --git a/include/linux/ide.h b/include/linux/ide.h
+index dc75ccb..990cac2 100644
+--- a/include/linux/ide.h
++++ b/include/linux/ide.h
+@@ -1045,6 +1045,7 @@ extern void ide_init_drive_cmd (struct r
+ */
+ typedef enum {
+ ide_wait, /* insert rq at end of list, and wait for it */
++ ide_next, /* insert rq immediately after current request */
+ ide_preempt, /* insert rq in front of current request */
+ ide_head_wait, /* insert rq in front of current request and wait for it */
+ ide_end /* insert rq at end of list, but don't wait for it */
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 14daf48..bab372b 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -823,7 +823,7 @@ static void ata_scsi_sdev_config(struct
+ * prevent SCSI midlayer from automatically deferring
+ * requests.
+ */
+- sdev->max_device_blocked = 1;
++ sdev->max_device_blocked = 2;
+ }
+
+ static void ata_scsi_dev_config(struct scsi_device *sdev,
+@@ -3120,7 +3120,7 @@ int ata_scsi_add_hosts(struct ata_host *
+ * Set host_blocked to 1 to prevent SCSI midlayer from
+ * automatically deferring requests.
+ */
+- shost->max_host_blocked = 1;
++ shost->max_host_blocked = 2;
+
+ rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+ if (rc)
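This is the kernel half of the mechanism; the hdapsd daemon packaged by this ebuild is the userspace half, polling the accelerometer and writing to protect when it detects a fall. A hedged sketch of that loop follows; the hdaps position path and its "(x,y)" format match the hdaps driver, while the threshold, drive name, and timing are purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int read_position(int *x, int *y)
{
	FILE *f = fopen("/sys/devices/platform/hdaps/position", "r");
	int rc = -1;

	if (!f)
		return -1;
	if (fscanf(f, "(%d,%d)", x, y) == 2)
		rc = 0;
	fclose(f);
	return rc;
}

static void park(int seconds)
{
	FILE *f = fopen("/sys/block/sda/queue/protect", "w");

	if (!f)
		return;
	fprintf(f, "%d", seconds);	/* non-zero freezes; 0 thaws */
	fclose(f);
}

int main(void)
{
	int px, py, x, y;

	if (read_position(&px, &py) != 0)
		return 1;
	for (;;) {
		if (read_position(&x, &y) == 0) {
			/* crude motion detector; the real hdapsd is smarter */
			if (abs(x - px) > 15 || abs(y - py) > 15)
				park(2);
			px = x;
			py = y;
		}
		usleep(50 * 1000);	/* ~20 Hz sampling */
	}
}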
diff --git a/app-laptop/hdapsd/files/hdaps_protect-2.6.25.patch b/app-laptop/hdapsd/files/hdaps_protect-2.6.25.patch
new file mode 100644
index 0000000..3c55a0b
--- /dev/null
+++ b/app-laptop/hdapsd/files/hdaps_protect-2.6.25.patch
@@ -0,0 +1,949 @@
+ Documentation/block/disk-protection.txt | 79 ++++++++++++++
+ block/blk-core.c | 41 +++++++
+ block/blk-settings.c | 22 ++++
+ block/blk-sysfs.c | 168 ++++++++++++++++++++++++++++++
+ block/blk.h | 3 +
+ drivers/ata/libata-scsi.c | 36 +++++++
+ drivers/ide/ide-disk.c | 145 ++++++++++++++++++++++++++
+ drivers/ide/ide-io.c | 14 +++
+ drivers/scsi/scsi_lib.c | 171 +++++++++++++++++++++++++++++++
+ include/linux/ata.h | 12 ++
+ include/linux/blkdev.h | 14 +++
+ include/linux/ide.h | 1
+ 12 files changed, 703 insertions(+), 3 deletions(-)
+
+diff --git a/Documentation/block/disk-protection.txt b/Documentation/block/disk-protection.txt
+new file mode 100644
+index 0000000..508cc5b
+--- /dev/null
++++ b/Documentation/block/disk-protection.txt
+@@ -0,0 +1,79 @@
++Hard disk protection
++====================
++
++
++Intro
++-----
++ATA/ATAPI-7 specifies the IDLE IMMEDIATE command with UNLOAD FEATURE.
++Issuing this command should cause the drive to switch to idle mode and
++unload disk heads. This feature is being used in modern laptops in
++conjunction with accelerometers and appropriate software to implement
++a shock protection facility. The idea is to stop all I/O operations on
++the internal hard drive and park its heads on the ramp when critical
++situations are anticipated. The desire to have such a feature
++available on GNU/Linux systems has been the original motivation to
++implement a generic disk parking interface in the Linux kernel.
++
++
++The interface
++-------------
++The interface works as follows: Writing an integer value to
++/sys/block/*/queue/protect will park the respective drive and freeze
++the block layer queue for the specified number of seconds. When the
++timeout expires and no further disk park request has been issued in
++the meantime, the queue is unfrozen and accumulated I/O operations are
++performed.
++
++IMPORTANT NOTE:
++Not all ATA drives implement IDLE IMMEDIATE with UNLOAD FEATURE and
++quite a few of those that do so, don't report this capability as
++described in the specs. When a disk park has been requested through
++sysfs as described above, the kernel will try to determine if the
++drive supports the UNLOAD FEATURE by default. The kernel will only
++rely on the IDLE IMMEDIATE with UNLOAD FEATURE command if it is
++convinced that this command is actually supported by the disk drive;
++otherwise, it will fall back to STANDBY IMMEDIATE. Resuming from the
++latter will take much longer and it is generally more likely to have a
++negative impact on the drive's lifetime due to the increase of spin
++down and up cycles. If you want to use this interface in a shock
++protection framework and you know that your drive does indeed support
++the IDLE IMMEDIATE with UNLOAD FEATURE command despite not saying so,
++you can force the kernel to issue that command by doing the following
++on the command line:
++# echo -n unload > /sys/block/sda/queue/protect_method
++(replace sda by the drive identifier as appropriate).
++
++/sys/block/*/queue/protect_method accepts auto, unload and standby
++respectively. Reading from protect_method shows the available options
++surrounding the active one with brackets. When auto is active, this
++will change to whatever the kernel sees fit after the next disk park
++command has been issued.
++
++
++References
++----------
++
++There are several laptops from different brands featuring shock
++protection capabilities. As manufacturers have refused to support open
++source development of the required software components so far, Linux
++support for shock protection varies considerably between different
++hardware implementations. Ideally, this section should contain a list
++of pointers at different projects aiming at an implementation of shock
++protection on different systems. Unfortunately, I only know of a
++single project which, although still considered experimental, is fit
++for use. Please feel free to add projects that have been the victims
++of my ignorance.
++
++- http://www.thinkwiki.org/wiki/HDAPS
++ See this page for information about Linux support of the hard disk
++ active protection system as implemented in IBM/Lenovo Thinkpads.
++
++
++CREDITS
++-------
++
++The patch to implement the interface described in this file has
++originally been published by Jon Escombe <lists@...>.
++
++
++05 Dec 2006, Elias Oltmanns <eo@...>
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 2a438a9..5affbf3 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -320,6 +320,46 @@ void blk_unplug(struct request_queue *q)
+ }
+ EXPORT_SYMBOL(blk_unplug);
+
++/*
++ * Issue lower level unprotect function if no timers are pending.
++ */
++void blk_unfreeze_work(struct work_struct *work)
++{
++ struct request_queue *q = container_of(work, struct request_queue, unfreeze_work);
++ int pending;
++ unsigned long flags;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ pending = timer_pending(&q->unfreeze_timer);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ if (!pending)
++ q->issue_unprotect_fn(q);
++}
++
++/*
++ * Called when the queue freeze timeout expires...
++ */
++void blk_unfreeze_timeout(unsigned long data)
++{
++ struct request_queue *q = (struct request_queue *) data;
++
++ kblockd_schedule_work(&q->unfreeze_work);
++}
++
++/*
++ * The lower level driver parks and freezes the queue, and this block layer
++ * function sets up the freeze timeout timer on return. If the queue is
++ * already frozen then this is called to extend the timer...
++ */
++void blk_freeze_queue(struct request_queue *q, int seconds)
++{
++ /* Don't accept arbitrarily long freezes */
++ if (seconds >= q->max_unfreeze)
++ seconds = q->max_unfreeze;
++ /* set/reset the timer */
++ mod_timer(&q->unfreeze_timer, msecs_to_jiffies(seconds*1000) + jiffies);
++}
++
+ /**
+ * blk_start_queue - restart a previously stopped queue
+ * @q: The &struct request_queue in question
+@@ -482,6 +522,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ }
+
+ init_timer(&q->unplug_timer);
++ init_timer(&q->unfreeze_timer);
+
+ kobject_init(&q->kobj, &blk_queue_ktype);
+
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 5713f7e..b4dfa5d 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -112,6 +112,16 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+ q->unplug_timer.function = blk_unplug_timeout;
+ q->unplug_timer.data = (unsigned long)q;
+
++ q->max_unfreeze = 30;
++
++ INIT_WORK(&q->unfreeze_work, blk_unfreeze_work);
++
++ q->unfreeze_timer.function = blk_unfreeze_timeout;
++ q->unfreeze_timer.data = (unsigned long)q;
++
++ /* Set protect_method to auto detection initially */
++ q->protect_method = 2;
++
+ /*
+ * by default assume old behaviour and bounce for any highmem page
+ */
+@@ -119,6 +129,18 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+ }
+ EXPORT_SYMBOL(blk_queue_make_request);
+
++void blk_queue_issue_protect_fn(struct request_queue *q, issue_protect_fn *ipf)
++{
++ q->issue_protect_fn = ipf;
++}
++EXPORT_SYMBOL(blk_queue_issue_protect_fn);
++
++void blk_queue_issue_unprotect_fn(struct request_queue *q, issue_unprotect_fn *iuf)
++{
++ q->issue_unprotect_fn = iuf;
++}
++EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
++
+ /**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q: the request queue for the device
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 54d0db1..d901003 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -270,6 +270,160 @@ struct kobj_type blk_queue_ktype = {
+ .release = blk_release_queue,
+ };
+
++/*
++ * When reading the 'protect' attribute, we return seconds remaining
++ * before unfreeze timeout expires
++ */
++static ssize_t queue_protect_show(struct request_queue *q, char *page)
++{
++ unsigned int seconds = 0;
++
++ spin_lock_irq(q->queue_lock);
++ if (blk_queue_stopped(q) && timer_pending(&q->unfreeze_timer))
++ /*
++ * Adding 1 in order to guarantee nonzero value until timer
++ * has actually expired.
++ */
++ seconds = jiffies_to_msecs(q->unfreeze_timer.expires
++ - jiffies) / 1000 + 1;
++ spin_unlock_irq(q->queue_lock);
++ return queue_var_show(seconds, (page));
++}
++
++/*
++ * When writing the 'protect' attribute, input is the number of seconds
++ * to freeze the queue for. We call a lower level helper function to
++ * park the heads and freeze/block the queue, then we make a block layer
++ * call to setup the thaw timeout. If input is 0, then we thaw the queue.
++ */
++static ssize_t queue_protect_store(struct request_queue *q,
++ const char *page, size_t count)
++{
++ unsigned long freeze = 0;
++
++ queue_var_store(&freeze, page, count);
++
++ if (freeze>0) {
++ /* Park and freeze */
++ if (!blk_queue_stopped(q))
++ q->issue_protect_fn(q);
++ /* set / reset the thaw timer */
++ spin_lock_irq(q->queue_lock);
++ blk_freeze_queue(q, freeze);
++ spin_unlock_irq(q->queue_lock);
++ } else {
++ spin_lock_irq(q->queue_lock);
++ freeze = del_timer(&q->unfreeze_timer);
++ spin_unlock_irq(q->queue_lock);
++ if (freeze)
++ q->issue_unprotect_fn(q);
++ }
++
++ return count;
++}
++
++static ssize_t
++queue_str_show(char *page, char *str, int status)
++{
++ ssize_t len;
++
++ if (status & 1)
++ len = sprintf(page, "[%s]", str);
++ else
++ len = sprintf(page, "%s", str);
++ if (status & 2)
++ len += sprintf(page+len, "\n");
++ else
++ len += sprintf(page+len, " ");
++ return len;
++}
++
++/*
++ * Returns current protect_method.
++ */
++static ssize_t queue_protect_method_show(struct request_queue *q, char *page)
++{
++ int len = 0;
++ int unload = q->protect_method;
++
++ len += queue_str_show(page+len, "auto", (unload & 2) >> 1);
++ len += queue_str_show(page+len, "unload", unload & 1);
++ len += queue_str_show(page+len, "standby", !unload ? 3 : 2);
++ return len;
++}
++
++/*
++ * Stores the device protect method.
++ */
++static ssize_t queue_protect_method_store(struct request_queue *q,
++ const char *page, size_t count)
++{
++ spin_lock_irq(q->queue_lock);
++ if (!strcmp(page, "auto") || !strcmp(page, "auto\n"))
++ q->protect_method = 2;
++ else if (!strcmp(page, "unload") || !strcmp(page, "unload\n"))
++ q->protect_method = 1;
++ else if (!strcmp(page, "standby") || !strcmp(page, "standby\n"))
++ q->protect_method = 0;
++ else {
++ spin_unlock_irq(q->queue_lock);
++ return -EINVAL;
++ }
++ spin_unlock_irq(q->queue_lock);
++ return count;
++}
++
++static struct queue_sysfs_entry queue_protect_entry = {
++ .attr = { .name = "protect", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_protect_show,
++ .store = queue_protect_store,
++};
++static struct queue_sysfs_entry queue_protect_method_entry = {
++ .attr = { .name = "protect_method", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_protect_method_show,
++ .store = queue_protect_method_store,
++};
++
++static int blk_protect_register(struct request_queue *q)
++{
++ int error = 0;
++
++ /* check that the lower level driver has a protect handler */
++ if (!q->issue_protect_fn)
++ return 0;
++
++ /* create the attributes */
++ error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
++ if (error) {
++ printk(KERN_ERR
++ "blk_protect_register(): failed to create protect queue attribute!\n");
++ return error;
++ }
++
++ error = sysfs_create_file(&q->kobj, &queue_protect_method_entry.attr);
++ if (error) {
++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
++ printk(KERN_ERR
++ "blk_protect_register(): failed to create protect_method attribute!\n");
++ return error;
++ }
++ kobject_get(&q->kobj);
++
++ return 0;
++}
++
++static void blk_protect_unregister(struct request_queue *q)
++{
++ /* check that the lower level driver has a protect handler */
++ if (!q->issue_protect_fn)
++ return;
++
++ /* remove the attributes */
++ sysfs_remove_file(&q->kobj, &queue_protect_method_entry.attr);
++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
++ kobject_put(&q->kobj);
++}
++
+ int blk_register_queue(struct gendisk *disk)
+ {
+ int ret;
+@@ -287,13 +441,20 @@ int blk_register_queue(struct gendisk *disk)
+ kobject_uevent(&q->kobj, KOBJ_ADD);
+
+ ret = elv_register_queue(q);
++ if (ret)
++ goto err;
++ ret = blk_protect_register(q);
+ if (ret) {
+- kobject_uevent(&q->kobj, KOBJ_REMOVE);
+- kobject_del(&q->kobj);
+- return ret;
++ elv_unregister_queue(q);
++ goto err;
+ }
+
+ return 0;
++
++err:
++ kobject_uevent(&q->kobj, KOBJ_REMOVE);
++ kobject_del(&q->kobj);
++ return ret;
+ }
+
+ void blk_unregister_queue(struct gendisk *disk)
+@@ -301,6 +462,7 @@ void blk_unregister_queue(struct gendisk *disk)
+ struct request_queue *q = disk->queue;
+
+ if (q && q->request_fn) {
++ blk_protect_unregister(q);
+ elv_unregister_queue(q);
+
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+diff --git a/block/blk.h b/block/blk.h
+index ec9120f..98d0f87 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -18,6 +18,9 @@ void __blk_queue_free_tags(struct request_queue *q);
+
+ void blk_unplug_work(struct work_struct *work);
+ void blk_unplug_timeout(unsigned long data);
++void blk_unfreeze_work(struct work_struct *work);
++void blk_unfreeze_timeout(unsigned long data);
++void blk_freeze_queue(struct request_queue *q, int seconds);
+
+ struct io_context *current_io_context(gfp_t gfp_flags, int node);
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 1579539..54c1ad2 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -905,6 +905,38 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
+ return 0;
+ }
+
++extern int scsi_protect_queue(struct request_queue *q, int unload);
++extern int scsi_unprotect_queue(struct request_queue *q);
++
++static int ata_scsi_issue_protect_fn(struct request_queue *q)
++{
++ struct scsi_device *sdev = q->queuedata;
++ struct ata_port *ap = ata_shost_to_port(sdev->host);
++ struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
++ int unload = q->protect_method;
++ unsigned long flags;
++
++ if (!dev) {
++ printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): Couldn't find ATA device to be parked.\n");
++ return -ENXIO;
++ }
++
++ if (unload == 2) {
++ unload = ata_id_has_unload(dev->id) ? 1 : 0;
++ spin_lock_irqsave(q->queue_lock, flags);
++ q->protect_method = unload;
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ }
++
++ /* call scsi_protect_queue, requesting either unload or standby */
++ return scsi_protect_queue(q, unload);
++}
++
++static int ata_scsi_issue_unprotect_fn(struct request_queue *q)
++{
++ return scsi_unprotect_queue(q);
++}
++
+ /**
+ * ata_scsi_slave_config - Set SCSI device attributes
+ * @sdev: SCSI device to examine
+@@ -927,6 +959,10 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
+
+ if (dev)
+ rc = ata_scsi_dev_config(sdev, dev);
++ blk_queue_issue_protect_fn(sdev->request_queue,
++ ata_scsi_issue_protect_fn);
++ blk_queue_issue_unprotect_fn(sdev->request_queue,
++ ata_scsi_issue_unprotect_fn);
+
+ return rc;
+ }
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index 39501d1..feab06f 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -612,6 +612,148 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+ }
+
+ /*
++ * todo:
++ * - we freeze the queue regardless of success and rely on the
++ * ide_protect_queue function to thaw immediately if the command
++ * failed (to be consistent with the libata handler)... should
++ * we also inspect here?
++ */
++void ide_end_protect_rq(struct request *rq, int error)
++{
++ struct completion *waiting = rq->end_io_data;
++
++ rq->end_io_data = NULL;
++ /* spin lock already acquired */
++ if (!blk_queue_stopped(rq->q))
++ blk_stop_queue(rq->q);
++
++ complete(waiting);
++}
++
++int ide_unprotect_queue(struct request_queue *q)
++{
++ struct request rq;
++ unsigned long flags;
++ int pending, rc = 0;
++ ide_drive_t *drive = q->queuedata;
++ ide_task_t task;
++
++ if (!blk_queue_stopped(q))
++ return -EIO;
++
++ /* Are there any pending jobs on the queue? */
++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ blk_start_queue(q);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ /* The unload feature of the IDLE_IMMEDIATE command
++ temporarily disables HD power management from spinning down
++ the disk. Any other command will reenable HD pm, so, if
++ there are no pending jobs on the queue, another
++ CHECK_POWER_MODE1 command without the unload feature should do
++ just fine. */
++ if (!pending) {
++ printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++ memset(&task, 0, sizeof(task));
++ task.tf.command = WIN_CHECKPOWERMODE1; /* CHECK_POWER_MODE1 */
++ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++ task.data_phase = TASKFILE_NO_DATA;
++ ide_init_drive_cmd(&rq);
++ rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
++ rq.special = &task;
++ rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
++ }
++
++ return rc;
++}
++
++int ide_protect_queue(struct request_queue *q, int unload)
++{
++ ide_drive_t *drive = q->queuedata;
++ struct request rq;
++ ide_task_t task;
++ int ret = 0;
++ DECLARE_COMPLETION(wait);
++
++ memset(&rq, 0, sizeof(rq));
++ memset(&task, 0, sizeof(task));
++
++ if (blk_queue_stopped(q))
++ return -EIO;
++
++ task.data_phase = TASKFILE_NO_DATA;
++ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++ if (unload) {
++ task.tf.command = 0xe1;
++ task.tf.feature = 0x44;
++ task.tf.lbal = 0x4c;
++ task.tf.lbam = 0x4e;
++ task.tf.lbah = 0x55;
++ } else
++ task.tf.command = 0xe0;
++
++ /* Issue the park command & freeze */
++ ide_init_drive_cmd(&rq);
++
++ rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
++ rq.special = &task;
++ rq.end_io_data = &wait;
++ rq.end_io = ide_end_protect_rq;
++
++ ret = ide_do_drive_cmd(drive, &rq, ide_next);
++ wait_for_completion(&wait);
++
++ if (ret) {
++ printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
++ ide_unprotect_queue(q);
++ return ret;
++ }
++
++ if (unload) {
++ if (task.tf.lbal == 0xc4)
++ printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
++ else {
++ /* error parking the head */
++ printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
++ ret = -EIO;
++ ide_unprotect_queue(q);
++ }
++ } else
++ printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
++
++ return ret;
++}
++
++int idedisk_issue_protect_fn(struct request_queue *q)
++{
++ ide_drive_t *drive = q->queuedata;
++ int unload = q->protect_method;
++ unsigned long flags;
++
++ /*
++ * Check capability of the device -
++ * - if "idle immediate with unload" is supported we use that, else
++ * we use "standby immediate" and live with spinning down the drive..
++ * (Word 84, bit 13 of IDENTIFY DEVICE data)
++ */
++ if (unload == 2) {
++ unload = drive->id->cfsse & (1 << 13) ? 1 : 0;
++ spin_lock_irqsave(q->queue_lock, flags);
++ q->protect_method = unload;
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ }
++
++ return ide_protect_queue(q, unload);
++}
++
++int idedisk_issue_unprotect_fn(struct request_queue *q)
++{
++ return ide_unprotect_queue(q);
++}
++
++/*
+ * This is tightly woven into the driver->do_special can not touch.
+ * DON'T do it again until a total personality rewrite is committed.
+ */
+@@ -877,6 +1019,9 @@ static void idedisk_setup (ide_drive_t *drive)
+ drive->wcache = 1;
+
+ write_cache(drive, 1);
++
++ blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);
++ blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn);
+ }
+
+ static void ide_cacheflush_p(ide_drive_t *drive)
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index 7153796..81444fe 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -1158,6 +1158,17 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
+ }
+
+ /*
++ * Don't accept a request when the queue is stopped (unless we
++ * are resuming from suspend). Prevents existing queue entries
++ * being processed after queue is stopped by the hard disk
++ * protection mechanism...
++ */
++ if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
++ hwgroup->busy = 0;
++ break;
++ }
++
++ /*
+ * Sanity: don't accept a request that isn't a PM request
+ * if we are currently power managed. This is very important as
+ * blk_stop_queue() doesn't prevent the elv_next_request()
+@@ -1651,6 +1662,9 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
+ where = ELEVATOR_INSERT_FRONT;
+ rq->cmd_flags |= REQ_PREEMPT;
+ }
++ if (action == ide_next)
++ where = ELEVATOR_INSERT_FRONT;
++
+ __elv_add_request(drive->queue, rq, where, 0);
+ ide_do_request(hwgroup, IDE_NO_IRQ);
+ spin_unlock_irqrestore(&ide_lock, flags);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index f40898d..02238d6 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2248,7 +2248,13 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
+ int
+ scsi_device_quiesce(struct scsi_device *sdev)
+ {
++ int i;
+ int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
++ for (i = 0; err && (sdev->sdev_state == SDEV_BLOCK) && (i < 100);
++ i++) {
++ msleep_interruptible(200);
++ err = scsi_device_set_state(sdev, SDEV_QUIESCE);
++ }
+ if (err)
+ return err;
+
+@@ -2496,3 +2502,168 @@ void scsi_kunmap_atomic_sg(void *virt)
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ }
+ EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
++
++/*
++ * Structure required for synchronous io completion after queue freezing
++ */
++struct scsi_protect_io_context_sync {
++ struct scsi_device *sdev;
++ int result;
++ char *sense;
++ struct completion *waiting;
++};
++
++/*
++ * scsi_protect_wait_done()
++ * Command completion handler for scsi_protect_queue().
++ *
++ * Unable to call scsi_internal_device_block() as
++ * scsi_end_request() already has the spinlock. So,
++ * we put the necessary functionality inline.
++ *
++ * todo:
++ * - we block the queue regardless of success and rely on the
++ * scsi_protect_queue function to unblock if the command
++ * failed... should we also inspect here?
++ */
++static void scsi_protect_wait_done(void *data, char *sense, int result, int resid)
++{
++ struct scsi_protect_io_context_sync *siocs = data;
++ struct completion *waiting = siocs->waiting;
++ struct request_queue *q = siocs->sdev->request_queue;
++
++ siocs->waiting = NULL;
++ siocs->result = result;
++ memcpy(siocs->sense, sense, SCSI_SENSE_BUFFERSIZE);
++
++ if (!scsi_device_set_state(siocs->sdev, SDEV_BLOCK))
++ blk_stop_queue(q);
++
++ complete(waiting);
++}
++
++/*
++ * scsi_unprotect_queue()
++ * - release the queue that was previously blocked
++ */
++int scsi_unprotect_queue(struct request_queue *q)
++{
++ struct scsi_device *sdev = q->queuedata;
++ int rc = 0, pending = 0;
++ u8 scsi_cmd[MAX_COMMAND_SIZE];
++ struct scsi_sense_hdr sshdr;
++
++ if (sdev->sdev_state != SDEV_BLOCK)
++ return -ENXIO;
++
++ /* Are there any pending jobs on the queue? */
++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++
++ rc = scsi_internal_device_unblock(sdev);
++ if (rc)
++ return rc;
++
++ if (!pending) {
++ printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++
++ memset(scsi_cmd, 0, sizeof(scsi_cmd));
++ scsi_cmd[0] = ATA_16;
++ scsi_cmd[1] = (3 << 1); /* Non-data */
++ /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
++ scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
++
++ /* Good values for timeout and retries? Values below
++ from scsi_ioctl_send_command() for default case... */
++ if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
++ (10*HZ), 5))
++ rc = -EIO;
++ }
++ return rc;
++}
++EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
++
++/*
++ * scsi_protect_queue()
++ * - build and issue the park/standby command..
++ * - queue is blocked during command completion handler
++ */
++int scsi_protect_queue(struct request_queue *q, int unload)
++{
++ struct scsi_protect_io_context_sync siocs;
++ struct scsi_device *sdev = q->queuedata;
++ int rc = 0;
++ u8 args[7];
++ u8 scsi_cmd[MAX_COMMAND_SIZE];
++ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
++ unsigned char *desc;
++ DECLARE_COMPLETION_ONSTACK(wait);
++
++ if (sdev->sdev_state != SDEV_RUNNING)
++ return -ENXIO;
++
++ memset(args, 0, sizeof(args));
++ memset(sense, 0, sizeof(sense));
++
++ if (unload) {
++ args[0] = 0xe1;
++ args[1] = 0x44;
++ args[3] = 0x4c;
++ args[4] = 0x4e;
++ args[5] = 0x55;
++ } else
++ args[0] = 0xe0;
++
++ memset(scsi_cmd, 0, sizeof(scsi_cmd));
++ scsi_cmd[0] = ATA_16;
++ scsi_cmd[1] = (3 << 1); /* Non-data */
++ scsi_cmd[2] = 0x20; /* no off.line, or data xfer, request cc */
++ scsi_cmd[4] = args[1];
++ scsi_cmd[6] = args[2];
++ scsi_cmd[8] = args[3];
++ scsi_cmd[10] = args[4];
++ scsi_cmd[12] = args[5];
++ scsi_cmd[14] = args[0];
++ siocs.sdev = sdev;
++ siocs.sense = sense;
++ siocs.waiting = &wait;
++
++ rc = scsi_execute_async(sdev, scsi_cmd, COMMAND_SIZE(scsi_cmd[0]),
++ DMA_NONE, NULL, 0, 0, (10*HZ), 5,
++ &siocs, &scsi_protect_wait_done, GFP_NOWAIT);
++ if (rc)
++ goto out;
++ wait_for_completion(&wait);
++
++ if (siocs.result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
++ scsi_unprotect_queue(q); /* just in case we still managed to block */
++ rc = -EIO;
++ goto out;
++ }
++
++ desc = sense + 8;
++
++ /* Retrieve data from check condition */
++ args[1] = desc[3];
++ args[2] = desc[5];
++ args[3] = desc[7];
++ args[4] = desc[9];
++ args[5] = desc[11];
++ args[0] = desc[13];
++
++ if (unload) {
++ if (args[3] == 0xc4)
++ printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
++ else {
++ /* error parking the head */
++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
++ rc = -EIO;
++ scsi_unprotect_queue(q);
++ }
++ } else
++ printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
++
++out:
++ return rc;
++}
++EXPORT_SYMBOL_GPL(scsi_protect_queue);
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 1c622e2..199a4e3 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -459,6 +459,18 @@ static inline int ata_is_data(u8 prot)
+
+ #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
+
++static inline int ata_id_has_unload(const u16 *id)
++{
++ /* ATA-7 specifies two places to indicate unload feature support.
++ * Since I don't really understand the difference, I'll just check
++ * both and only return zero if none of them indicates otherwise. */
++ if ((id[84] & 0xC000) == 0x4000 && id[84] & (1 << 13))
++ return id[84] & (1 << 13);
++ if ((id[87] & 0xC000) == 0x4000)
++ return id[87] & (1 << 13);
++ return 0;
++}
++
+ static inline bool ata_id_has_hipm(const u16 *id)
+ {
+ u16 val = id[76];
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 6f79d40..1173d21 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -260,6 +260,8 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_ve
+ typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
+ typedef void (softirq_done_fn)(struct request *);
+ typedef int (dma_drain_needed_fn)(struct request *);
++typedef int (issue_protect_fn) (struct request_queue *);
++typedef int (issue_unprotect_fn) (struct request_queue *);
+
+ enum blk_queue_state {
+ Queue_down,
+@@ -297,6 +299,8 @@ struct request_queue
+ prepare_flush_fn *prepare_flush_fn;
+ softirq_done_fn *softirq_done_fn;
+ dma_drain_needed_fn *dma_drain_needed;
++ issue_protect_fn *issue_protect_fn;
++ issue_unprotect_fn *issue_unprotect_fn;
+
+ /*
+ * Dispatch queue sorting
+@@ -312,6 +316,14 @@ struct request_queue
+ unsigned long unplug_delay; /* After this many jiffies */
+ struct work_struct unplug_work;
+
++ /*
++ * Auto-unfreeze state
++ */
++ struct timer_list unfreeze_timer;
++ int max_unfreeze; /* At most this many seconds */
++ struct work_struct unfreeze_work;
++ int protect_method;
++
+ struct backing_dev_info backing_dev_info;
+
+ /*
+@@ -718,6 +730,8 @@ extern int blk_do_ordered(struct request_queue *, struct request **);
+ extern unsigned blk_ordered_cur_seq(struct request_queue *);
+ extern unsigned blk_ordered_req_seq(struct request *);
+ extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
++extern void blk_queue_issue_protect_fn(struct request_queue *, issue_protect_fn *);
++extern void blk_queue_issue_unprotect_fn(struct request_queue *, issue_unprotect_fn *);
+
+ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+ extern void blk_dump_rq_flags(struct request *, char *);
+diff --git a/include/linux/ide.h b/include/linux/ide.h
+index bc26b2f..1eab8fe 100644
+--- a/include/linux/ide.h
++++ b/include/linux/ide.h
+@@ -837,6 +837,7 @@ extern void ide_init_drive_cmd (struct request *rq);
+ */
+ typedef enum {
+ ide_wait, /* insert rq at end of list, and wait for it */
++ ide_next, /* insert rq immediately after current request */
+ ide_preempt, /* insert rq in front of current request */
+ ide_head_wait, /* insert rq in front of current request and wait for it */
+ ide_end /* insert rq at end of list, but don't wait for it */
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 14daf48..bab372b 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -823,7 +823,7 @@ static void ata_scsi_sdev_config(struct
+ * prevent SCSI midlayer from automatically deferring
+ * requests.
+ */
+- sdev->max_device_blocked = 1;
++ sdev->max_device_blocked = 2;
+ }
+
+ static void ata_scsi_dev_config(struct scsi_device *sdev,
+@@ -3120,7 +3120,7 @@ int ata_scsi_add_hosts(struct ata_host *
+ * Set host_blocked to 1 to prevent SCSI midlayer from
+ * automatically deferring requests.
+ */
+- shost->max_host_blocked = 1;
++ shost->max_host_blocked = 2;
+
+ rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+ if (rc)
diff --git a/app-laptop/hdapsd/files/hdaps_protect-2.6.26.patch b/app-laptop/hdapsd/files/hdaps_protect-2.6.26.patch
new file mode 100644
index 0000000..ff19c7b
--- /dev/null
+++ b/app-laptop/hdapsd/files/hdaps_protect-2.6.26.patch
@@ -0,0 +1,935 @@
+
+ Documentation/block/disk-protection.txt | 79 ++++++++++++++
+ block/blk-core.c | 41 +++++++
+ block/blk-settings.c | 22 ++++
+ block/blk-sysfs.c | 173 ++++++++++++++++++++++++++++++-
+ block/blk.h | 3 +
+ drivers/ata/libata-scsi.c | 35 ++++++
+ drivers/ide/ide-disk.c | 145 ++++++++++++++++++++++++++
+ drivers/ide/ide-io.c | 14 ++
+ drivers/scsi/scsi_lib.c | 174 +++++++++++++++++++++++++++++++
+ include/linux/ata.h | 12 ++
+ include/linux/blkdev.h | 14 ++
+ include/linux/ide.h | 1
+ 12 files changed, 706 insertions(+), 7 deletions(-)
+ create mode 100644 Documentation/block/disk-protection.txt
+
+diff --git a/Documentation/block/disk-protection.txt b/Documentation/block/disk-protection.txt
+new file mode 100644
+index 0000000..508cc5b
+--- /dev/null
++++ b/Documentation/block/disk-protection.txt
+@@ -0,0 +1,79 @@
++Hard disk protection
++====================
++
++
++Intro
++-----
++ATA/ATAPI-7 specifies the IDLE IMMEDIATE command with UNLOAD FEATURE.
++Issuing this command should cause the drive to switch to idle mode and
++unload disk heads. This feature is being used in modern laptops in
++conjunction with accelerometers and appropriate software to implement
++a shock protection facility. The idea is to stop all I/O operations on
++the internal hard drive and park its heads on the ramp when critical
++situations are anticipated. The desire to have such a feature
++available on GNU/Linux systems has been the original motivation to
++implement a generic disk parking interface in the Linux kernel.
++
++
++The interface
++-------------
++The interface works as follows: Writing an integer value to
++/sys/block/*/queue/protect will park the respective drive and freeze
++the block layer queue for the specified number of seconds. When the
++timeout expires and no further disk park request has been issued in
++the meantime, the queue is unfrozen and accumulated I/O operations are
++performed.
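++
++For example, a minimal invocation (assuming the internal drive is sda,
++as elsewhere in this document) would be:
++
++# echo 8 > /sys/block/sda/queue/protect
++
++This parks the heads and freezes the queue of sda for eight seconds.
++Reading the attribute while the timer is running returns the number of
++seconds remaining until normal operation is resumed; after the timeout
++has expired, it reads 0.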
++
++IMPORTANT NOTE:
++Not all ATA drives implement IDLE IMMEDIATE with UNLOAD FEATURE and
++quite a few of those that do so, don't report this capability as
++described in the specs. When a disk park has been requested through
++sysfs as described above, the kernel will try to determine if the
++drive supports the UNLOAD FEATURE by default. The kernel will only
++rely on the IDLE IMMEDIATE with UNLOAD FEATURE command if it is
++convinced that this command is actually supported by the disk drive;
++otherwise, it will fall back to STANDBY IMMEDIATE. Resuming from the
++latter will take much longer and it is generally more likely to have a
++negative impact on the drive's lifetime due to the increase of spin
++down and up cycles. If you want to use this interface in a shock
++protection framework and you know that your drive does indeed support
++the IDLE IMMEDIATE with UNLOAD FEATURE command despite not saying so,
++you can force the kernel to issue that command by doing the following
++on the command line:
++# echo -n unload > /sys/block/sda/queue/protect_method
++(replace sda by the drive identifier as appropriate).
++
++/sys/block/*/queue/protect_method accepts auto, unload and standby
++respectively. Reading from protect_method shows the available options
++surrounding the active one with brackets. When auto is active, this
++will change to whatever the kernel sees fit after the next disk park
++command has been issued.
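++
++For instance, with the default setting of auto still active, reading
++the attribute would look like this (sda again assumed):
++
++# cat /sys/block/sda/queue/protect_method
++[auto] unload standby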
++
++
++References
++----------
++
++There are several laptops from different brands featuring shock
++protection capabilities. As manufacturers have refused to support open
++source development of the required software components so far, Linux
++support for shock protection varies considerably between different
++hardware implementations. Ideally, this section should contain a list
++of pointers at different projects aiming at an implementation of shock
++protection on different systems. Unfortunately, I only know of a
++single project which, although still considered experimental, is fit
++for use. Please feel free to add projects that have been the victims
++of my ignorance.
++
++- http://www.thinkwiki.org/wiki/HDAPS
++ See this page for information about Linux support of the hard disk
++ active protection system as implemented in IBM/Lenovo Thinkpads.
++
++
++CREDITS
++-------
++
++The patch to implement the interface described in this file has
++originally been published by Jon Escombe <lists@...>.
++
++
++05 Dec 2006, Elias Oltmanns <eo@...>
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 1905aab..0305c21 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -305,6 +305,46 @@ void blk_unplug(struct request_queue *q)
+ }
+ EXPORT_SYMBOL(blk_unplug);
+
++/*
++ * Issue lower level unprotect function if no timers are pending.
++ */
++void blk_unfreeze_work(struct work_struct *work)
++{
++ struct request_queue *q = container_of(work, struct request_queue, unfreeze_work);
++ int pending;
++ unsigned long flags;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ pending = timer_pending(&q->unfreeze_timer);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ if (!pending)
++ q->issue_unprotect_fn(q);
++}
++
++/*
++ * Called when the queue freeze timeout expires...
++ */
++void blk_unfreeze_timeout(unsigned long data)
++{
++ struct request_queue *q = (struct request_queue *) data;
++
++ kblockd_schedule_work(&q->unfreeze_work);
++}
++
++/*
++ * The lower level driver parks and freezes the queue, and this block layer
++ * function sets up the freeze timeout timer on return. If the queue is
++ * already frozen then this is called to extend the timer...
++ */
++void blk_freeze_queue(struct request_queue *q, int seconds)
++{
++ /* Don't accept arbitrarily long freezes */
++ if (seconds >= q->max_unfreeze)
++ seconds = q->max_unfreeze;
++ /* set/reset the timer */
++ mod_timer(&q->unfreeze_timer, msecs_to_jiffies(seconds*1000) + jiffies);
++}
++
+ /**
+ * blk_start_queue - restart a previously stopped queue
+ * @q: The &struct request_queue in question
+@@ -478,6 +518,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ }
+
+ init_timer(&q->unplug_timer);
++ init_timer(&q->unfreeze_timer);
+
+ kobject_init(&q->kobj, &blk_queue_ktype);
+
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 8dd8641..2da72f3 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -111,6 +111,16 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+ q->unplug_timer.function = blk_unplug_timeout;
+ q->unplug_timer.data = (unsigned long)q;
+
++ q->max_unfreeze = 30;
++
++ INIT_WORK(&q->unfreeze_work, blk_unfreeze_work);
++
++ q->unfreeze_timer.function = blk_unfreeze_timeout;
++ q->unfreeze_timer.data = (unsigned long)q;
++
++ /* Set protect_method to auto detection initially */
++ q->protect_method = 2;
++
+ /*
+ * by default assume old behaviour and bounce for any highmem page
+ */
+@@ -118,6 +128,18 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+ }
+ EXPORT_SYMBOL(blk_queue_make_request);
+
++void blk_queue_issue_protect_fn(struct request_queue *q, issue_protect_fn *ipf)
++{
++ q->issue_protect_fn = ipf;
++}
++EXPORT_SYMBOL(blk_queue_issue_protect_fn);
++
++void blk_queue_issue_unprotect_fn(struct request_queue *q, issue_unprotect_fn *iuf)
++{
++ q->issue_unprotect_fn = iuf;
++}
++EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
++
+ /**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q: the request queue for the device
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 304ec73..2fdca01 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -298,6 +298,160 @@ struct kobj_type blk_queue_ktype = {
+ .release = blk_release_queue,
+ };
+
++/*
++ * When reading the 'protect' attribute, we return seconds remaining
++ * before unfreeze timeout expires
++ */
++static ssize_t queue_protect_show(struct request_queue *q, char *page)
++{
++ unsigned int seconds = 0;
++
++ spin_lock_irq(q->queue_lock);
++ if (blk_queue_stopped(q) && timer_pending(&q->unfreeze_timer))
++ /*
++ * Adding 1 in order to guarantee nonzero value until timer
++ * has actually expired.
++ */
++ seconds = jiffies_to_msecs(q->unfreeze_timer.expires
++ - jiffies) / 1000 + 1;
++ spin_unlock_irq(q->queue_lock);
++ return queue_var_show(seconds, (page));
++}
++
++/*
++ * When writing the 'protect' attribute, input is the number of seconds
++ * to freeze the queue for. We call a lower level helper function to
++ * park the heads and freeze/block the queue, then we make a block layer
++ * call to setup the thaw timeout. If input is 0, then we thaw the queue.
++ */
++static ssize_t queue_protect_store(struct request_queue *q,
++ const char *page, size_t count)
++{
++ unsigned long freeze = 0;
++
++ queue_var_store(&freeze, page, count);
++
++ if (freeze>0) {
++ /* Park and freeze */
++ if (!blk_queue_stopped(q))
++ q->issue_protect_fn(q);
++ /* set / reset the thaw timer */
++ spin_lock_irq(q->queue_lock);
++ blk_freeze_queue(q, freeze);
++ spin_unlock_irq(q->queue_lock);
++ } else {
++ spin_lock_irq(q->queue_lock);
++ freeze = del_timer(&q->unfreeze_timer);
++ spin_unlock_irq(q->queue_lock);
++ if (freeze)
++ q->issue_unprotect_fn(q);
++ }
++
++ return count;
++}
++
++static ssize_t
++queue_str_show(char *page, char *str, int status)
++{
++ ssize_t len;
++
++ if (status & 1)
++ len = sprintf(page, "[%s]", str);
++ else
++ len = sprintf(page, "%s", str);
++ if (status & 2)
++ len += sprintf(page+len, "\n");
++ else
++ len += sprintf(page+len, " ");
++ return len;
++}
++
++/*
++ * Returns current protect_method.
++ */
++static ssize_t queue_protect_method_show(struct request_queue *q, char *page)
++{
++ int len = 0;
++ int unload = q->protect_method;
++
++ len += queue_str_show(page+len, "auto", (unload & 2) >> 1);
++ len += queue_str_show(page+len, "unload", unload & 1);
++ len += queue_str_show(page+len, "standby", !unload ? 3 : 2);
++ return len;
++}
++
++/*
++ * Stores the device protect method.
++ */
++static ssize_t queue_protect_method_store(struct request_queue *q,
++ const char *page, size_t count)
++{
++ spin_lock_irq(q->queue_lock);
++ if (!strcmp(page, "auto") || !strcmp(page, "auto\n"))
++ q->protect_method = 2;
++ else if (!strcmp(page, "unload") || !strcmp(page, "unload\n"))
++ q->protect_method = 1;
++ else if (!strcmp(page, "standby") || !strcmp(page, "standby\n"))
++ q->protect_method = 0;
++ else {
++ spin_unlock_irq(q->queue_lock);
++ return -EINVAL;
++ }
++ spin_unlock_irq(q->queue_lock);
++ return count;
++}
++
++static struct queue_sysfs_entry queue_protect_entry = {
++ .attr = { .name = "protect", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_protect_show,
++ .store = queue_protect_store,
++};
++static struct queue_sysfs_entry queue_protect_method_entry = {
++ .attr = { .name = "protect_method", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_protect_method_show,
++ .store = queue_protect_method_store,
++};
++
++static int blk_protect_register(struct request_queue *q)
++{
++ int error = 0;
++
++ /* check that the lower level driver has a protect handler */
++ if (!q->issue_protect_fn)
++ return 0;
++
++ /* create the attributes */
++ error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
++ if (error) {
++ printk(KERN_ERR
++ "blk_protect_register(): failed to create protect queue attribute!\n");
++ return error;
++ }
++
++ error = sysfs_create_file(&q->kobj, &queue_protect_method_entry.attr);
++ if (error) {
++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
++ printk(KERN_ERR
++ "blk_protect_register(): failed to create protect_method attribute!\n");
++ return error;
++ }
++ kobject_get(&q->kobj);
++
++ return 0;
++}
++
++static void blk_protect_unregister(struct request_queue *q)
++{
++ /* check that the lower level driver has a protect handler */
++ if (!q->issue_protect_fn)
++ return;
++
++ /* remove the attributes */
++ sysfs_remove_file(&q->kobj, &queue_protect_method_entry.attr);
++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
++ kobject_put(&q->kobj);
++}
++
+ int blk_register_queue(struct gendisk *disk)
+ {
+ int ret;
+@@ -318,23 +472,28 @@ int blk_register_queue(struct gendisk *disk)
+ kobject_uevent(&q->kobj, KOBJ_ADD);
+
+ ret = elv_register_queue(q);
++ if (ret)
++ goto err;
++ ret = blk_protect_register(q);
+ if (ret) {
+- kobject_uevent(&q->kobj, KOBJ_REMOVE);
+- kobject_del(&q->kobj);
+- return ret;
++ elv_unregister_queue(q);
++ goto err;
+ }
+
+ return 0;
++
++err:
++ kobject_uevent(&q->kobj, KOBJ_REMOVE);
++ kobject_del(&q->kobj);
++ return ret;
+ }
+
+ void blk_unregister_queue(struct gendisk *disk)
+ {
+ struct request_queue *q = disk->queue;
+
+- if (WARN_ON(!q))
+- return;
+-
+- if (q->request_fn) {
++ if (q && q->request_fn) {
++ blk_protect_unregister(q);
+ elv_unregister_queue(q);
+
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+diff --git a/block/blk.h b/block/blk.h
+index 59776ab..5c89160 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -17,6 +17,9 @@ void __blk_queue_free_tags(struct request_queue *q);
+
+ void blk_unplug_work(struct work_struct *work);
+ void blk_unplug_timeout(unsigned long data);
++void blk_unfreeze_work(struct work_struct *work);
++void blk_unfreeze_timeout(unsigned long data);
++void blk_freeze_queue(struct request_queue *q, int seconds);
+
+ struct io_context *current_io_context(gfp_t gfp_flags, int node);
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 57a4364..8f9f118 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -917,6 +917,37 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
+ return 0;
+ }
+
++extern int scsi_protect_queue(struct request_queue *q, int unload);
++extern int scsi_unprotect_queue(struct request_queue *q);
++
++static int ata_scsi_issue_protect_fn(struct request_queue *q)
++{
++ struct scsi_device *sdev = q->queuedata;
++ struct ata_port *ap = ata_shost_to_port(sdev->host);
++ struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
++ int unload = q->protect_method;
++
++ if (!dev) {
++ printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): Couldn't find ATA device to be parked.\n");
++ return -ENXIO;
++ }
++
++ if (unload == 2) {
++ unload = ata_id_has_unload(dev->id) ? 1 : 0;
++ q->protect_method = unload;
++ }
++
++ /* call scsi_protect_queue, requesting either unload or standby */
++ return scsi_protect_queue(q, unload);
++}
++
++static int ata_scsi_issue_unprotect_fn(struct request_queue *q)
++{
++ struct scsi_device *sdev = q->queuedata;
++
++ return scsi_unprotect_queue(q);
++}
++
+ /**
+ * ata_scsi_slave_config - Set SCSI device attributes
+ * @sdev: SCSI device to examine
+@@ -939,6 +970,10 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
+
+ if (dev)
+ rc = ata_scsi_dev_config(sdev, dev);
++ blk_queue_issue_protect_fn(sdev->request_queue,
++ ata_scsi_issue_protect_fn);
++ blk_queue_issue_unprotect_fn(sdev->request_queue,
++ ata_scsi_issue_unprotect_fn);
+
+ return rc;
+ }
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index 8e08d08..5721f43 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -612,6 +612,148 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+ }
+
+ /*
++ * todo:
++ * - we freeze the queue regardless of success and rely on the
++ * ide_protect_queue function to thaw immediately if the command
++ * failed (to be consistent with the libata handler)... should
++ * we also inspect here?
++ */
++void ide_end_protect_rq(struct request *rq, int error)
++{
++ struct completion *waiting = rq->end_io_data;
++
++ rq->end_io_data = NULL;
++	/* spin lock already acquired */
++ if (!blk_queue_stopped(rq->q))
++ blk_stop_queue(rq->q);
++
++ complete(waiting);
++}
++
++int ide_unprotect_queue(struct request_queue *q)
++{
++ struct request rq;
++ unsigned long flags;
++ int pending, rc = 0;
++ ide_drive_t *drive = q->queuedata;
++ ide_task_t task;
++
++ if (!blk_queue_stopped(q))
++ return -EIO;
++
++ /* Are there any pending jobs on the queue? */
++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ blk_start_queue(q);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ /* The unload feature of the IDLE_IMMEDIATE command
++ temporarily disables HD power management from spinning down
++ the disk. Any other command will reenable HD pm, so, if
++ there are no pending jobs on the queue, another
++ CHECK_POWER_MODE1 command without the unload feature should do
++ just fine. */
++ if (!pending) {
++ printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++ memset(&task, 0, sizeof(task));
++ task.tf.command = WIN_CHECKPOWERMODE1; /* CHECK_POWER_MODE1 */
++ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++ task.data_phase = TASKFILE_NO_DATA;
++ ide_init_drive_cmd(&rq);
++ rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
++ rq.special = &task;
++ rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
++ }
++
++ return rc;
++}
++
++int ide_protect_queue(struct request_queue *q, int unload)
++{
++ ide_drive_t *drive = q->queuedata;
++ struct request rq;
++ ide_task_t task;
++ int ret = 0;
++	DECLARE_COMPLETION_ONSTACK(wait);
++
++ memset(&rq, 0, sizeof(rq));
++ memset(&task, 0, sizeof(task));
++
++ if (blk_queue_stopped(q))
++ return -EIO;
++
++ task.data_phase = TASKFILE_NO_DATA;
++ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++ if (unload) {
++ task.tf.command = 0xe1;
++ task.tf.feature = 0x44;
++ task.tf.lbal = 0x4c;
++ task.tf.lbam = 0x4e;
++ task.tf.lbah = 0x55;
++ } else
++ task.tf.command = 0xe0;
++
++ /* Issue the park command & freeze */
++ ide_init_drive_cmd(&rq);
++
++ rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
++ rq.special = &task;
++ rq.end_io_data = &wait;
++ rq.end_io = ide_end_protect_rq;
++
++ ret = ide_do_drive_cmd(drive, &rq, ide_next);
++ wait_for_completion(&wait);
++
++ if (ret) {
++ printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
++ ide_unprotect_queue(q);
++ return ret;
++ }
++
++ if (unload) {
++ if (task.tf.lbal == 0xc4)
++ printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
++ else {
++ /* error parking the head */
++ printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
++ ret = -EIO;
++ ide_unprotect_queue(q);
++ }
++ } else
++ printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
++
++ return ret;
++}
++
++int idedisk_issue_protect_fn(struct request_queue *q)
++{
++ ide_drive_t *drive = q->queuedata;
++ int unload = q->protect_method;
++ unsigned long flags;
++
++ /*
++ * Check capability of the device -
++ * - if "idle immediate with unload" is supported we use that, else
++ * we use "standby immediate" and live with spinning down the drive..
++ * (Word 84, bit 13 of IDENTIFY DEVICE data)
++ */
++ if (unload == 2) {
++ unload = drive->id->cfsse & (1 << 13) ? 1 : 0;
++ spin_lock_irqsave(q->queue_lock, flags);
++ q->protect_method = unload;
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ }
++
++ return ide_protect_queue(q, unload);
++}
++
++int idedisk_issue_unprotect_fn(struct request_queue *q)
++{
++ return ide_unprotect_queue(q);
++}
++
++/*
+ * This is tightly woven into the driver->do_special can not touch.
+ * DON'T do it again until a total personality rewrite is committed.
+ */
+@@ -891,6 +1033,9 @@ static void idedisk_setup(ide_drive_t *drive)
+ drive->wcache = 1;
+
+ write_cache(drive, 1);
++
++ blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);
++ blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn);
+ }
+
+ static void ide_cacheflush_p(ide_drive_t *drive)
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index 6965253..b40a681 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -1115,6 +1115,17 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
+ }
+
+ /*
++ * Don't accept a request when the queue is stopped (unless we
++ * are resuming from suspend). Prevents existing queue entries
++ * being processed after queue is stopped by the hard disk
++ * protection mechanism...
++ */
++ if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
++ hwgroup->busy = 0;
++ break;
++ }
++
++ /*
+ * Sanity: don't accept a request that isn't a PM request
+ * if we are currently power managed. This is very important as
+ * blk_stop_queue() doesn't prevent the elv_next_request()
+@@ -1607,6 +1618,9 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
+ where = ELEVATOR_INSERT_FRONT;
+ rq->cmd_flags |= REQ_PREEMPT;
+ }
++ if (action == ide_next)
++ where = ELEVATOR_INSERT_FRONT;
++
+ __elv_add_request(drive->queue, rq, where, 0);
+ ide_do_request(hwgroup, IDE_NO_IRQ);
+ spin_unlock_irqrestore(&ide_lock, flags);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index cbf55d5..8b260f5 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2261,7 +2261,13 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
+ int
+ scsi_device_quiesce(struct scsi_device *sdev)
+ {
++ int i;
+ int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
++ for (i = 0; err && (sdev->sdev_state == SDEV_BLOCK) && (i < 100);
++ i++) {
++ msleep_interruptible(200);
++ err = scsi_device_set_state(sdev, SDEV_QUIESCE);
++ }
+ if (err)
+ return err;
+
+@@ -2509,3 +2515,171 @@ void scsi_kunmap_atomic_sg(void *virt)
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ }
+ EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
++
++/*
++ * Structure required for synchronous io completion after queue freezing
++ */
++struct scsi_protect_io_context_sync {
++ struct scsi_device *sdev;
++ int result;
++ char *sense;
++ struct completion *waiting;
++};
++
++/*
++ * scsi_protect_wait_done()
++ * Command completion handler for scsi_protect_queue().
++ *
++ * Unable to call scsi_internal_device_block() as
++ * scsi_end_request() already has the spinlock. So,
++ * we put the necessary functionality inline.
++ *
++ * todo:
++ * - we block the queue regardless of success and rely on the
++ * scsi_protect_queue function to unblock if the command
++ * failed... should we also inspect here?
++ */
++static void scsi_protect_wait_done(void *data, char *sense, int result, int resid)
++{
++ struct scsi_protect_io_context_sync *siocs = data;
++ struct completion *waiting = siocs->waiting;
++ struct request_queue *q = siocs->sdev->request_queue;
++
++ siocs->waiting = NULL;
++ siocs->result = result;
++ memcpy(siocs->sense, sense, SCSI_SENSE_BUFFERSIZE);
++
++ if (!scsi_device_set_state(siocs->sdev, SDEV_BLOCK)) {
++ siocs->sdev->max_device_blocked = 2;
++ blk_stop_queue(q);
++ }
++
++ complete(waiting);
++}
++
++/*
++ * scsi_unprotect_queue()
++ * - release the queue that was previously blocked
++ */
++int scsi_unprotect_queue(struct request_queue *q)
++{
++ struct scsi_device *sdev = q->queuedata;
++ int rc = 0, pending = 0;
++ u8 scsi_cmd[MAX_COMMAND_SIZE];
++ struct scsi_sense_hdr sshdr;
++
++ if (sdev->sdev_state != SDEV_BLOCK)
++ return -ENXIO;
++
++ /* Are there any pending jobs on the queue? */
++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++
++ rc = scsi_internal_device_unblock(sdev);
++ if (rc)
++ return rc;
++ sdev->max_device_blocked = 1;
++
++ if (!pending) {
++ printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++
++ memset(scsi_cmd, 0, sizeof(scsi_cmd));
++ scsi_cmd[0] = ATA_16;
++ scsi_cmd[1] = (3 << 1); /* Non-data */
++ /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
++ scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
++
++ /* Good values for timeout and retries? Values below
++ from scsi_ioctl_send_command() for default case... */
++ if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
++ (10*HZ), 5))
++ rc = -EIO;
++ }
++ return rc;
++}
++EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
++
++/*
++ * scsi_protect_queue()
++ * - build and issue the park/standby command..
++ * - queue is blocked during command completion handler
++ */
++int scsi_protect_queue(struct request_queue *q, int unload)
++{
++ struct scsi_protect_io_context_sync siocs;
++ struct scsi_device *sdev = q->queuedata;
++ int rc = 0;
++ u8 args[7];
++ u8 scsi_cmd[MAX_COMMAND_SIZE];
++ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
++ unsigned char *desc;
++ DECLARE_COMPLETION_ONSTACK(wait);
++
++ if (sdev->sdev_state != SDEV_RUNNING)
++ return -ENXIO;
++
++ memset(args, 0, sizeof(args));
++ memset(sense, 0, sizeof(sense));
++
++ if (unload) {
++ args[0] = 0xe1;
++ args[1] = 0x44;
++ args[3] = 0x4c;
++ args[4] = 0x4e;
++ args[5] = 0x55;
++ } else
++ args[0] = 0xe0;
++
++ memset(scsi_cmd, 0, sizeof(scsi_cmd));
++ scsi_cmd[0] = ATA_16;
++ scsi_cmd[1] = (3 << 1); /* Non-data */
++ scsi_cmd[2] = 0x20; /* no off.line, or data xfer, request cc */
++ scsi_cmd[4] = args[1];
++ scsi_cmd[6] = args[2];
++ scsi_cmd[8] = args[3];
++ scsi_cmd[10] = args[4];
++ scsi_cmd[12] = args[5];
++ scsi_cmd[14] = args[0];
++ siocs.sdev = sdev;
++ siocs.sense = sense;
++ siocs.waiting = &wait;
++
++ rc = scsi_execute_async(sdev, scsi_cmd, COMMAND_SIZE(scsi_cmd[0]),
++ DMA_NONE, NULL, 0, 0, (10*HZ), 5,
++ &siocs, &scsi_protect_wait_done, GFP_NOWAIT);
++ if (rc)
++ goto out;
++ wait_for_completion(&wait);
++
++ if (siocs.result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
++ scsi_unprotect_queue(q); /* just in case we still managed to block */
++ rc = -EIO;
++ goto out;
++ }
++
++ desc = sense + 8;
++
++ /* Retrieve data from check condition */
++ args[1] = desc[3];
++ args[2] = desc[5];
++ args[3] = desc[7];
++ args[4] = desc[9];
++ args[5] = desc[11];
++ args[0] = desc[13];
++
++ if (unload) {
++ if (args[3] == 0xc4)
++ printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
++ else {
++ /* error parking the head */
++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
++ rc = -EIO;
++ scsi_unprotect_queue(q);
++ }
++ } else
++ printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
++
++out:
++ return rc;
++}
++EXPORT_SYMBOL_GPL(scsi_protect_queue);
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 1c622e2..199a4e3 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -459,6 +459,18 @@ static inline int ata_is_data(u8 prot)
+
+ #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
+
++static inline int ata_id_has_unload(const u16 *id)
++{
++ /* ATA-7 specifies two places to indicate unload feature support.
++ * Since I don't really understand the difference, I'll just check
++ * both and only return zero if none of them indicates otherwise. */
++ if ((id[84] & 0xC000) == 0x4000 && id[84] & (1 << 13))
++ return id[84] & (1 << 13);
++ if ((id[87] & 0xC000) == 0x4000)
++ return id[87] & (1 << 13);
++ return 0;
++}
++
+ static inline bool ata_id_has_hipm(const u16 *id)
+ {
+ u16 val = id[76];
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index d2a1b71..73e69df 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -263,6 +263,8 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_ve
+ typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
+ typedef void (softirq_done_fn)(struct request *);
+ typedef int (dma_drain_needed_fn)(struct request *);
++typedef int (issue_protect_fn) (struct request_queue *);
++typedef int (issue_unprotect_fn) (struct request_queue *);
+
+ enum blk_queue_state {
+ Queue_down,
+@@ -300,6 +302,8 @@ struct request_queue
+ prepare_flush_fn *prepare_flush_fn;
+ softirq_done_fn *softirq_done_fn;
+ dma_drain_needed_fn *dma_drain_needed;
++ issue_protect_fn *issue_protect_fn;
++ issue_unprotect_fn *issue_unprotect_fn;
+
+ /*
+ * Dispatch queue sorting
+@@ -315,6 +319,14 @@ struct request_queue
+ unsigned long unplug_delay; /* After this many jiffies */
+ struct work_struct unplug_work;
+
++ /*
++ * Auto-unfreeze state
++ */
++ struct timer_list unfreeze_timer;
++ int max_unfreeze; /* At most this many seconds */
++ struct work_struct unfreeze_work;
++ int protect_method;
++
+ struct backing_dev_info backing_dev_info;
+
+ /*
+@@ -764,6 +776,8 @@ extern int blk_do_ordered(struct request_queue *, struct request **);
+ extern unsigned blk_ordered_cur_seq(struct request_queue *);
+ extern unsigned blk_ordered_req_seq(struct request *);
+ extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
++extern void blk_queue_issue_protect_fn(struct request_queue *, issue_protect_fn *);
++extern void blk_queue_issue_unprotect_fn(struct request_queue *, issue_unprotect_fn *);
+
+ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+ extern void blk_dump_rq_flags(struct request *, char *);
+diff --git a/include/linux/ide.h b/include/linux/ide.h
+index eddb6da..428746d 100644
+--- a/include/linux/ide.h
++++ b/include/linux/ide.h
+@@ -864,6 +864,7 @@ extern void ide_init_drive_cmd (struct request *rq);
+ */
+ typedef enum {
+ ide_wait, /* insert rq at end of list, and wait for it */
++ ide_next, /* insert rq immediately after current request */
+ ide_preempt, /* insert rq in front of current request */
+ ide_head_wait, /* insert rq in front of current request and wait for it */
+ ide_end /* insert rq at end of list, but don't wait for it */
+
diff --git a/app-laptop/hdapsd/files/hdaps_protect-2.6.27.patch b/app-laptop/hdapsd/files/hdaps_protect-2.6.27.patch
new file mode 100644
index 0000000..973ea7b
--- /dev/null
+++ b/app-laptop/hdapsd/files/hdaps_protect-2.6.27.patch
@@ -0,0 +1,966 @@
+---
+
+ Documentation/laptops/disk-shock-protection.txt | 149 +++++++++++++++++++++++
+ drivers/ata/ahci.c | 1
+ drivers/ata/libata-core.c | 1
+ drivers/ata/libata-eh.c | 126 +++++++++++++++++++
+ drivers/ata/libata-scsi.c | 109 +++++++++++++++++
+ drivers/ide/Makefile | 2
+ drivers/ide/ide-io.c | 27 ++++
+ drivers/ide/ide-iops.c | 28 ++++
+ drivers/ide/ide-park.c | 120 +++++++++++++++++++
+ drivers/ide/ide-probe.c | 5 +
+ drivers/ide/ide-taskfile.c | 34 +++++
+ drivers/ide/ide.c | 1
+ include/linux/ata.h | 9 +
+ include/linux/ide.h | 12 ++
+ include/linux/libata.h | 13 ++
+ 15 files changed, 631 insertions(+), 6 deletions(-)
+ create mode 100644 Documentation/laptops/disk-shock-protection.txt
+ create mode 100644 drivers/ide/ide-park.c
+
+diff --git a/Documentation/laptops/disk-shock-protection.txt b/Documentation/laptops/disk-shock-protection.txt
+new file mode 100644
+index 0000000..0e6ba26
+--- /dev/null
++++ b/Documentation/laptops/disk-shock-protection.txt
+@@ -0,0 +1,149 @@
++Hard disk shock protection
++==========================
++
++Author: Elias Oltmanns <eo-oA28OIkTjSVZXbeN9DUtxg@public.gmane.org>
++Last modified: 2008-10-03
++
++
++0. Contents
++-----------
++
++1. Intro
++2. The interface
++3. References
++4. CREDITS
++
++
++1. Intro
++--------
++
++ATA/ATAPI-7 specifies the IDLE IMMEDIATE command with unload feature.
++Issuing this command should cause the drive to switch to idle mode and
++unload disk heads. This feature is being used in modern laptops in
++conjunction with accelerometers and appropriate software to implement
++a shock protection facility. The idea is to stop all I/O operations on
++the internal hard drive and park its heads on the ramp when critical
++situations are anticipated. The desire to have such a feature
++available on GNU/Linux systems has been the original motivation to
++implement a generic disk head parking interface in the Linux kernel.
++Please note, however, that other components have to be set up on your
++system in order to get disk shock protection working (see
++section 3. References below for pointers to more information about
++that).
++
++
++2. The interface
++----------------
++
++For each ATA device, the kernel exports the file
++block/*/device/unload_heads in sysfs (here assumed to be mounted under
++/sys). Access to /sys/block/*/device/unload_heads is denied with
++-EOPNOTSUPP if the device does not support the unload feature.
++Otherwise, writing an integer value to this file will take the heads
++of the respective drive off the platter and block all I/O operations
++for the specified number of milliseconds. When the timeout expires and
++no further disk head park request has been issued in the meantime,
++normal operation will be resumed. The maximal value accepted for a
++timeout is 30000 milliseconds. Exceeding this limit will return
++-EOVERFLOW, but heads will be parked anyway and the timeout will be
++set to 30 seconds. However, you can always change a timeout to any
++value between 0 and 30000 by issuing a subsequent head park request
++before the timeout of the previous one has expired. In particular, the
++total timeout can exceed 30 seconds and, more importantly, you can
++cancel a previously set timeout and resume normal operation
++immediately by specifying a timeout of 0. Values below -2 are rejected
++with -EINVAL (see below for the special meaning of -1 and -2). If the
++timeout specified for a recent head park request has not yet expired,
++reading from /sys/block/*/device/unload_heads will report the number
++of milliseconds remaining until normal operation will be resumed;
++otherwise, reading the unload_heads attribute will return 0.
++
++For example, do the following in order to park the heads of drive
++/dev/sda and stop all I/O operations for five seconds:
++
++# echo 5000 > /sys/block/sda/device/unload_heads
++
++A simple
++
++# cat /sys/block/sda/device/unload_heads
++
++will show you how many milliseconds are left before normal operation
++will be resumed.
++
++A word of caution: The fact that the interface operates on a basis of
++milliseconds may raise expectations that cannot be satisfied in
++reality. In fact, the ATA specs clearly state that the time for an
++unload operation to complete is vendor specific. The hint in ATA-7
++that this will typically be within 500 milliseconds apparently has
++been dropped in ATA-8.
++
++There is a technical detail of this implementation that may cause some
++confusion and should be discussed here. When a head park request has
++been issued to a device successfully, all I/O operations on the
++controller port this device is attached to will be deferred. That is
++to say, any other device that may be connected to the same port will
++be affected too. The only exception is that a subsequent head unload
++request to that other device will be executed immediately. Further
++operations on that port will be deferred until the timeout specified
++for either device on the port has expired. As far as PATA (old style
++IDE) configurations are concerned, there can only be two devices
++attached to any single port. In SATA world we have port multipliers
++which means that a user-issued head parking request to one device may
++actually result in stopping I/O to a whole bunch of devices. However,
++since this feature is supposed to be used on laptops and does not seem
++to be very useful in any other environment, there will be mostly one
++device per port. Even if the CD/DVD writer happens to be connected to
++the same port as the hard drive, it generally *should* recover just
++fine from the occasional buffer under-run incurred by a head park
++request to the HD. Actually, when you are using an ide driver rather
++than its libata counterpart (i.e. your disk is called /dev/hda
++instead of /dev/sda), then parking the heads of one drive (drive X)
++will generally not affect the mode of operation of another drive
++(drive Y) on the same port as described above. It is only when a port
++reset is required to recover from an exception on drive Y that further
++I/O operations on that drive (and the reset itself) will be delayed
++until drive X is no longer in the parked state.
++
++Finally, there are some hard drives that only comply with an earlier
++version of the ATA standard than ATA-7, but do support the unload
++feature nonetheless. Unfortunately, there is no safe way Linux can
++detect these devices, so you won't be able to write to the
++unload_heads attribute. If you know that your device really does
++support the unload feature (for instance, because the vendor of your
++laptop or the hard drive itself told you so), then you can tell the
++kernel to enable the usage of this feature for that drive by writing
++the special value -1 to the unload_heads attribute:
++
++# echo -1 > /sys/block/sda/device/unload_heads
++
++will enable the feature for /dev/sda, and giving -2 instead of -1 will
++disable it again.
++
++
++3. References
++-------------
++
++There are several laptops from different vendors featuring shock
++protection capabilities. As manufacturers have refused to support open
++source development of the required software components so far, Linux
++support for shock protection varies considerably between different
++hardware implementations. Ideally, this section should contain a list
++of pointers at different projects aiming at an implementation of shock
++protection on different systems. Unfortunately, I only know of a
++single project which, although still considered experimental, is fit
++for use. Please feel free to add projects that have been the victims
++of my ignorance.
++
++- http://www.thinkwiki.org/wiki/HDAPS
++ See this page for information about Linux support of the hard disk
++ active protection system as implemented in IBM/Lenovo Thinkpads.
++
++
++4. CREDITS
++----------
++
++This implementation of disk head parking has been inspired by a patch
++originally published by Jon Escombe <lists-Xbpc2PeERmvQXOPxS62xeg@public.gmane.org>. My efforts
++to develop an implementation of this feature that is fit to be merged
++into mainline have been aided by various kernel developers, in
++particular by Tejun Heo and Bartlomiej Zolnierkiewicz.
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 2e1a7cb..fd813fa 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -316,6 +316,7 @@ static struct device_attribute *ahci_shost_attrs[] = {
+
+ static struct device_attribute *ahci_sdev_attrs[] = {
+ &dev_attr_sw_activity,
++ &dev_attr_unload_heads,
+ NULL
+ };
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 79e3a8e..b8102d7 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5264,6 +5264,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
+ INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
+ INIT_LIST_HEAD(&ap->eh_done_q);
+ init_waitqueue_head(&ap->eh_wait_q);
++ init_completion(&ap->park_req_pending);
+ init_timer_deferrable(&ap->fastdrain_timer);
+ ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
+ ap->fastdrain_timer.data = (unsigned long)ap;
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index c1db2f2..2efe3ae 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2446,6 +2446,80 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ goto retry;
+ }
+
++static inline void ata_eh_pull_park_action(struct ata_port *ap)
++{
++ struct ata_link *link;
++ struct ata_device *dev;
++ unsigned long flags;
++
++ /*
++ * This function can be thought of as an extended version of
++ * ata_eh_about_to_do() specially crafted to accommodate the
++ * requirements of ATA_EH_PARK handling. Since the EH thread
++ * does not leave the do {} while () loop in ata_eh_recover as
++ * long as the timeout for a park request to *one* device on
++ * the port has not expired, and since we still want to pick
++ * up park requests to other devices on the same port or
++ * timeout updates for the same device, we have to pull
++ * ATA_EH_PARK actions from eh_info into eh_context.i
++ * ourselves at the beginning of each pass over the loop.
++ *
++ * Additionally, all write accesses to &ap->park_req_pending
++ * through INIT_COMPLETION() (see below) or complete_all()
++ * (see ata_scsi_park_store()) are protected by the host lock.
++ * As a result we have that park_req_pending.done is zero on
++ * exit from this function, i.e. when ATA_EH_PARK actions for
++ * *all* devices on port ap have been pulled into the
++ * respective eh_context structs. If, and only if,
++ * park_req_pending.done is non-zero by the time we reach
++ * wait_for_completion_timeout(), another ATA_EH_PARK action
++ * has been scheduled for at least one of the devices on port
++ * ap and we have to cycle over the do {} while () loop in
++ * ata_eh_recover() again.
++ */
++
++ spin_lock_irqsave(ap->lock, flags);
++ INIT_COMPLETION(ap->park_req_pending);
++ ata_port_for_each_link(link, ap) {
++ ata_link_for_each_dev(dev, link) {
++ struct ata_eh_info *ehi = &link->eh_info;
++
++ link->eh_context.i.dev_action[dev->devno] |=
++ ehi->dev_action[dev->devno] & ATA_EH_PARK;
++ ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
++ }
++ }
++ spin_unlock_irqrestore(ap->lock, flags);
++}
++
++static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
++{
++ struct ata_eh_context *ehc = &dev->link->eh_context;
++ struct ata_taskfile tf;
++ unsigned int err_mask;
++
++ ata_tf_init(dev, &tf);
++ if (park) {
++ ehc->unloaded_mask |= 1 << dev->devno;
++ tf.command = ATA_CMD_IDLEIMMEDIATE;
++ tf.feature = 0x44;
++ tf.lbal = 0x4c;
++ tf.lbam = 0x4e;
++ tf.lbah = 0x55;
++ } else {
++ ehc->unloaded_mask &= ~(1 << dev->devno);
++ tf.command = ATA_CMD_CHK_POWER;
++ }
++
++ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
++ tf.protocol |= ATA_PROT_NODATA;
++ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++ if (park && (err_mask || tf.lbal != 0xc4)) {
++ ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
++ ehc->unloaded_mask &= ~(1 << dev->devno);
++ }
++}
++
+ static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ struct ata_device **r_failed_dev)
+ {
+@@ -2755,7 +2829,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+ struct ata_device *dev;
+ int nr_failed_devs;
+ int rc;
+- unsigned long flags;
++ unsigned long flags, deadline;
+
+ DPRINTK("ENTER\n");
+
+@@ -2829,6 +2903,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+ }
+ }
+
++ do {
++ unsigned long now;
++
++ /*
++ * clears ATA_EH_PARK in eh_info and resets
++ * ap->park_req_pending
++ */
++ ata_eh_pull_park_action(ap);
++
++ deadline = jiffies;
++ ata_port_for_each_link(link, ap) {
++ ata_link_for_each_dev(dev, link) {
++ struct ata_eh_context *ehc = &link->eh_context;
++ unsigned long tmp;
++
++ if (dev->class != ATA_DEV_ATA)
++ continue;
++ if (!(ehc->i.dev_action[dev->devno] &
++ ATA_EH_PARK))
++ continue;
++ tmp = dev->unpark_deadline;
++ if (time_before(deadline, tmp))
++ deadline = tmp;
++ else if (time_before_eq(tmp, jiffies))
++ continue;
++ if (ehc->unloaded_mask & (1 << dev->devno))
++ continue;
++
++ ata_eh_park_issue_cmd(dev, 1);
++ }
++ }
++
++ now = jiffies;
++ if (time_before_eq(deadline, now))
++ break;
++
++ deadline = wait_for_completion_timeout(&ap->park_req_pending,
++ deadline - now);
++ } while (deadline);
++ ata_port_for_each_link(link, ap) {
++ ata_link_for_each_dev(dev, link) {
++ if (!(link->eh_context.unloaded_mask &
++ (1 << dev->devno)))
++ continue;
++
++ ata_eh_park_issue_cmd(dev, 0);
++ ata_eh_done(link, dev, ATA_EH_PARK);
++ }
++ }
++
+ /* the rest */
+ ata_port_for_each_link(link, ap) {
+ struct ata_eh_context *ehc = &link->eh_context;
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index b9d3ba4..e1f916f 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -183,6 +183,106 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+ ata_scsi_lpm_show, ata_scsi_lpm_put);
+ EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
+
++static ssize_t ata_scsi_park_show(struct device *device,
++ struct device_attribute *attr, char *buf)
++{
++ struct scsi_device *sdev = to_scsi_device(device);
++ struct ata_port *ap;
++ struct ata_link *link;
++ struct ata_device *dev;
++ unsigned long flags, now;
++ unsigned int uninitialized_var(msecs);
++ int rc = 0;
++
++ ap = ata_shost_to_port(sdev->host);
++
++ spin_lock_irqsave(ap->lock, flags);
++ dev = ata_scsi_find_dev(ap, sdev);
++ if (!dev) {
++ rc = -ENODEV;
++ goto unlock;
++ }
++ if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
++ rc = -EOPNOTSUPP;
++ goto unlock;
++ }
++
++ link = dev->link;
++ now = jiffies;
++ if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
++ link->eh_context.unloaded_mask & (1 << dev->devno) &&
++ time_after(dev->unpark_deadline, now))
++ msecs = jiffies_to_msecs(dev->unpark_deadline - now);
++ else
++ msecs = 0;
++
++unlock:
++	spin_unlock_irqrestore(ap->lock, flags);
++
++ return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
++}
++
++static ssize_t ata_scsi_park_store(struct device *device,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ struct scsi_device *sdev = to_scsi_device(device);
++ struct ata_port *ap;
++ struct ata_device *dev;
++ long int input;
++ unsigned long flags;
++ int rc;
++
++ rc = strict_strtol(buf, 10, &input);
++ if (rc || input < -2)
++ return -EINVAL;
++ if (input > ATA_TMOUT_MAX_PARK) {
++ rc = -EOVERFLOW;
++ input = ATA_TMOUT_MAX_PARK;
++ }
++
++ ap = ata_shost_to_port(sdev->host);
++
++ spin_lock_irqsave(ap->lock, flags);
++ dev = ata_scsi_find_dev(ap, sdev);
++ if (unlikely(!dev)) {
++ rc = -ENODEV;
++ goto unlock;
++ }
++ if (dev->class != ATA_DEV_ATA) {
++ rc = -EOPNOTSUPP;
++ goto unlock;
++ }
++
++ if (input >= 0) {
++ if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
++ rc = -EOPNOTSUPP;
++ goto unlock;
++ }
++
++ dev->unpark_deadline = ata_deadline(jiffies, input);
++ dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
++ ata_port_schedule_eh(ap);
++ complete(&ap->park_req_pending);
++ } else {
++ switch (input) {
++ case -1:
++ dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
++ break;
++ case -2:
++ dev->flags |= ATA_DFLAG_NO_UNLOAD;
++ break;
++ }
++ }
++unlock:
++ spin_unlock_irqrestore(ap->lock, flags);
++
++ return rc ? rc : len;
++}
++DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
++ ata_scsi_park_show, ata_scsi_park_store);
++EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
++
+ static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+ {
+ cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+@@ -269,6 +369,12 @@ DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
+ ata_scsi_activity_store);
+ EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
+
++struct device_attribute *ata_common_sdev_attrs[] = {
++ &dev_attr_unload_heads,
++ NULL
++};
++EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
++
+ static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+ {
+@@ -954,6 +1060,9 @@ static int atapi_drain_needed(struct request *rq)
+ static int ata_scsi_dev_config(struct scsi_device *sdev,
+ struct ata_device *dev)
+ {
++ if (!ata_id_has_unload(dev->id))
++ dev->flags |= ATA_DFLAG_NO_UNLOAD;
++
+ /* configure max sectors */
+ blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
+
+diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
+index 64e0ecd..564bf9d 100644
+--- a/drivers/ide/Makefile
++++ b/drivers/ide/Makefile
+@@ -5,7 +5,7 @@
+ EXTRA_CFLAGS += -Idrivers/ide
+
+ ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o \
+- ide-pio-blacklist.o
++ ide-park.o ide-pio-blacklist.o
+
+ # core IDE code
+ ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index a896a28..c83e9f0 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -718,7 +718,31 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
+
+ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
+ {
+- switch (rq->cmd[0]) {
++ u8 cmd = rq->cmd[0];
++
++ if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
++ ide_task_t task;
++ struct ide_taskfile *tf = &task.tf;
++
++ memset(&task, 0, sizeof(task));
++ if (cmd == REQ_PARK_HEADS) {
++ drive->sleep = *(unsigned long *)rq->special;
++ drive->sleeping = 1;
++ tf->command = WIN_IDLEIMMEDIATE;
++ tf->feature = 0x44;
++ tf->lbal = 0x4c;
++ tf->lbam = 0x4e;
++ tf->lbah = 0x55;
++ } else /* cmd == REQ_UNPARK_HEADS */
++ tf->command = WIN_CHECKPOWERMODE1;
++
++ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
++ IDE_TFLAG_CUSTOM_HANDLER;
++ drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
++ return do_rw_taskfile(drive, &task);
++ }
++
++ switch (cmd) {
+ case REQ_DRIVE_RESET:
+ return ide_do_reset(drive);
+ default:
+@@ -1047,6 +1071,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
+ hwgroup->hwif = hwif;
+ hwgroup->drive = drive;
+ drive->sleeping = 0;
++ drive->parked = 0;
+ drive->service_start = jiffies;
+
+ if (blk_queue_plugged(drive->queue)) {
+diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
+index 2cbadff..446ec28 100644
+--- a/drivers/ide/ide-iops.c
++++ b/drivers/ide/ide-iops.c
+@@ -1108,6 +1108,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
+ drive->special.b.set_geometry = legacy;
+ drive->special.b.recalibrate = legacy;
+ drive->mult_count = 0;
++ drive->parked = 0;
+ if (!drive->keep_settings && !drive->using_dma)
+ drive->mult_req = 0;
+ if (drive->mult_req != drive->mult_count)
+@@ -1164,12 +1165,13 @@ static void pre_reset(ide_drive_t *drive)
+ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+ {
+ unsigned int unit;
+- unsigned long flags;
++ unsigned long flags, timeout;
+ ide_hwif_t *hwif;
+ ide_hwgroup_t *hwgroup;
+ struct ide_io_ports *io_ports;
+ const struct ide_tp_ops *tp_ops;
+ const struct ide_port_ops *port_ops;
++ DEFINE_WAIT(wait);
+
+ spin_lock_irqsave(&ide_lock, flags);
+ hwif = HWIF(drive);
+@@ -1196,6 +1198,30 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+ return ide_started;
+ }
+
++ /* We must not disturb devices in the parked state. */
++ do {
++ unsigned long now;
++
++ prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
++ timeout = jiffies;
++ for (unit = 0; unit < MAX_DRIVES; unit++) {
++ ide_drive_t *tdrive = &hwif->drives[unit];
++
++ if (tdrive->present && tdrive->parked &&
++ time_after(tdrive->sleep, timeout))
++ timeout = tdrive->sleep;
++ }
++
++ now = jiffies;
++ if (time_before_eq(timeout, now))
++ break;
++
++ spin_unlock_irqrestore(&ide_lock, flags);
++ timeout = schedule_timeout_uninterruptible(timeout - now);
++ spin_lock_irqsave(&ide_lock, flags);
++ } while (timeout);
++ finish_wait(&ide_park_wq, &wait);
++
+ /*
+ * First, reset any device state data we were maintaining
+ * for any of the drives on this interface.
+diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
+new file mode 100644
+index 0000000..18adda6
+--- /dev/null
++++ b/drivers/ide/ide-park.c
+@@ -0,0 +1,120 @@
++#include <linux/kernel.h>
++#include <linux/ide.h>
++#include <linux/jiffies.h>
++#include <linux/blkdev.h>
++
++DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
++
++static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
++{
++ struct request_queue *q = drive->queue;
++ struct request *rq;
++ int rc;
++
++ timeout += jiffies;
++ spin_lock_irq(&ide_lock);
++ if (drive->parked) {
++ ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
++ int reset_timer;
++
++ reset_timer = time_before(timeout, drive->sleep);
++ drive->sleep = timeout;
++ wake_up_all(&ide_park_wq);
++ if (reset_timer && hwgroup->sleeping &&
++ del_timer(&hwgroup->timer)) {
++ hwgroup->sleeping = 0;
++ hwgroup->busy = 0;
++ blk_start_queueing(q);
++ }
++ spin_unlock_irq(&ide_lock);
++ return;
++ }
++ spin_unlock_irq(&ide_lock);
++
++ rq = blk_get_request(q, READ, __GFP_WAIT);
++ rq->cmd[0] = REQ_PARK_HEADS;
++ rq->cmd_len = 1;
++ rq->cmd_type = REQ_TYPE_SPECIAL;
++ rq->special = &timeout;
++ rc = blk_execute_rq(q, NULL, rq, 1);
++ blk_put_request(rq);
++ if (rc)
++ goto out;
++
++ /*
++ * Make sure that *some* command is sent to the drive after the
++ * timeout has expired, so power management will be reenabled.
++ */
++ rq = blk_get_request(q, READ, GFP_NOWAIT);
++ if (unlikely(!rq))
++ goto out;
++
++ rq->cmd[0] = REQ_UNPARK_HEADS;
++ rq->cmd_len = 1;
++ rq->cmd_type = REQ_TYPE_SPECIAL;
++ elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
++
++out:
++ return;
++}
++
++ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ ide_drive_t *drive = to_ide_device(dev);
++ unsigned long now;
++ unsigned int msecs;
++
++ if (drive->no_unload)
++ return -EOPNOTSUPP;
++
++ spin_lock_irq(&ide_lock);
++ now = jiffies;
++ if (drive->parked && time_after(drive->sleep, now))
++ msecs = jiffies_to_msecs(drive->sleep - now);
++ else
++ msecs = 0;
++ spin_unlock_irq(&ide_lock);
++
++ return snprintf(buf, 20, "%u\n", msecs);
++}
++
++ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++#define MAX_PARK_TIMEOUT 30000
++ ide_drive_t *drive = to_ide_device(dev);
++ long int input;
++ int rc;
++
++ rc = strict_strtol(buf, 10, &input);
++ if (rc || input < -2)
++ return -EINVAL;
++ if (input > MAX_PARK_TIMEOUT) {
++ input = MAX_PARK_TIMEOUT;
++ rc = -EOVERFLOW;
++ }
++
++ mutex_lock(&ide_setting_mtx);
++ if (input >= 0) {
++ if (drive->no_unload)
++ rc = -EOPNOTSUPP;
++ else if (input || drive->parked)
++ issue_park_cmd(drive, msecs_to_jiffies(input));
++ } else {
++ if (drive->media == ide_disk)
++ switch (input) {
++ case -1:
++ drive->no_unload = 0;
++ break;
++ case -2:
++ drive->no_unload = 1;
++ break;
++ }
++ else
++ rc = -EOPNOTSUPP;
++ }
++ mutex_unlock(&ide_setting_mtx);
++
++ return rc ? rc : len;
++}
+diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
+index a51a30e..0df3c91 100644
+--- a/drivers/ide/ide-probe.c
++++ b/drivers/ide/ide-probe.c
+@@ -210,6 +210,8 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
+ drive->media = type;
+ /* an ATAPI device ignores DRDY */
+ drive->ready_stat = 0;
++ /* we don't do head unloading on ATAPI devices */
++ drive->no_unload = 1;
+ return;
+ }
+
+@@ -227,6 +229,9 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
+
+ drive->media = ide_disk;
+
++ if (!ata_id_has_unload((const u16 *)drive->id))
++ drive->no_unload = 1;
++
+ printk(KERN_CONT "%s DISK drive\n",
+ (id->config == 0x848a) ? "CFA" : "ATA");
+
+diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
+index 7fb6f1c..255c960 100644
+--- a/drivers/ide/ide-taskfile.c
++++ b/drivers/ide/ide-taskfile.c
+@@ -56,6 +56,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *);
+ static ide_startstop_t set_geometry_intr(ide_drive_t *);
+ static ide_startstop_t recal_intr(ide_drive_t *);
+ static ide_startstop_t set_multmode_intr(ide_drive_t *);
++static ide_startstop_t park_intr(ide_drive_t *);
+ static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
+ static ide_startstop_t task_in_intr(ide_drive_t *);
+
+@@ -105,6 +106,8 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
+ case WIN_SPECIFY: handler = set_geometry_intr; break;
+ case WIN_RESTORE: handler = recal_intr; break;
+ case WIN_SETMULT: handler = set_multmode_intr; break;
++ case WIN_IDLEIMMEDIATE: /* fall through */
++ case WIN_CHECKPOWERMODE1: handler = park_intr; break;
+ }
+ }
+ ide_execute_command(drive, tf->command, handler,
+@@ -186,6 +189,37 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
+ }
+
+ /*
++ * park_intr() is invoked on completion of a REQ_PARK_HEADS cmd.
++ */
++static ide_startstop_t park_intr(ide_drive_t *drive)
++{
++ ide_hwif_t *hwif = drive->hwif;
++ u8 stat;
++
++ local_irq_enable_in_hardirq();
++ stat = hwif->tp_ops->read_status(hwif);
++
++ if (!OK_STAT(stat, READY_STAT, BAD_STAT))
++ return ide_error(drive, "park_intr", stat);
++
++ if (hwif->hwgroup->rq->cmd[0] == REQ_PARK_HEADS) {
++ ide_task_t task;
++
++ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++ hwif->tp_ops->tf_read(drive, &task);
++ if (task.tf.lbal != 0xc4) {
++ printk(KERN_ERR "%s: head unload failed!\n",
++ drive->name);
++ ide_tf_dump(drive->name, &task.tf);
++ } else
++ drive->parked = 1;
++ }
++
++ ide_end_drive_cmd(drive, stat, ide_read_error(drive));
++ return ide_stopped;
++}
++
++/*
+ * Handler for commands without a data phase
+ */
+ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
+diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
+index 7724516..9416ffb 100644
+--- a/drivers/ide/ide.c
++++ b/drivers/ide/ide.c
+@@ -734,6 +734,7 @@ static struct device_attribute ide_dev_attrs[] = {
+ __ATTR_RO(model),
+ __ATTR_RO(firmware),
+ __ATTR(serial, 0400, serial_show, NULL),
++ __ATTR(unload_heads, 0644, ide_park_show, ide_park_store),
+ __ATTR_NULL
+ };
+
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 8a12d71..a26ebd2 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -667,6 +667,15 @@ static inline int ata_id_has_dword_io(const u16 *id)
+ return 0;
+ }
+
++static inline int ata_id_has_unload(const u16 *id)
++{
++ if (ata_id_major_version(id) >= 7 &&
++ (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
++ id[ATA_ID_CFSSE] & (1 << 13))
++ return 1;
++ return 0;
++}
++
+ static inline int ata_id_current_chs_valid(const u16 *id)
+ {
+ /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
+diff --git a/include/linux/ide.h b/include/linux/ide.h
+index 6514db8..03b6dea 100644
+--- a/include/linux/ide.h
++++ b/include/linux/ide.h
+@@ -143,6 +143,8 @@ struct ide_io_ports {
+ * Values should be in the range of 0x20 to 0x3f.
+ */
+ #define REQ_DRIVE_RESET 0x20
++#define REQ_PARK_HEADS 0x22
++#define REQ_UNPARK_HEADS 0x23
+
+ /*
+ * Check for an interrupt and acknowledge the interrupt status
+@@ -423,6 +425,8 @@ struct ide_drive_s {
+ unsigned sleeping : 1; /* 1=sleeping & sleep field valid */
+ unsigned post_reset : 1;
+ unsigned udma33_warned : 1;
++ unsigned no_unload : 1; /* no support for unload feature */
++ unsigned parked : 1; /* device parked, heads unloaded */
+
+ u8 addressing; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */
+ u8 quirk_list; /* considered quirky, set for a specific host */
+@@ -1061,6 +1065,7 @@ extern int drive_is_ready(ide_drive_t *);
+
+ void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
+
++
+ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
+ ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry,
+ void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *),
+@@ -1072,6 +1077,13 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *,
+ ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_atapi_pc *,
+ ide_handler_t *, unsigned int, ide_expiry_t *);
+
++/* Disk head parking */
++extern wait_queue_head_t ide_park_wq;
++ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
++ char *buf);
++ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t len);
++
+ ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
+
+ void task_end_request(ide_drive_t *, struct request *, u8);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 225bfc5..adc16cf 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -146,6 +146,7 @@ enum {
+ ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
+ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
+ ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
++ ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
+ ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
+
+ ATA_DFLAG_DETACH = (1 << 24),
+@@ -244,6 +245,7 @@ enum {
+ ATA_TMOUT_BOOT = 30000, /* heuristic */
+ ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ ATA_TMOUT_INTERNAL_QUICK = 5000,
++ ATA_TMOUT_MAX_PARK = 30000,
+
+ /* FIXME: GoVault needs 2s but we can't afford that without
+ * parallel probing. 800ms is enough for iVDR disk
+@@ -319,8 +321,9 @@ enum {
+ ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
+ ATA_EH_ENABLE_LINK = (1 << 3),
+ ATA_EH_LPM = (1 << 4), /* link power management action */
++ ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
+
+- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
++ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
+
+ /* ata_eh_info->flags */
+ ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
+@@ -452,6 +455,7 @@ enum link_pm {
+ MEDIUM_POWER,
+ };
+ extern struct device_attribute dev_attr_link_power_management_policy;
++extern struct device_attribute dev_attr_unload_heads;
+ extern struct device_attribute dev_attr_em_message_type;
+ extern struct device_attribute dev_attr_em_message;
+ extern struct device_attribute dev_attr_sw_activity;
+@@ -564,6 +568,7 @@ struct ata_device {
+ /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
+ u64 n_sectors; /* size of device, if ATA */
+ unsigned int class; /* ATA_DEV_xxx */
++ unsigned long unpark_deadline;
+
+ u8 pio_mode;
+ u8 dma_mode;
+@@ -621,6 +626,7 @@ struct ata_eh_context {
+ [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
+ unsigned int classes[ATA_MAX_DEVICES];
+ unsigned int did_probe_mask;
++ unsigned int unloaded_mask;
+ unsigned int saved_ncq_enabled;
+ u8 saved_xfer_mode[ATA_MAX_DEVICES];
+ /* timestamp for the last reset attempt or success */
+@@ -709,6 +715,7 @@ struct ata_port {
+ struct list_head eh_done_q;
+ wait_queue_head_t eh_wait_q;
+ int eh_tries;
++ struct completion park_req_pending;
+
+ pm_message_t pm_mesg;
+ int *pm_result;
+@@ -1098,6 +1105,7 @@ extern void ata_std_error_handler(struct ata_port *ap);
+ */
+ extern const struct ata_port_operations ata_base_port_ops;
+ extern const struct ata_port_operations sata_port_ops;
++extern struct device_attribute *ata_common_sdev_attrs[];
+
+ #define ATA_BASE_SHT(drv_name) \
+ .module = THIS_MODULE, \
+@@ -1112,7 +1120,8 @@ extern const struct ata_port_operations sata_port_ops;
+ .proc_name = drv_name, \
+ .slave_configure = ata_scsi_slave_config, \
+ .slave_destroy = ata_scsi_slave_destroy, \
+- .bios_param = ata_std_bios_param
++ .bios_param = ata_std_bios_param, \
++ .sdev_attrs = ata_common_sdev_attrs
+
+ #define ATA_NCQ_SHT(drv_name) \
+ ATA_BASE_SHT(drv_name), \
diff --git a/app-laptop/hdapsd/files/hdapsd.conf b/app-laptop/hdapsd/files/hdapsd.conf
new file mode 100644
index 0000000..6221909
--- /dev/null
+++ b/app-laptop/hdapsd/files/hdapsd.conf
@@ -0,0 +1,11 @@
+# The name of the disk device that hdapsd should monitor.
+# Usually this is 'hda' or 'sda', the primary master.
+DISK="sda"
+
+# hdapsd sensitivity
+# The lower the threshold, the earlier the heads are parked
+# when the laptop is shaken (see the example at the end of this file).
+THRESHOLD="10"
+
+# Set any extra options here, like -a for adaptive mode
+OPTIONS="-a"
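+
+# Example: on a laptop that is moved around a lot, a lower threshold
+# parks the heads earlier. Illustrative values only:
+#   THRESHOLD="8"
+#   OPTIONS="-a"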
diff --git a/app-laptop/hdapsd/files/hdapsd.init b/app-laptop/hdapsd/files/hdapsd.init
new file mode 100644
index 0000000..ef146ee
--- /dev/null
+++ b/app-laptop/hdapsd/files/hdapsd.init
@@ -0,0 +1,49 @@
+#!/sbin/runscript
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+depend() {
+ need localmount
+}
+
+checkconfig() {
+ if [[ -z ${DISK} || -z ${THRESHOLD} ]] ; then
+ eerror "You should setup DISK and THRESHOLD in /etc/conf.d/hdapsd."
+ return 1
+ fi
+
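+ # Older blk_freeze-patched kernels expose /sys/block/<disk>/queue/protect;
+ # the hdaps_protect patches shipped with this package (and kernels from
+ # 2.6.28 on) expose /sys/block/<disk>/device/unload_heads instead.
+ # Either interface is accepted here.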
+ if [[ ! -e /sys/block/${DISK}/queue/protect ]] && [[ ! -e /sys/block/${DISK}/device/unload_heads ]] ; then
+ eerror "No protect entry for ${DISK}!"
+ eerror "Make sure your kernel is patched with the blk_freeze patch"
+ return 1
+ fi
+
+ # Load the tp_smapi module first
+ # This is not a requirement, but it helps hdapsd's adaptive mode
+ if [[ ! -e /sys/devices/platform/smapi ]] ; then
+ modprobe tp_smapi 2>/dev/null
+ fi
+
+ if [[ ! -d /sys/devices/platform/hdaps ]]; then
+ ebegin "Loading hdaps module"
+ modprobe hdaps
+ eend $? || return 1
+ fi
+}
+
+start() {
+ checkconfig || return 1
+
+ ebegin "Starting Hard Drive Active Protection System daemon"
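+ # -b forks hdapsd into the background, -p makes it write the pidfile
+ # that start-stop-daemon tracks; -d and -s pass DISK and THRESHOLD
+ # from /etc/conf.d/hdapsd.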
+ start-stop-daemon --start --exec /usr/sbin/hdapsd \
+ --pidfile /var/run/hdapsd.pid \
+ -- -b -p -d "${DISK}" -s "${THRESHOLD}" ${OPTIONS}
+ eend $?
+}
+
+stop() {
+ ebegin "Stopping Hard Drive Active Protection System daemon"
+ start-stop-daemon --stop --exec /usr/sbin/hdapsd \
+ --pidfile /var/run/hdapsd.pid
+ eend $?
+}
diff --git a/app-laptop/hdapsd/hdapsd-20090129.ebuild b/app-laptop/hdapsd/hdapsd-20090129.ebuild
new file mode 100644
index 0000000..4f70a04
--- /dev/null
+++ b/app-laptop/hdapsd/hdapsd-20090129.ebuild
@@ -0,0 +1,47 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header:
+
+inherit eutils linux-info
+
+DESCRIPTION="IBM ThinkPad Harddrive Active Protection disk head parking daemon"
+HOMEPAGE="http://hdaps.sourceforge.net/"
+SRC_URI="mirror://sourceforge/hdaps/${P}.tar.gz"
+
+LICENSE="GPL-2"
+SLOT="0"
+KEYWORDS="~x86 ~amd64"
+
+IUSE=""
+RDEPEND=""
+
+pkg_setup() {
+ # We require the hdaps module which can either come from kernel sources or
+ # from the tp_smapi package.
+ if ! has_version app-laptop/tp_smapi || ! built_with_use app-laptop/tp_smapi hdaps; then
+ CONFIG_CHECK="SENSORS_HDAPS"
+ ERROR_SENSORS_HDAPS="${P} requires app-laptop/tp_smapi (with hdaps USE enabled) or support for CONFIG_SENSORS_HDAPS enabled"
+ linux-info_pkg_setup
+ fi
+}
+
+src_install() {
+ emake DESTDIR="${D}" install || die "Install failed"
+ dodoc README AUTHORS COPYING ChangeLog || die "dodoc failed"
+ newconfd "${FILESDIR}"/hdapsd.conf hdapsd
+ newinitd "${FILESDIR}"/hdapsd.init hdapsd
+}
+
+pkg_postinst() {
+ [[ -z $(ls "${ROOT}"/sys/block/*/queue/protect 2>/dev/null) ]] && \
+ [[ -z $(ls "${ROOT}"/sys/block/*/device/unload_heads 2>/dev/null) ]] && \
+ ewarn "Your kernel has NOT been patched for blk_freeze!"
+
+ if ! has_version app-laptop/tp_smapi; then
+ ewarn "Using the hdaps module provided by app-laptop/tp_smapi instead"
+ ewarn "of the in-kernel driver is strongly recommended!"
+ fi
+
+ elog "You can change the default frequency by modifing /sys/devices/platform/hdaps/sampling_rate"
+}
diff --git a/app-laptop/hdapsd/metadata.xml b/app-laptop/hdapsd/metadata.xml
new file mode 100644
index 0000000..f9b504c
--- /dev/null
+++ b/app-laptop/hdapsd/metadata.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+ <herd>mobile</herd>
+ <longdescription>IBM ThinkPad Hard Drive Active Protection disk head parking
+ daemon</longdescription>
+</pkgmetadata>