Updated acpi_cpu patch

From: Nate Lawson <nate@root.org>
Date: Tue, 18 Nov 2003 09:57:35 -0800 (PST)
Below you'll find the updated patch for acpi_cpu.  Please test this,
especially for SMP and laptops with _CST objects in their ASL.

Thanks,
Nate

Notes:
* Add a detach method that disables entry to acpi_cpu_idle and in the SMP
case, IPIs all processors to exit sleeping.  This fixes a panic on
shutdown for MP boxes.

* Rework the initialization functions so that cpu_idle_hook is written
late in the boot process.  This fixes a panic on boot where acpi_cpu_idle
was called before the cpu_cx_states entry was filled out.

* Make the P_BLK, P_BLK_LEN, and cpu_cx_count all softc-local variables.
This will help SMP boxes that have _CST or multiple P_BLKs.  No such boxes
are known at this time.

* Always allocate the C1 state, even if the P_BLK is invalid.  This means
we will always take over idling if enabled.  Remove the value -1 as valid
for cx_lowest since this is redundant with machdep.cpu_idle_hlt.

* Reduce locking for the throttle initialization case to around the write
to the smi_cmd port.


Index: sys/dev/acpica/acpi_cpu.c
===================================================================
RCS file: /home/ncvs/src/sys/dev/acpica/acpi_cpu.c,v
retrieving revision 1.19
diff -u -r1.19 acpi_cpu.c
--- sys/dev/acpica/acpi_cpu.c	15 Nov 2003 19:26:05 -0000	1.19
+++ sys/dev/acpica/acpi_cpu.c	18 Nov 2003 17:46:23 -0000
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2003 Nate Lawson
+ * Copyright (c) 2003 Nate Lawson (SDG)
  * Copyright (c) 2001 Michael Smith
  * All rights reserved.
  *
@@ -77,9 +77,11 @@
     device_t		 cpu_dev;
     ACPI_HANDLE		 cpu_handle;
     uint32_t		 cpu_id;	/* ACPI processor id */
+    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
+    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
     struct resource	*cpu_p_cnt;	/* Throttling control register */
     struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
-    int			 cpu_bm_ok;	/* Bus mastering control available. */
+    int			 cpu_cx_count;	/* Number of valid Cx states. */
 };

 #define CPU_GET_REG(reg, width) 					\
@@ -116,10 +118,9 @@
 #define PCI_REVISION_4M		3

 /* Platform hardware resource information. */
-static uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
-static uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
 static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
 static uint8_t		 cpu_pstate_cnt;/* Register to take over throttling. */
+static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
 static uint32_t		 cpu_rid;	/* Driver-wide resource id. */
 static uint32_t		 cpu_quirks;	/* Indicate any hardware bugs. */

@@ -146,11 +147,13 @@

 static int	acpi_cpu_probe(device_t dev);
 static int	acpi_cpu_attach(device_t dev);
+static int	acpi_cpu_detach(device_t dev);
 static int	acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc);
 static int	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
 static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
 static void	acpi_cpu_startup(void *arg);
 static void	acpi_cpu_startup_throttling(void);
+static void	acpi_cpu_startup_cx(void);
 static void	acpi_cpu_throttle_set(uint32_t speed);
 static void	acpi_cpu_idle(void);
 static void	acpi_cpu_c1(void);
@@ -166,6 +169,7 @@
     /* Device interface */
     DEVMETHOD(device_probe,	acpi_cpu_probe),
     DEVMETHOD(device_attach,	acpi_cpu_attach),
+    DEVMETHOD(device_detach,	acpi_cpu_detach),

     {0, 0}
 };
@@ -178,6 +182,7 @@

 static devclass_t acpi_cpu_devclass;
 DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
+
 static int
 acpi_cpu_probe(device_t dev)
 {
@@ -272,11 +277,10 @@
     AcpiEvaluateObject(sc->cpu_handle, "_INI", NULL, NULL);

     /* Get various global values from the Processor object. */
-    cpu_p_blk = pobj.Processor.PblkAddress;
-    cpu_p_blk_len = pobj.Processor.PblkLength;
+    sc->cpu_p_blk = pobj.Processor.PblkAddress;
+    sc->cpu_p_blk_len = pobj.Processor.PblkLength;
     ACPI_DEBUG_PRINT((ACPI_DB_IO, "acpi_cpu%d: P_BLK at %#x/%d%s\n",
-		     device_get_unit(dev), cpu_p_blk, cpu_p_blk_len,
-		     sc->cpu_p_cnt ? "" : " (shadowed)"));
+		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

     acpi_sc = acpi_device_get_parent_softc(dev);
     sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
@@ -297,7 +301,8 @@
     if (thr_ret == 0 || cx_ret == 0) {
 	status = AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
 					  acpi_cpu_notify, sc);
-	AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, sc);
+	if (device_get_unit(dev) == 0)
+	    AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, NULL);
     } else {
 	sysctl_ctx_free(&acpi_cpu_sysctl_ctx);
     }
@@ -306,6 +311,21 @@
 }

 static int
+acpi_cpu_detach(device_t dev)
+{
+
+    /* Disable any entry to the idle function. */
+    cpu_cx_count = 0;
+
+#ifdef SMP
+    /* Wait for all processors to exit acpi_cpu_idle(). */
+    smp_rendezvous(NULL, NULL, NULL, NULL);
+#endif
+
+    return_VALUE (0);
+}
+
+static int
 acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc)
 {
     uint32_t		 duty_end;
@@ -319,10 +339,13 @@
     ACPI_ASSERTLOCK;

     /* Get throttling parameters from the FADT.  0 means not supported. */
-    cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
-    cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;
-    cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
-    cpu_duty_width = AcpiGbl_FADT->DutyWidth;
+    if (device_get_unit(sc->cpu_dev) == 0) {
+	cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
+	cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;
+	cpu_cst_cnt = AcpiGbl_FADT->CstCnt;
+	cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
+	cpu_duty_width = AcpiGbl_FADT->DutyWidth;
+    }
     if (cpu_duty_width == 0 || (cpu_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
 	return (ENXIO);

@@ -363,9 +386,9 @@
     /* If _PTC not present or other failure, try the P_BLK. */
     if (sc->cpu_p_cnt == NULL) {
 	/* The spec says P_BLK must be at least 6 bytes long. */
-	if (cpu_p_blk_len != 6)
+	if (sc->cpu_p_blk_len != 6)
 	    return (ENXIO);
-	gas.Address = cpu_p_blk;
+	gas.Address = sc->cpu_p_blk;
 	gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
 	gas.RegisterBitWidth = 32;
 	sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
@@ -379,25 +402,6 @@
     }
     cpu_rid++;

-    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
-		   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
-		   OID_AUTO, "max_speed", CTLFLAG_RD,
-		   &cpu_max_state, 0, "maximum CPU speed");
-    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
-		   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
-		   OID_AUTO, "current_speed", CTLFLAG_RD,
-		   &cpu_current_state, 0, "current CPU speed");
-    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
-		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
-		    OID_AUTO, "performance_speed",
-		    CTLTYPE_INT | CTLFLAG_RW, &cpu_performance_state,
-		    0, acpi_cpu_throttle_sysctl, "I", "");
-    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
-		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
-		    OID_AUTO, "economy_speed",
-		    CTLTYPE_INT | CTLFLAG_RW, &cpu_economy_state,
-		    0, acpi_cpu_throttle_sysctl, "I", "");
-
     return (0);
 }

@@ -406,8 +410,7 @@
 {
     ACPI_GENERIC_ADDRESS gas;
     struct acpi_cx	*cx_ptr;
-    struct sbuf		 sb;
-    int			 i, error;
+    int			 error;

     /* Bus mastering arbitration control is needed for C3. */
     if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) {
@@ -420,11 +423,9 @@
      * First, check for the ACPI 2.0 _CST sleep states object.
      * If not usable, fall back to the P_BLK's P_LVL2 and P_LVL3.
      */
-    cpu_cx_count = 0;
+    sc->cpu_cx_count = 0;
     error = acpi_cpu_cx_cst(sc);
     if (error != 0) {
-	if (cpu_p_blk_len != 6)
-	    return (ENXIO);
 	cx_ptr = sc->cpu_cx_states;

 	/* C1 has been required since just after ACPI 1.0 */
@@ -432,13 +433,16 @@
 	cx_ptr->trans_lat = 0;
 	cpu_non_c3 = 0;
 	cx_ptr++;
-	cpu_cx_count++;
+	sc->cpu_cx_count++;
+
+	if (sc->cpu_p_blk_len != 6)
+	    goto done;

 	/* Validate and allocate resources for C2 (P_LVL2). */
 	gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
 	gas.RegisterBitWidth = 8;
 	if (AcpiGbl_FADT->Plvl2Lat < 100) {
-	    gas.Address = cpu_p_blk + 4;
+	    gas.Address = sc->cpu_p_blk + 4;
 	    cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
 	    if (cx_ptr->p_lvlx != NULL) {
 		cpu_rid++;
@@ -446,7 +450,7 @@
 		cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat;
 		cpu_non_c3 = 1;
 		cx_ptr++;
-		cpu_cx_count++;
+		sc->cpu_cx_count++;
 	    }
 	}

@@ -454,47 +458,23 @@
 	if (AcpiGbl_FADT->Plvl3Lat < 1000 &&
 	    (cpu_quirks & CPU_QUIRK_NO_C3) == 0) {

-	    gas.Address = cpu_p_blk + 5;
+	    gas.Address = sc->cpu_p_blk + 5;
 	    cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
 	    if (cx_ptr->p_lvlx != NULL) {
 		cpu_rid++;
 		cx_ptr->type = ACPI_STATE_C3;
 		cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat;
 		cx_ptr++;
-		cpu_cx_count++;
+		sc->cpu_cx_count++;
 	    }
 	}
     }

+done:
     /* If no valid registers were found, don't attach. */
-    if (cpu_cx_count == 0)
+    if (sc->cpu_cx_count == 0)
 	return (ENXIO);

-    sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN);
-    for (i = 0; i < cpu_cx_count; i++) {
-	sbuf_printf(&sb, "C%d/%d ", sc->cpu_cx_states[i].type,
-		    sc->cpu_cx_states[i].trans_lat);
-    }
-    sbuf_trim(&sb);
-    sbuf_finish(&sb);
-    SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx,
-		      SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
-		      OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported,
-		      0, "Cx/microsecond values for supported Cx states");
-    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
-		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
-		    OID_AUTO, "cx_lowest", CTLTYPE_INT | CTLFLAG_RW,
-		    NULL, 0, acpi_cpu_cx_lowest_sysctl, "I",
-		    "lowest Cx sleep state to use");
-    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
-		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
-		    OID_AUTO, "cx_history", CTLTYPE_STRING | CTLFLAG_RD,
-		    NULL, 0, acpi_cpu_history_sysctl, "A", "");
-
-    /* Set next sleep state and hook the idle function. */
-    cpu_cx_next = cpu_cx_lowest;
-    cpu_idle_hook = acpi_cpu_idle;
-
     return (0);
 }

@@ -534,13 +514,12 @@
 	count = top->Package.Count - 1;
     }
     if (count > MAX_CX_STATES) {
-	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n",
-		      count);
+	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
 	count = MAX_CX_STATES;
     }

     /* Set up all valid states. */
-    cpu_cx_count = 0;
+    sc->cpu_cx_count = 0;
     cx_ptr = sc->cpu_cx_states;
     for (i = 0; i < count; i++) {
 	pkg = &top->Package.Elements[i + 1];
@@ -559,7 +538,7 @@
 	case ACPI_STATE_C1:
 	    cpu_non_c3 = i;
 	    cx_ptr++;
-	    cpu_cx_count++;
+	    sc->cpu_cx_count++;
 	    continue;
 	case ACPI_STATE_C2:
 	    if (cx_ptr->trans_lat > 100) {
@@ -594,7 +573,7 @@
 	    device_printf(sc->cpu_dev, "C%d state %d lat\n", cx_ptr->type,
 			  cx_ptr->trans_lat);
 	    cx_ptr++;
-	    cpu_cx_count++;
+	    sc->cpu_cx_count++;
 	}
     }
     AcpiOsFree(buf.Pointer);
@@ -604,17 +583,12 @@

 /*
  * Call this *after* all CPUs have been attached.
- *
- * Takes the ACPI lock to avoid fighting anyone over the SMI command
- * port.  Could probably lock less code.
  */
 static void
 acpi_cpu_startup(void *arg)
 {
-    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg;
-    ACPI_LOCK_DECL;
-
-    ACPI_LOCK;
+    struct acpi_cpu_softc *sc;
+    int count, i;

     /* Get set of CPU devices */
     devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);
@@ -623,16 +597,35 @@
     EVENTHANDLER_REGISTER(power_profile_change, acpi_cpu_power_profile,
 			  NULL, 0);

+    /*
+     * Make sure all the processors' Cx counts match.  We should probably
+     * also check the contents of each.  However, no known systems have
+     * non-matching Cx counts so we'll deal with this later.
+     */
+    count = MAX_CX_STATES;
+    for (i = 0; i < cpu_ndevices; i++) {
+	sc = device_get_softc(cpu_devices[i]);
+	count = min(sc->cpu_cx_count, count);
+    }
+    cpu_cx_count = count;
+
+    /* Perform throttling and Cx final initialization. */
+    sc = device_get_softc(cpu_devices[0]);
     if (sc->cpu_p_cnt != NULL)
 	acpi_cpu_startup_throttling();
-    else
-	ACPI_UNLOCK;
+    if (cpu_cx_count > 0)
+	acpi_cpu_startup_cx();
 }

+/*
+ * Takes the ACPI lock to avoid fighting anyone over the SMI command
+ * port.
+ */
 static void
 acpi_cpu_startup_throttling()
 {
     int cpu_temp_speed;
+    ACPI_LOCK_DECL;

     /* Initialise throttling states */
     cpu_max_state = CPU_MAX_SPEED;
@@ -653,11 +646,31 @@
 	cpu_economy_state = cpu_temp_speed;
     }

+    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
+		   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
+		   OID_AUTO, "max_speed", CTLFLAG_RD,
+		   &cpu_max_state, 0, "maximum CPU speed");
+    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
+		   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
+		   OID_AUTO, "current_speed", CTLFLAG_RD,
+		   &cpu_current_state, 0, "current CPU speed");
+    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
+		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
+		    OID_AUTO, "performance_speed",
+		    CTLTYPE_INT | CTLFLAG_RW, &cpu_performance_state,
+		    0, acpi_cpu_throttle_sysctl, "I", "");
+    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
+		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
+		    OID_AUTO, "economy_speed",
+		    CTLTYPE_INT | CTLFLAG_RW, &cpu_economy_state,
+		    0, acpi_cpu_throttle_sysctl, "I", "");
+
     /* If ACPI 2.0+, signal platform that we are taking over throttling. */
-    if (cpu_pstate_cnt != 0)
+    if (cpu_pstate_cnt != 0) {
+	ACPI_LOCK;
 	AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8);
-
-    ACPI_UNLOCK;
+	ACPI_UNLOCK;
+    }

     /* Set initial speed */
     acpi_cpu_power_profile(NULL);
@@ -667,6 +680,50 @@
 	   CPU_SPEED_PRINTABLE(cpu_current_state));
 }

+static void
+acpi_cpu_startup_cx()
+{
+    struct acpi_cpu_softc *sc;
+    struct sbuf		 sb;
+    int i;
+    ACPI_LOCK_DECL;
+
+    sc = device_get_softc(cpu_devices[0]);
+    sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN);
+    for (i = 0; i < cpu_cx_count; i++) {
+	sbuf_printf(&sb, "C%d/%d ", sc->cpu_cx_states[i].type,
+		    sc->cpu_cx_states[i].trans_lat);
+    }
+    sbuf_trim(&sb);
+    sbuf_finish(&sb);
+    SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx,
+		      SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
+		      OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported,
+		      0, "Cx/microsecond values for supported Cx states");
+    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
+		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
+		    OID_AUTO, "cx_lowest", CTLTYPE_INT | CTLFLAG_RW,
+		    NULL, 0, acpi_cpu_cx_lowest_sysctl, "I",
+		    "lowest Cx sleep state to use");
+    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
+		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
+		    OID_AUTO, "cx_history", CTLTYPE_STRING | CTLFLAG_RD,
+		    NULL, 0, acpi_cpu_history_sysctl, "A", "");
+
+#ifdef notyet
+    /* Signal platform that we can handle _CST notification. */
+    if (cpu_cst_cnt != 0) {
+	ACPI_LOCK;
+	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
+	ACPI_UNLOCK;
+    }
+#endif
+
+    /* Take over idling from cpu_idle_default(). */
+    cpu_cx_next = cpu_cx_lowest;
+    cpu_idle_hook = acpi_cpu_idle;
+}
+
 /*
  * Set CPUs to the new state.
  *
@@ -727,7 +784,7 @@
     KASSERT(sc != NULL, ("NULL softc for %d", PCPU_GET(acpi_id)));

     /* If disabled, return immediately. */
-    if (cpu_cx_lowest < 0 || cpu_cx_count == 0) {
+    if (cpu_cx_count == 0) {
 	ACPI_ENABLE_IRQS();
 	return;
     }
@@ -749,6 +806,17 @@
     /* Perform the actual sleep based on the Cx-specific semantics. */
     cx_next = &sc->cpu_cx_states[cpu_cx_next];
     switch (cx_next->type) {
+    case ACPI_STATE_C0:
+    {
+	static int once;
+
+	if (once == 0) {
+	    printf("C0? cx_next %d cx_count %d\n", cpu_cx_next, cpu_cx_count);
+	    once = 1;
+	}
+	ACPI_ENABLE_IRQS();
+	return;
+    }
     case ACPI_STATE_C1:
 	/* Execute HLT (or equivalent) and wait for an interrupt. */
 	acpi_cpu_c1();
@@ -854,6 +922,7 @@
 /*
  * Re-evaluate the _PSS and _CST objects when we are notified that they
  * have changed.
+ *
  * XXX Re-evaluation disabled until locking is done.
  */
 static void
@@ -1027,7 +1096,7 @@
     error = sysctl_handle_int(oidp, &val, 0, req);
     if (error != 0 || req->newptr == NULL)
 	return (error);
-    if (val < -1 || val > cpu_cx_count - 1)
+    if (val < 0 || val > cpu_cx_count - 1)
 	return (EINVAL);

     /* Use the new value for the next idle slice. */
Index: share/man/man4/acpi.4
===================================================================
RCS file: /home/ncvs/src/share/man/man4/acpi.4,v
retrieving revision 1.17
diff -u -r1.17 acpi.4
--- share/man/man4/acpi.4	15 Nov 2003 19:26:05 -0000	1.17
+++ share/man/man4/acpi.4	17 Nov 2003 17:18:09 -0000
@@ -342,7 +342,6 @@
 is modified.
 .It Va hw.acpi.cpu.cx_lowest
 Zero-based index of the lowest CPU idle state to use.
-A value of -1 disables ACPI CPU idle states.
 To enable ACPI CPU idling control,
 .Va machdep.cpu_idle_hlt
 must be set to 1.
Received on Tue Nov 18 2003 - 08:57:38 UTC

This archive was generated by hypermail 2.4.0 : Wed May 19 2021 - 11:37:29 UTC