/******************************************************************************
 *
 * Name: acmacros.h - C macros for the entire subsystem.
 *
 *****************************************************************************/

/******************************************************************************
 *
 * 1. Copyright Notice
 *
 * Some or all of this work - Copyright (c) 1999 - 2013, Intel Corp.
 * All rights reserved.
 *
 * 2. License
 *
 * 2.1. This is your license from Intel Corp. under its intellectual property
 * rights. You may have additional license terms from the party that provided
 * you this software, covering your right to use that party's intellectual
 * property rights.
 *
 * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a
 * copy of the source code appearing in this file ("Covered Code") an
 * irrevocable, perpetual, worldwide license under Intel's copyrights in the
 * base code distributed originally by Intel ("Original Intel Code") to copy,
 * make derivatives, distribute, use and display any portion of the Covered
 * Code in any form, with the right to sublicense such rights; and
 *
 * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent
 * license (with the right to sublicense), under only those claims of Intel
 * patents that are infringed by the Original Intel Code, to make, use, sell,
 * offer to sell, and import the Covered Code and derivative works thereof
 * solely to the minimum extent necessary to exercise the above copyright
 * license, and in no event shall the patent license extend to any additions
 * to or modifications of the Original Intel Code. No other license or right
 * is granted directly or by implication, estoppel or otherwise;
 *
 * The above copyright and patent license is granted only if the following
 * conditions are met:
 *
 * 3. Conditions
 *
 * 3.1. Redistribution of Source with Rights to Further Distribute Source.
 * Redistribution of source code of any substantial portion of the Covered
 * Code or modification with rights to further distribute source must include
 * the above Copyright Notice, the above License, this list of Conditions,
 * and the following Disclaimer and Export Compliance provision. In addition,
 * Licensee must cause all Covered Code to which Licensee contributes to
 * contain a file documenting the changes Licensee made to create that Covered
 * Code and the date of any change. Licensee must include in that file the
 * documentation of any changes made by any predecessor Licensee. Licensee
 * must include a prominent statement that the modification is derived,
 * directly or indirectly, from Original Intel Code.
 *
 * 3.2. Redistribution of Source with no Rights to Further Distribute Source.
 * Redistribution of source code of any substantial portion of the Covered
 * Code or modification without rights to further distribute source must
 * include the following Disclaimer and Export Compliance provision in the
 * documentation and/or other materials provided with distribution. In
 * addition, Licensee may not authorize further sublicense of source of any
 * portion of the Covered Code, and must include terms to the effect that the
 * license from Licensee to its licensee is limited to the intellectual
 * property embodied in the software Licensee provides to its licensee, and
 * not to intellectual property embodied in modifications its licensee may
 * make.
 *
 * 3.3. Redistribution of Executable. Redistribution in executable form of any
 * substantial portion of the Covered Code or modification must reproduce the
 * above Copyright Notice, and the following Disclaimer and Export Compliance
 * provision in the documentation and/or other materials provided with the
 * distribution.
 *
 * 3.4. Intel retains all right, title, and interest in and to the Original
 * Intel Code.
 *
 * 3.5. Neither the name Intel nor any other trademark owned or controlled by
 * Intel shall be used in advertising or otherwise to promote the sale, use or
 * other dealings in products derived from or relating to the Covered Code
 * without prior written authorization from Intel.
 *
 * 4. Disclaimer and Export Compliance
 *
 * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED
 * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE
 * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE,
 * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY
 * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY
 * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A
 * PARTICULAR PURPOSE.
 *
 * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES
 * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR
 * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT,
 * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY
 * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL
 * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS
 * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY
 * LIMITED REMEDY.
 *
 * 4.3. Licensee shall not export, either directly or indirectly, any of this
 * software or system incorporating such software without first obtaining any
 * required license or other approval from the U. S. Department of Commerce or
 * any other agency or department of the United States Government. In the
 * event Licensee exports any such software from the United States or
 * re-exports any such software from a foreign destination, Licensee shall
 * ensure that the distribution and export/re-export of the software is in
 * compliance with all laws, regulations, orders, or other restrictions of the
 * U.S. Export Administration Regulations. Licensee agrees that neither it nor
 * any of its subsidiaries will export/re-export any technical data, process,
 * software, or service, directly or indirectly, to any country for which the
 * United States government or any agency thereof requires an export license,
 * other governmental approval, or letter of assurance, without first obtaining
 * such license, approval or letter.
 *
 *****************************************************************************/

#ifndef __ACMACROS_H__
#define __ACMACROS_H__


/*
 * Extract data using a pointer. Any more than a byte and we
 * get into potential alignment issues -- see the ACPI_MOVE macros below.
 * Use with care.
 */
#define ACPI_CAST8(ptr)                 ACPI_CAST_PTR (UINT8, (ptr))
#define ACPI_CAST16(ptr)                ACPI_CAST_PTR (UINT16, (ptr))
#define ACPI_CAST32(ptr)                ACPI_CAST_PTR (UINT32, (ptr))
#define ACPI_CAST64(ptr)                ACPI_CAST_PTR (UINT64, (ptr))
#define ACPI_GET8(ptr)                  (*ACPI_CAST8 (ptr))
#define ACPI_GET16(ptr)                 (*ACPI_CAST16 (ptr))
#define ACPI_GET32(ptr)                 (*ACPI_CAST32 (ptr))
#define ACPI_GET64(ptr)                 (*ACPI_CAST64 (ptr))
#define ACPI_SET8(ptr, val)             (*ACPI_CAST8 (ptr) = (UINT8) (val))
#define ACPI_SET16(ptr, val)            (*ACPI_CAST16 (ptr) = (UINT16) (val))
#define ACPI_SET32(ptr, val)            (*ACPI_CAST32 (ptr) = (UINT32) (val))
#define ACPI_SET64(ptr, val)            (*ACPI_CAST64 (ptr) = (UINT64) (val))
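
/*
 * Example (illustrative only; "Buffer" and "Checksum" are placeholder names):
 * reading and writing fixed-width values through byte pointers.
 *
 *     UINT8   Buffer[8];
 *     UINT32  Length;
 *     UINT16  Checksum = 0x1234;
 *
 *     Length = ACPI_GET32 (&Buffer[0]);     - read a UINT32 at Buffer+0
 *     ACPI_SET16 (&Buffer[4], Checksum);    - store a UINT16 at Buffer+4
 *
 * These macros dereference the cast pointer directly, so they assume the
 * access is aligned (or that the hardware tolerates misaligned accesses).
 * For possibly unaligned buffers, use the ACPI_MOVE_* macros below.
 */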

/*
 * printf() format helpers
 */

/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */

#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)

#if ACPI_MACHINE_WIDTH == 64
#define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
#else
#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
#endif
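
/*
 * Example (illustrative): printing a 64-bit value with a printf-style
 * function. ACPI_FORMAT_UINT64 expands to two comma-separated 32-bit
 * values, so it must be paired with two format specifiers.
 *
 *     UINT64 Address;
 *
 *     AcpiOsPrintf ("Address: 0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64 (Address));
 */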

/*
 * Macros for moving data around to/from buffers that are possibly unaligned.
 * If the hardware supports the transfer of unaligned data, just do the store.
 * Otherwise, we have to move one byte at a time.
 */
#ifdef ACPI_BIG_ENDIAN
/*
 * Macros for big-endian machines
 */

/* These macros reverse the bytes during the move, converting little-endian to big-endian */

/* Big Endian  <==  Little Endian */
/*  Hi...Lo          Lo...Hi      */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d, s)        {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_32(d, s)        {(*(UINT32 *)(void *)(d))=0;\
                                         ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_64(d, s)        {(*(UINT64 *)(void *)(d))=0;\
                                         ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[0];}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d, s)        {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[3];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[2];\
                                         ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[0];}

#define ACPI_MOVE_32_TO_64(d, s)        {(*(UINT64 *)(void *)(d))=0;\
                                         ((UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[3];\
                                         ((UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[2];\
                                         ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[0];}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */

#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)    /* Truncate to 32 */

#define ACPI_MOVE_64_TO_64(d, s)        {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[7];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[6];\
                                         ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[5];\
                                         ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[4];\
                                         ((UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[3];\
                                         ((UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[2];\
                                         ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[0];}
#else
/*
 * Macros for little-endian machines
 */

#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED

/* The hardware supports unaligned transfers, just do the little-endian move */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d, s)        *(UINT16 *)(void *)(d) = *(UINT16 *)(void *)(s)
#define ACPI_MOVE_16_TO_32(d, s)        *(UINT32 *)(void *)(d) = *(UINT16 *)(void *)(s)
#define ACPI_MOVE_16_TO_64(d, s)        *(UINT64 *)(void *)(d) = *(UINT16 *)(void *)(s)

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d, s)        *(UINT32 *)(void *)(d) = *(UINT32 *)(void *)(s)
#define ACPI_MOVE_32_TO_64(d, s)        *(UINT64 *)(void *)(d) = *(UINT32 *)(void *)(s)

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d, s)        *(UINT64 *)(void *)(d) = *(UINT64 *)(void *)(s)

#else
/*
 * The hardware does not support unaligned transfers. We must move the
 * data one byte at a time. These macros work whether the source or
 * the destination (or both) is/are unaligned. (Little-endian move)
 */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d, s)        {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];}

#define ACPI_MOVE_16_TO_32(d, s)        {(*(UINT32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}
#define ACPI_MOVE_16_TO_64(d, s)        {(*(UINT64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d, s)        {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[2];\
                                         ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[3];}

#define ACPI_MOVE_32_TO_64(d, s)        {(*(UINT64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d, s);}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d, s)        {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[2];\
                                         ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[3];\
                                         ((UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[4];\
                                         ((UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[5];\
                                         ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[6];\
                                         ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[7];}
#endif
#endif
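
/*
 * Example (illustrative; "RawHeader" is a placeholder name): copying a
 * 32-bit table length out of a raw, possibly unaligned byte buffer. Both
 * arguments are pointers; the macro expands to either a direct store or a
 * byte-by-byte copy, depending on the configuration selected above.
 *
 *     UINT8  RawHeader[36];
 *     UINT32 TableLength;
 *
 *     ACPI_MOVE_32_TO_32 (&TableLength, &RawHeader[4]);
 */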

/*
 * Fast power-of-two math macros for non-optimized compilers
 */
#define _ACPI_DIV(value, PowerOf2)      ((UINT32) ((value) >> (PowerOf2)))
#define _ACPI_MUL(value, PowerOf2)      ((UINT32) ((value) << (PowerOf2)))
#define _ACPI_MOD(value, Divisor)       ((UINT32) ((value) & ((Divisor) -1)))

#define ACPI_DIV_2(a)                   _ACPI_DIV(a, 1)
#define ACPI_MUL_2(a)                   _ACPI_MUL(a, 1)
#define ACPI_MOD_2(a)                   _ACPI_MOD(a, 2)

#define ACPI_DIV_4(a)                   _ACPI_DIV(a, 2)
#define ACPI_MUL_4(a)                   _ACPI_MUL(a, 2)
#define ACPI_MOD_4(a)                   _ACPI_MOD(a, 4)

#define ACPI_DIV_8(a)                   _ACPI_DIV(a, 3)
#define ACPI_MUL_8(a)                   _ACPI_MUL(a, 3)
#define ACPI_MOD_8(a)                   _ACPI_MOD(a, 8)

#define ACPI_DIV_16(a)                  _ACPI_DIV(a, 4)
#define ACPI_MUL_16(a)                  _ACPI_MUL(a, 4)
#define ACPI_MOD_16(a)                  _ACPI_MOD(a, 16)

#define ACPI_DIV_32(a)                  _ACPI_DIV(a, 5)
#define ACPI_MUL_32(a)                  _ACPI_MUL(a, 5)
#define ACPI_MOD_32(a)                  _ACPI_MOD(a, 32)

/*
 * Rounding macros (Power of two boundaries only)
 */
#define ACPI_ROUND_DOWN(value, boundary)    (((ACPI_SIZE)(value)) & \
                                                (~(((ACPI_SIZE) boundary)-1)))

#define ACPI_ROUND_UP(value, boundary)      ((((ACPI_SIZE)(value)) + \
                                                (((ACPI_SIZE) boundary)-1)) & \
                                                (~(((ACPI_SIZE) boundary)-1)))

/* Note: sizeof(ACPI_SIZE) evaluates to either 4 or 8 (32- vs 64-bit mode) */

#define ACPI_ROUND_DOWN_TO_32BIT(a)         ACPI_ROUND_DOWN(a, 4)
#define ACPI_ROUND_DOWN_TO_64BIT(a)         ACPI_ROUND_DOWN(a, 8)
#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a)   ACPI_ROUND_DOWN(a, sizeof(ACPI_SIZE))

#define ACPI_ROUND_UP_TO_32BIT(a)           ACPI_ROUND_UP(a, 4)
#define ACPI_ROUND_UP_TO_64BIT(a)           ACPI_ROUND_UP(a, 8)
#define ACPI_ROUND_UP_TO_NATIVE_WORD(a)     ACPI_ROUND_UP(a, sizeof(ACPI_SIZE))

#define ACPI_ROUND_BITS_UP_TO_BYTES(a)      ACPI_DIV_8((a) + 7)
#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a)    ACPI_DIV_8((a))

#define ACPI_ROUND_UP_TO_1K(a)              (((a) + 1023) >> 10)

/* Generic (non-power-of-two) rounding. Note: the result is in units of "boundary" */

#define ACPI_ROUND_UP_TO(value, boundary)   (((value) + ((boundary)-1)) / (boundary))

#define ACPI_IS_MISALIGNED(value)           (((ACPI_SIZE) value) & (sizeof(ACPI_SIZE)-1))
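
/*
 * Example (illustrative): the power-of-two helpers replace divide/multiply/
 * modulo with shifts and masks, and the rounding macros align a value to a
 * power-of-two boundary.
 *
 *     ACPI_DIV_8 (17)                       yields 2       (17 / 8)
 *     ACPI_MOD_8 (17)                       yields 1       (17 % 8)
 *     ACPI_ROUND_BITS_UP_TO_BYTES (13)      yields 2       (13 bits need 2 bytes)
 *     ACPI_ROUND_UP (0x1005, 16)            yields 0x1010
 *     ACPI_ROUND_DOWN (0x1005, 16)          yields 0x1000
 */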

/*
 * Bitmask creation
 * Bit positions start at zero.
 * MASK_BITS_ABOVE creates a mask starting AT the position and above
 * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
 * (The resulting value zeroes the masked bits when ANDed with a target.)
 */
#define ACPI_MASK_BITS_ABOVE(position)      (~((ACPI_UINT64_MAX) << ((UINT32) (position))))
#define ACPI_MASK_BITS_BELOW(position)      ((ACPI_UINT64_MAX) << ((UINT32) (position)))

/* Bitfields within ACPI registers */

#define ACPI_REGISTER_PREPARE_BITS(Val, Pos, Mask) \
    ((Val << Pos) & Mask)

#define ACPI_REGISTER_INSERT_VALUE(Reg, Pos, Mask, Val) \
    Reg = (Reg & (~(Mask))) | ACPI_REGISTER_PREPARE_BITS(Val, Pos, Mask)

#define ACPI_INSERT_BITS(Target, Mask, Source) \
    Target = ((Target & (~(Mask))) | (Source & Mask))

/* Generic bitfield macros and masks */

#define ACPI_GET_BITS(SourcePtr, Position, Mask) \
    ((*SourcePtr >> Position) & Mask)

#define ACPI_SET_BITS(TargetPtr, Position, Mask, Value) \
    (*TargetPtr |= ((Value & Mask) << Position))

#define ACPI_1BIT_MASK                      0x00000001
#define ACPI_2BIT_MASK                      0x00000003
#define ACPI_3BIT_MASK                      0x00000007
#define ACPI_4BIT_MASK                      0x0000000F
#define ACPI_5BIT_MASK                      0x0000001F
#define ACPI_6BIT_MASK                      0x0000003F
#define ACPI_7BIT_MASK                      0x0000007F
#define ACPI_8BIT_MASK                      0x000000FF
#define ACPI_16BIT_MASK                     0x0000FFFF
#define ACPI_24BIT_MASK                     0x00FFFFFF

/* Macros to extract flag bits from position zero */

#define ACPI_GET_1BIT_FLAG(Value)           ((Value) & ACPI_1BIT_MASK)
#define ACPI_GET_2BIT_FLAG(Value)           ((Value) & ACPI_2BIT_MASK)
#define ACPI_GET_3BIT_FLAG(Value)           ((Value) & ACPI_3BIT_MASK)
#define ACPI_GET_4BIT_FLAG(Value)           ((Value) & ACPI_4BIT_MASK)

/* Macros to extract flag bits from position one and above */

#define ACPI_EXTRACT_1BIT_FLAG(Field, Position)     (ACPI_GET_1BIT_FLAG ((Field) >> Position))
#define ACPI_EXTRACT_2BIT_FLAG(Field, Position)     (ACPI_GET_2BIT_FLAG ((Field) >> Position))
#define ACPI_EXTRACT_3BIT_FLAG(Field, Position)     (ACPI_GET_3BIT_FLAG ((Field) >> Position))
#define ACPI_EXTRACT_4BIT_FLAG(Field, Position)     (ACPI_GET_4BIT_FLAG ((Field) >> Position))

/* ACPI Pathname helpers */

#define ACPI_IS_ROOT_PREFIX(c)              ((c) == (UINT8) 0x5C)   /* Backslash */
#define ACPI_IS_PARENT_PREFIX(c)            ((c) == (UINT8) 0x5E)   /* Caret */
#define ACPI_IS_PATH_SEPARATOR(c)           ((c) == (UINT8) 0x2E)   /* Period (dot) */
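
/*
 * Example (illustrative): isolating a 2-bit field that starts at bit 3
 * of a flags byte.
 *
 *     UINT8 Flags = 0x58;                           binary 0101 1000
 *
 *     ACPI_EXTRACT_2BIT_FLAG (Flags, 3)             yields 0x03
 *     ACPI_MASK_BITS_ABOVE (3)                      yields 0x0000000000000007
 *     (Flags & ACPI_MASK_BITS_ABOVE (3))            yields 0x00 (bits 3 and above removed)
 */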

/*
 * An object of type ACPI_NAMESPACE_NODE can appear in some contexts
 * where a pointer to an object of type ACPI_OPERAND_OBJECT can also
 * appear. This macro is used to distinguish them.
 *
 * The "DescriptorType" field is the second field in both structures.
 */
#define ACPI_GET_DESCRIPTOR_PTR(d)      (((ACPI_DESCRIPTOR *)(void *)(d))->Common.CommonPointer)
#define ACPI_SET_DESCRIPTOR_PTR(d, p)   (((ACPI_DESCRIPTOR *)(void *)(d))->Common.CommonPointer = (p))
#define ACPI_GET_DESCRIPTOR_TYPE(d)     (((ACPI_DESCRIPTOR *)(void *)(d))->Common.DescriptorType)
#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((ACPI_DESCRIPTOR *)(void *)(d))->Common.DescriptorType = (t))

/*
 * Macros for the master AML opcode table
 */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
#define ACPI_OP(Name, PArgs, IArgs, ObjType, Class, Type, Flags) \
    {Name, (UINT32)(PArgs), (UINT32)(IArgs), (UINT32)(Flags), ObjType, Class, Type}
#else
#define ACPI_OP(Name, PArgs, IArgs, ObjType, Class, Type, Flags) \
    {(UINT32)(PArgs), (UINT32)(IArgs), (UINT32)(Flags), ObjType, Class, Type}
#endif

#define ARG_TYPE_WIDTH                  5
#define ARG_1(x)                        ((UINT32)(x))
#define ARG_2(x)                        ((UINT32)(x) << (1 * ARG_TYPE_WIDTH))
#define ARG_3(x)                        ((UINT32)(x) << (2 * ARG_TYPE_WIDTH))
#define ARG_4(x)                        ((UINT32)(x) << (3 * ARG_TYPE_WIDTH))
#define ARG_5(x)                        ((UINT32)(x) << (4 * ARG_TYPE_WIDTH))
#define ARG_6(x)                        ((UINT32)(x) << (5 * ARG_TYPE_WIDTH))

#define ARGI_LIST1(a)                   (ARG_1(a))
#define ARGI_LIST2(a, b)                (ARG_1(b)|ARG_2(a))
#define ARGI_LIST3(a, b, c)             (ARG_1(c)|ARG_2(b)|ARG_3(a))
#define ARGI_LIST4(a, b, c, d)          (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a))
#define ARGI_LIST5(a, b, c, d, e)       (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a))
#define ARGI_LIST6(a, b, c, d, e, f)    (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a))

#define ARGP_LIST1(a)                   (ARG_1(a))
#define ARGP_LIST2(a, b)                (ARG_1(a)|ARG_2(b))
#define ARGP_LIST3(a, b, c)             (ARG_1(a)|ARG_2(b)|ARG_3(c))
#define ARGP_LIST4(a, b, c, d)          (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d))
#define ARGP_LIST5(a, b, c, d, e)       (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e))
#define ARGP_LIST6(a, b, c, d, e, f)    (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f))

#define GET_CURRENT_ARG_TYPE(List)      (List & ((UINT32) 0x1F))
#define INCREMENT_ARG_LIST(List)        (List >>= ((UINT32) ARG_TYPE_WIDTH))

/*
 * Ascii error messages can be configured out
 */
#ifndef ACPI_NO_ERROR_MESSAGES
/*
 * Error reporting. The caller's module and line number are inserted by AE_INFO,
 * the plist contains a set of parens to allow variable-length lists.
 * These macros are used for both the debug and non-debug versions of the code.
 */
#define ACPI_ERROR_NAMESPACE(s, e)          AcpiUtNamespaceError (AE_INFO, s, e);
#define ACPI_ERROR_METHOD(s, n, p, e)       AcpiUtMethodError (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist)         AcpiUtPredefinedWarning plist
#define ACPI_INFO_PREDEFINED(plist)         AcpiUtPredefinedInfo plist
#define ACPI_BIOS_ERROR_PREDEFINED(plist)   AcpiUtPredefinedBiosError plist

#else

/* No error messages */

#define ACPI_ERROR_NAMESPACE(s, e)
#define ACPI_ERROR_METHOD(s, n, p, e)
#define ACPI_WARN_PREDEFINED(plist)
#define ACPI_INFO_PREDEFINED(plist)
#define ACPI_BIOS_ERROR_PREDEFINED(plist)

#endif /* ACPI_NO_ERROR_MESSAGES */

#if (!ACPI_REDUCED_HARDWARE)
#define ACPI_HW_OPTIONAL_FUNCTION(addr)     addr
#else
#define ACPI_HW_OPTIONAL_FUNCTION(addr)     NULL
#endif
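
/*
 * Example (illustrative): each opcode's argument list is packed into a
 * single UINT32, ARG_TYPE_WIDTH (5) bits per argument, with the first
 * argument in the lowest bits. A parser loop can then walk the list.
 * (The argument-type constants, e.g. ARGP_NAME and ARGP_TERMARG, are
 * defined with the AML opcode tables, not in this file.)
 *
 *     UINT32 ArgTypes = ARGP_LIST2 (ARGP_NAME, ARGP_TERMARG);
 *
 *     while (GET_CURRENT_ARG_TYPE (ArgTypes))
 *     {
 *         ... process GET_CURRENT_ARG_TYPE (ArgTypes) ...
 *         INCREMENT_ARG_LIST (ArgTypes);
 *     }
 */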

/*
 * Some code only gets executed when the debugger is built in.
 * Note that this is entirely independent of whether the
 * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not.
 */
#ifdef ACPI_DEBUGGER
#define ACPI_DEBUGGER_EXEC(a)           a
#else
#define ACPI_DEBUGGER_EXEC(a)
#endif


/*
 * Memory allocation tracking (DEBUG ONLY)
 */
#define ACPI_MEM_PARAMETERS             _COMPONENT, _AcpiModuleName, __LINE__

#ifndef ACPI_DBG_TRACK_ALLOCATIONS

/* Memory allocation */

#define ACPI_ALLOCATE(a)                AcpiUtAllocate((ACPI_SIZE) (a), ACPI_MEM_PARAMETERS)
#define ACPI_ALLOCATE_ZEROED(a)         AcpiUtAllocateZeroed((ACPI_SIZE) (a), ACPI_MEM_PARAMETERS)
#define ACPI_FREE(a)                    AcpiOsFree(a)
#define ACPI_MEM_TRACKING(a)

#else

/* Memory allocation */

#define ACPI_ALLOCATE(a)                AcpiUtAllocateAndTrack((ACPI_SIZE) (a), ACPI_MEM_PARAMETERS)
#define ACPI_ALLOCATE_ZEROED(a)         AcpiUtAllocateZeroedAndTrack((ACPI_SIZE) (a), ACPI_MEM_PARAMETERS)
#define ACPI_FREE(a)                    AcpiUtFreeAndTrack(a, ACPI_MEM_PARAMETERS)
#define ACPI_MEM_TRACKING(a)            a

#endif /* ACPI_DBG_TRACK_ALLOCATIONS */


/*
 * Macros used for ACPICA utilities only
 */

/* Generate a UUID */

#define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
    (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
    (b) & 0xFF, ((b) >> 8) & 0xFF, \
    (c) & 0xFF, ((c) >> 8) & 0xFF, \
    (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)

#define ACPI_IS_OCTAL_DIGIT(d)          (((char)(d) >= '0') && ((char)(d) <= '7'))


#endif /* __ACMACROS_H__ */