/kern_2.6.32/arch/mips/include/asm/bitops.h

http://omnia2droid.googlecode.com/

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG	5
#define SZLONG_MASK	31UL
#define __LL		"ll "
#define __SC		"sc "
#define __INS		"ins "
#define __EXT		"ext "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG	6
#define SZLONG_MASK	63UL
#define __LL		"lld "
#define __SC		"scd "
#define __INS		"dins "
#define __EXT		"dext "
#endif

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_llsc_mb()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# set_bit\n"
		"	or	%0, %2\n"
		"	" __SC "%0, %1\n"
		"	beqzl	%0, 1b\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# set_bit\n"
		"	" __INS "%0, %4, %2, 1\n"
		"	" __SC "%0, %1\n"
		"	beqz	%0, 2f\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	.previous\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m), "r" (~0));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# set_bit\n"
		"	or	%0, %2\n"
		"	" __SC "%0, %1\n"
		"	beqz	%0, 2f\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	.previous\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}
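/*
 * Example (an illustrative sketch; "foo_flags" and the bit number are
 * assumptions of this example, not defined by this header):
 *
 *	static unsigned long foo_flags;
 *
 *	set_bit(0, &foo_flags);
 *
 * The update is an atomic read-modify-write of the whole word: on LL/SC
 * capable CPUs it is the ll/or/sc retry loop above, otherwise the
 * interrupt-disabling fallback.
 */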
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# clear_bit\n"
		"	and	%0, %2\n"
		"	" __SC "%0, %1\n"
		"	beqzl	%0, 1b\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# clear_bit\n"
		"	" __INS "%0, $0, %2, 1\n"
		"	" __SC "%0, %1\n"
		"	beqz	%0, 2f\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	.previous\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# clear_bit\n"
		"	and	%0, %2\n"
		"	" __SC "%0, %1\n"
		"	beqz	%0, 2f\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	.previous\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}
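/*
 * Example (illustrative; "foo_flags" and FOO_PENDING are assumptions of this
 * sketch): when clearing a bit publishes completion or releases a lock,
 * order the earlier stores first:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(FOO_PENDING, &foo_flags);
 *
 * clear_bit_unlock() above bundles exactly this pattern: the barrier
 * followed by the atomic clear.
 */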
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# change_bit\n"
		"	xor	%0, %2\n"
		"	" __SC "%0, %1\n"
		"	beqzl	%0, 1b\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# change_bit\n"
		"	xor	%0, %2\n"
		"	" __SC "%0, %1\n"
		"	beqz	%0, 2f\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	.previous\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}
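/*
 * Example (illustrative; "led_state" and bit 3 are assumptions of this
 * sketch):
 *
 *	change_bit(3, &led_state);
 *
 * Unlike a plain "led_state ^= 1UL << 3", the LL/SC (or irq-off) path above
 * guarantees the toggle is not lost if another CPU updates the same word
 * concurrently.
 */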
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_set_bit\n"
		"	or	%2, %0, %3\n"
		"	" __SC "%2, %1\n"
		"	beqzl	%2, 1b\n"
		"	and	%2, %0, %3\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push\n"
		"	.set	noreorder\n"
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_set_bit\n"
		"	or	%2, %0, %3\n"
		"	" __SC "%2, %1\n"
		"	beqz	%2, 2f\n"
		"	and	%2, %0, %3\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	nop\n"
		"	.previous\n"
		"	.set	pop\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
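/*
 * Example (illustrative; "init_done" is an assumed caller-side word): run
 * one-time setup exactly once even if several CPUs race to get here.
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		foo_do_init();
 *
 * Only the first caller sees the old value 0; the smp_llsc_mb() calls before
 * and after the operation provide the documented barrier semantics.
 */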
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_set_bit\n"
		"	or	%2, %0, %3\n"
		"	" __SC "%2, %1\n"
		"	beqzl	%2, 1b\n"
		"	and	%2, %0, %3\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push\n"
		"	.set	noreorder\n"
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_set_bit\n"
		"	or	%2, %0, %3\n"
		"	" __SC "%2, %1\n"
		"	beqz	%2, 2f\n"
		"	and	%2, %0, %3\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	nop\n"
		"	.previous\n"
		"	.set	pop\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
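/*
 * Example (illustrative; "lock_word" and bit 0 are assumptions of this
 * sketch): a simple bit lock paired with clear_bit_unlock() above.
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(0, &lock_word);
 *
 * Acquire ordering comes from the smp_llsc_mb() after the operation here;
 * release ordering comes from clear_bit_unlock().
 */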
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit\n"
		"	or	%2, %0, %3\n"
		"	xor	%2, %3\n"
		"	" __SC "%2, %1\n"
		"	beqzl	%2, 1b\n"
		"	and	%2, %0, %3\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_clear_bit\n"
		"	" __EXT "%2, %0, %3, 1\n"
		"	" __INS "%0, $0, %3, 1\n"
		"	" __SC "%0, %1\n"
		"	beqz	%0, 2f\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	.previous\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "ir" (bit), "m" (*m)
		: "memory");
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push\n"
		"	.set	noreorder\n"
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit\n"
		"	or	%2, %0, %3\n"
		"	xor	%2, %3\n"
		"	" __SC "%2, %1\n"
		"	beqz	%2, 2f\n"
		"	and	%2, %0, %3\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	nop\n"
		"	.previous\n"
		"	.set	pop\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
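/*
 * Example (illustrative; "pending" and FOO_WORK are assumptions of this
 * sketch): consume a pending-work flag so that exactly one CPU handles it.
 *
 *	if (test_and_clear_bit(FOO_WORK, &pending))
 *		foo_handle_work();
 *
 * On MIPS R2 with a constant bit number, the EXT/INS variant above reads the
 * old bit and clears it within a single LL/SC sequence.
 */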
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_change_bit\n"
		"	xor	%2, %0, %3\n"
		"	" __SC "%2, %1\n"
		"	beqzl	%2, 1b\n"
		"	and	%2, %0, %3\n"
		"	.set	mips0\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push\n"
		"	.set	noreorder\n"
		"	.set	mips3\n"
		"1:	" __LL "%0, %1		# test_and_change_bit\n"
		"	xor	%2, %0, %3\n"
		"	" __SC "\t%2, %1\n"
		"	beqz	%2, 2f\n"
		"	and	%2, %0, %3\n"
		"	.subsection 2\n"
		"2:	b	1b\n"
		"	nop\n"
		"	.previous\n"
		"	.set	pop\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
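/*
 * Example (illustrative; "ping_pong" is an assumed caller-side word):
 * atomically flip a selector bit and learn which side was active before.
 *
 *	static unsigned long ping_pong;
 *
 *	int was_set = test_and_change_bit(0, &ping_pong);
 *
 * The return value is the bit's state before the toggle, so two racing
 * callers are guaranteed to observe different old values.
 */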
#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
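/*
 * Example (illustrative; "word" and LOCK_BIT are assumptions of this sketch):
 * the non-atomic variant is only safe when no other CPU can modify the word
 * while the lock is held, e.g. when every other bit in it is protected by
 * that same lock:
 *
 *	... critical section that owns all bits of "word" ...
 *	__clear_bit_unlock(LOCK_BIT, &word);
 *
 * The smp_mb() supplies the release ordering; the clear itself is a plain
 * read-modify-write.
 */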
/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1
 * bit in a word.  Returns -1 if no 1 bit exists.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push\n"
		"	.set	mips32\n"
		"	clz	%0, %1\n"
		"	.set	pop\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push\n"
		"	.set	mips64\n"
		"	dclz	%0, %1\n"
		"	.set	pop\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
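/*
 * Worked examples (following the definition above):
 *
 *	__fls(1)		== 0
 *	__fls(6)		== 2	(0b110, highest set bit at position 2)
 *	__fls(0x80000000UL)	== 31
 *
 * On CPUs with CLZ/DCLZ this is a single instruction; otherwise the
 * shift-and-test fallback narrows the position in log2 steps.
 */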
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
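/*
 * Worked example: "word & -word" isolates the lowest set bit, so passing the
 * result to __fls() yields the index of the lowest set bit.
 *
 *	__ffs(0x18)	== 3	(0x18 & -0x18 == 0x08; __fls(0x08) == 3)
 *	__ffs(1)	== 0
 */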
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
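/*
 * Worked examples (matching the comment above):
 *
 *	fls(0)		== 0
 *	fls(1)		== 1
 *	fls(0x80000000)	== 32
 *
 * fls() is 1-based with a defined zero case, whereas __fls() above returns
 * a 0-based index.
 */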
#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
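/*
 * Worked example: for any non-zero x, ffs(x) == __ffs(x) + 1, since
 * "word & -word" isolates the lowest set bit and fls() reports its
 * 1-based position.
 *
 *	ffs(0)		== 0
 *	ffs(0x10)	== 5
 */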
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */