/thirdparty/breakpad/third_party/libdisasm/ia32_insn.c

http://github.com/tomahawk-player/tomahawk

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "qword.h"
#include "ia32_insn.h"
#include "ia32_opcode_tables.h"

#include "ia32_reg.h"
#include "ia32_operand.h"
#include "ia32_implicit.h"
#include "ia32_settings.h"

#include "libdis.h"
extern ia32_table_desc_t ia32_tables[];
extern ia32_settings_t ia32_settings;

#define IS_SP( op )  ( op->type == op_register && \
        ( op->data.reg.id == REG_ESP_INDEX || \
          op->data.reg.alias == REG_ESP_INDEX ) )
#define IS_IMM( op ) ( op->type == op_immediate )
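
/* Note: IS_SP matches an operand that is ESP itself (reg.id) or a register
 * whose alias resolves to ESP -- e.g. SP, assuming the register table marks
 * SP as an alias of ESP -- so the stack-tracking code below treats both
 * forms as "the stack pointer". */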

#ifdef WIN32
# define INLINE
#else
# define INLINE inline
#endif

/* for calculating stack modification based on an operand */
static INLINE int32_t long_from_operand( x86_op_t *op ) {

        if (! IS_IMM(op) ) {
                return 0L;
        }

        switch ( op->datatype ) {
                case op_byte:
                        return (int32_t) op->data.sbyte;
                case op_word:
                        return (int32_t) op->data.sword;
                case op_qword:
                        return (int32_t) op->data.sqword;
                case op_dword:
                        return op->data.sdword;
                default:
                        /* these are not used in stack insns */
                        break;
        }

        return 0L;
}

/* determine what this insn does to the stack */
static void ia32_stack_mod(x86_insn_t *insn) {
        x86_op_t *dest, *src = NULL;

        if (! insn || ! insn->operands ) {
                return;
        }

        dest = &insn->operands->op;
        if ( dest ) {
                src = &insn->operands->next->op;
        }

        insn->stack_mod = 0;
        insn->stack_mod_val = 0;

        switch ( insn->type ) {
                case insn_call:
                case insn_callcc:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = insn->addr_size * -1;
                        break;
                case insn_push:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = insn->addr_size * -1;
                        break;
                case insn_return:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = insn->addr_size;
                        /* fall through: the int/iret cases below only break,
                         * so the values set here are left untouched */
                case insn_int: case insn_intcc:
                case insn_iret:
                        break;
                case insn_pop:
                        insn->stack_mod = 1;
                        if (! IS_SP( dest ) ) {
                                insn->stack_mod_val = insn->op_size;
                        } /* else we don't know the stack change in a pop esp */
                        break;
                case insn_enter:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = 0; /* TODO : FIX */
                        break;
                case insn_leave:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = 0; /* TODO : FIX */
                        break;
                case insn_pushregs:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = 0; /* TODO : FIX */
                        break;
                case insn_popregs:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = 0; /* TODO : FIX */
                        break;
                case insn_pushflags:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = 0; /* TODO : FIX */
                        break;
                case insn_popflags:
                        insn->stack_mod = 1;
                        insn->stack_mod_val = 0; /* TODO : FIX */
                        break;
                case insn_add:
                        if ( IS_SP( dest ) ) {
                                insn->stack_mod = 1;
                                insn->stack_mod_val = long_from_operand( src );
                        }
                        break;
                case insn_sub:
                        if ( IS_SP( dest ) ) {
                                insn->stack_mod = 1;
                                insn->stack_mod_val = long_from_operand( src );
                                insn->stack_mod_val *= -1;
                        }
                        break;
                case insn_inc:
                        if ( IS_SP( dest ) ) {
                                insn->stack_mod = 1;
                                insn->stack_mod_val = 1;
                        }
                        break;
                case insn_dec:
                        if ( IS_SP( dest ) ) {
                                insn->stack_mod = 1;
                                /* DEC ESP lowers the stack pointer */
                                insn->stack_mod_val = -1;
                        }
                        break;
                case insn_mov: case insn_movcc:
                case insn_xchg: case insn_xchgcc:
                case insn_mul: case insn_div:
                case insn_shl: case insn_shr:
                case insn_rol: case insn_ror:
                case insn_and: case insn_or:
                case insn_not: case insn_neg:
                case insn_xor:
                        if ( IS_SP( dest ) ) {
                                insn->stack_mod = 1;
                        }
                        break;
                default:
                        break;
        }

        if (! strcmp("enter", insn->mnemonic) ) {
                insn->stack_mod = 1;
        } else if (! strcmp("leave", insn->mnemonic) ) {
                insn->stack_mod = 1;
        }

        /* for mov, etc we return 0 -- unknown stack mod */
        return;
}
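
/* Illustrative examples (not exercised anywhere in this file): for a 32-bit
 * "sub esp, 0x10" the source operand is an immediate, so stack_mod_val
 * becomes -16; for "push eax" with a 4-byte address size it becomes -4; for
 * "mov esp, ebp" stack_mod is set but stack_mod_val stays 0, meaning
 * "modified by an unknown amount". */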

/* get the cpu details for this insn from cpu flags int */
static void ia32_handle_cpu( x86_insn_t *insn, unsigned int cpu ) {
        insn->cpu = (enum x86_insn_cpu) CPU_MODEL(cpu);
        insn->isa = (enum x86_insn_isa) (ISA_SUBSET(cpu)) >> 16;
        return;
}

/* handle mnemonic type and group */
static void ia32_handle_mnemtype(x86_insn_t *insn, unsigned int mnemtype) {
        unsigned int type = mnemtype & ~INS_FLAG_MASK;
        insn->group = (enum x86_insn_group) (INS_GROUP(type)) >> 12;
        insn->type = (enum x86_insn_type) INS_TYPE(type);
        return;
}

static void ia32_handle_notes(x86_insn_t *insn, unsigned int notes) {
        insn->note = (enum x86_insn_note) notes;
        return;
}

static void ia32_handle_eflags( x86_insn_t *insn, unsigned int eflags) {
        unsigned int flags;

        /* handle flags affected */
        flags = INS_FLAGS_TEST(eflags);

        /* handle weird OR cases */
        /* these are either JLE (ZF | SF<>OF) or JBE (CF | ZF) */
        if (flags & INS_TEST_OR) {
                flags &= ~INS_TEST_OR;
                if ( flags & INS_TEST_ZERO ) {
                        flags &= ~INS_TEST_ZERO;
                        if ( flags & INS_TEST_CARRY ) {
                                flags &= ~INS_TEST_CARRY;
                                flags |= (int)insn_carry_or_zero_set;
                        } else if ( flags & INS_TEST_SFNEOF ) {
                                flags &= ~INS_TEST_SFNEOF;
                                flags |= (int)insn_zero_set_or_sign_ne_oflow;
                        }
                }
        }

        insn->flags_tested = (enum x86_flag_status) flags;
        insn->flags_set = (enum x86_flag_status) INS_FLAGS_SET(eflags) >> 16;

        return;
}
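
/* Example of the OR collapsing above: JBE tests "CF=1 OR ZF=1"; rather than
 * reporting two separate test bits, the pair is folded into the single
 * insn_carry_or_zero_set status value, and JLE is likewise folded into
 * insn_zero_set_or_sign_ne_oflow. */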

static void ia32_handle_prefix( x86_insn_t *insn, unsigned int prefixes ) {

        insn->prefix = (enum x86_insn_prefix) prefixes & PREFIX_MASK; // >> 20;
        if (! (insn->prefix & PREFIX_PRINT_MASK) ) {
                /* no printable prefixes */
                insn->prefix = insn_no_prefix;
        }

        /* concat all prefix strings */
        if ( (unsigned int)insn->prefix & PREFIX_LOCK ) {
                strncat(insn->prefix_string, "lock ", 32 -
                        strlen(insn->prefix_string));
        }

        if ( (unsigned int)insn->prefix & PREFIX_REPNZ ) {
                strncat(insn->prefix_string, "repnz ", 32 -
                        strlen(insn->prefix_string));
        } else if ( (unsigned int)insn->prefix & PREFIX_REPZ ) {
                strncat(insn->prefix_string, "repz ", 32 -
                        strlen(insn->prefix_string));
        }

        return;
}

static void reg_32_to_16( x86_op_t *op, x86_insn_t *insn, void *arg ) {

        /* if this is a 32-bit register and it is a general register ... */
        if ( op->type == op_register && op->data.reg.size == 4 &&
             (op->data.reg.type & reg_gen) ) {
                /* WORD registers are 8 indices off from DWORD registers */
                ia32_handle_register( &(op->data.reg),
                                      op->data.reg.id + 8 );
        }
}

static void handle_insn_metadata( x86_insn_t *insn, ia32_insn_t *raw_insn ) {
        ia32_handle_mnemtype( insn, raw_insn->mnem_flag );
        ia32_handle_notes( insn, raw_insn->notes );
        ia32_handle_eflags( insn, raw_insn->flags_effected );
        ia32_handle_cpu( insn, raw_insn->cpu );
        ia32_stack_mod( insn );
}

static size_t ia32_decode_insn( unsigned char *buf, size_t buf_len,
                                ia32_insn_t *raw_insn, x86_insn_t *insn,
                                unsigned int prefixes ) {
        size_t size, op_size;
        unsigned char modrm;

        /* this should never happen, but just in case... */
        if ( raw_insn->mnem_flag == INS_INVALID ) {
                return 0;
        }

        if (ia32_settings.options & opt_16_bit) {
                insn->op_size = ( prefixes & PREFIX_OP_SIZE ) ? 4 : 2;
                insn->addr_size = ( prefixes & PREFIX_ADDR_SIZE ) ? 4 : 2;
        } else {
                insn->op_size = ( prefixes & PREFIX_OP_SIZE ) ? 2 : 4;
                insn->addr_size = ( prefixes & PREFIX_ADDR_SIZE ) ? 2 : 4;
        }
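
        /* In other words: the default operand/address size is 4 bytes in
         * 32-bit mode and 2 bytes in 16-bit mode, and each size prefix
         * selects the non-default size.  PREFIX_OP_SIZE / PREFIX_ADDR_SIZE
         * presumably correspond to the 0x66 / 0x67 prefix bytes, so e.g. a
         * 0x66 prefix in 32-bit mode yields op_size == 2. */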

        /* ++++ 1. Copy mnemonic and mnemonic-flags to CODE struct */
        if ((ia32_settings.options & opt_att_mnemonics) && raw_insn->mnemonic_att[0]) {
                strncpy( insn->mnemonic, raw_insn->mnemonic_att, 16 );
        }
        else {
                strncpy( insn->mnemonic, raw_insn->mnemonic, 16 );
        }
        ia32_handle_prefix( insn, prefixes );
        handle_insn_metadata( insn, raw_insn );

        /* prefetch the next byte in case it is a modr/m byte -- saves
         * worrying about whether the 'mod/rm' operand or the 'reg' operand
         * occurs first */
        modrm = GET_BYTE( buf, buf_len );

        /* ++++ 2. Decode Explicit Operands */
        /* Intel uses up to 3 explicit operands in its instructions;
         * the first is 'dest', the second is 'src', and the third
         * is an additional source value (usually an immediate value,
         * e.g. in the MUL instructions). These three explicit operands
         * are encoded in the opcode tables, even if they are not used
         * by the instruction. Additional implicit operands are stored
         * in a supplemental table and are handled later. */

        op_size = ia32_decode_operand( buf, buf_len, insn, raw_insn->dest,
                                       raw_insn->dest_flag, prefixes, modrm );
        /* advance buffer, increase size if necessary */
        buf += op_size;
        buf_len -= op_size;
        size = op_size;

        op_size = ia32_decode_operand( buf, buf_len, insn, raw_insn->src,
                                       raw_insn->src_flag, prefixes, modrm );
        buf += op_size;
        buf_len -= op_size;
        size += op_size;

        op_size = ia32_decode_operand( buf, buf_len, insn, raw_insn->aux,
                                       raw_insn->aux_flag, prefixes, modrm );
        size += op_size;

        /* ++++ 3. Decode Implicit Operands */
        /* apply implicit operands */
        ia32_insn_implicit_ops( insn, raw_insn->implicit_ops );
        /* we have one small inelegant hack here, to deal with
         * the two prefixes that have implicit operands. If Intel
         * adds more, we'll change the algorithm to suit :) */
        if ( (prefixes & PREFIX_REPZ) || (prefixes & PREFIX_REPNZ) ) {
                ia32_insn_implicit_ops( insn, IDX_IMPLICIT_REP );
        }

        /* 16-bit hack: foreach operand, if 32-bit reg, make 16-bit reg */
        if ( insn->op_size == 2 ) {
                x86_operand_foreach( insn, reg_32_to_16, NULL, op_any );
        }

        return size;
}

/* convenience routine */
#define USES_MOD_RM(flag) \
        (flag == ADDRMETH_E || flag == ADDRMETH_M || flag == ADDRMETH_Q || \
         flag == ADDRMETH_W || flag == ADDRMETH_R)

static int uses_modrm_flag( unsigned int flag ) {
        unsigned int meth;

        if ( flag == ARG_NONE ) {
                return 0;
        }

        meth = (flag & ADDRMETH_MASK);
        if ( USES_MOD_RM(meth) ) {
                return 1;
        }

        return 0;
}
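
/* Background note (summarizing Intel's operand-encoding letters, for
 * reference only): addressing methods E and M describe a register-or-memory
 * or memory-only operand encoded in ModR/M, Q and W the MMX- or
 * XMM-register-or-memory forms, and R a register selected by the ModR/M
 * byte -- which is why exactly these five imply that a ModR/M byte is
 * present in the instruction stream. */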

/* This routine performs the actual byte-by-byte opcode table lookup.
 * Originally it was pretty simple: get a byte, adjust it to a proper
 * index into the table, then check the table row at that index to
 * determine what to do next. But is anything that simple with Intel?
 * This is now a huge, convoluted mess, mostly of bitter comments. */

/* buf: pointer to next byte to read from stream
 * buf_len: length of buf
 * table: index of table to use for lookups
 * raw_insn: output pointer that receives opcode definition
 * prefixes: output integer that is encoded with prefixes in insn
 * returns : number of bytes consumed from stream during lookup */
size_t ia32_table_lookup( unsigned char *buf, size_t buf_len,
                          unsigned int table, ia32_insn_t **raw_insn,
                          unsigned int *prefixes ) {
        unsigned char *next, op = buf[0];       /* byte value -- 'opcode' */
        size_t size = 1, sub_size = 0, next_len;
        ia32_table_desc_t *table_desc;
        unsigned int subtable, prefix = 0, recurse_table = 0;

        table_desc = &ia32_tables[table];

        op = GET_BYTE( buf, buf_len );

        if ( table_desc->type == tbl_fpu && op > table_desc->maxlim) {
                /* one of the fucking FPU tables out of the 00-BF range */
                /* OK, this is a bit of a hack -- the proper way would
                 * have been to use subtables in the 00-BF FPU opcode tables,
                 * but that is rather wasteful of space... */
                table_desc = &ia32_tables[table + 1];
        }

        /* PERFORM TABLE LOOKUP */

        /* ModR/M trick: shift extension bits into lowest bits of byte */
        /* Note: non-ModR/M tables have a shift value of 0 */
        op >>= table_desc->shift;

        /* ModR/M trick: mask out high bits to turn extension into an index */
        /* Note: non-ModR/M tables have a mask value of 0xFF */
        op &= table_desc->mask;
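
        /* For instance (illustrative, assuming the usual layout of the
         * opcode-extension tables): for a "group" table such as the one
         * reached from opcodes 80-83, shift is 3 and mask is 0x07, so the
         * ModR/M 'reg' bits (5:3) become the index that selects between
         * ADD/OR/ADC/SBB/AND/SUB/XOR/CMP. */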

        /* Sparse table trick: check that byte is <= max value */
        /* Note: full (256-entry) tables have a maxlim of 255 */
        if ( op > table_desc->maxlim ) {
                /* this is a partial table, truncated at the tail,
                   and op is out of range! */
                return INVALID_INSN;
        }

        /* Sparse table trick: check that byte is >= min value */
        /* Note: full (256-entry) tables have a minlim of 0 */
        if ( table_desc->minlim > op ) {
                /* this is a partial table, truncated at the head,
                   and op is out of range! */
                return INVALID_INSN;
        }

        /* adjust op to be an offset from table index 0 */
        op -= table_desc->minlim;

        /* Yay! 'op' is now fully adjusted to be an index into 'table' */
        *raw_insn = &(table_desc->table[op]);
        //printf("BYTE %X TABLE %d OP %X\n", buf[0], table, op );

        if ( (*raw_insn)->mnem_flag & INS_FLAG_PREFIX ) {
                prefix = (*raw_insn)->mnem_flag & PREFIX_MASK;
        }

        /* handle escape to a multibyte/coproc/extension/etc table */
        /* NOTE: if insn is a prefix and has a subtable, then we
         *       only recurse if this is the first prefix byte --
         *       that is, if *prefixes is 0.
         * NOTE also that suffix tables are handled later */
        subtable = (*raw_insn)->table;

        if ( subtable && ia32_tables[subtable].type != tbl_suffix &&
             (! prefix || ! *prefixes) ) {

                if ( ia32_tables[subtable].type == tbl_ext_ext ||
                     ia32_tables[subtable].type == tbl_fpu_ext ) {
                        /* opcode extension: reuse current byte in buffer */
                        next = buf;
                        next_len = buf_len;
                } else {
                        /* "normal" opcode: advance to next byte in buffer */
                        if ( buf_len > 1 ) {
                                next = &buf[1];
                                next_len = buf_len - 1;
                        }
                        else {
                                // buffer is truncated
                                return INVALID_INSN;
                        }
                }

                /* we encountered a multibyte opcode: recurse using the
                 * table specified in the opcode definition */
                sub_size = ia32_table_lookup( next, next_len, subtable,
                                              raw_insn, prefixes );

                /* SSE/prefix hack: if the original opcode def was a
                 * prefix that specified a subtable, and the subtable
                 * lookup returned a valid insn, then we have encountered
                 * an SSE opcode definition; otherwise, we pretend we
                 * never did the subtable lookup, and deal with the
                 * prefix normally later */
                if ( prefix && ( sub_size == INVALID_INSN ||
                     INS_TYPE((*raw_insn)->mnem_flag) == INS_INVALID ) ) {
                        /* this is a prefix, not an SSE insn:
                         * lookup next byte in main table,
                         * subsize will be reset during the
                         * main table lookup */
                        recurse_table = 1;
                } else {
                        /* this is either a subtable (two-byte) insn
                         * or an invalid insn: either way, set prefix
                         * to NULL and end the opcode lookup */
                        prefix = 0;
                        // short-circuit lookup on invalid insn
                        if (sub_size == INVALID_INSN) return INVALID_INSN;
                }
        } else if ( prefix ) {
                recurse_table = 1;
        }

        /* by default, we assume that we have the opcode definition,
         * and there is no need to recurse on the same table, but
         * if we do then a prefix was encountered... */

        if ( recurse_table ) {
                /* this must have been a prefix: use the same table for
                 * lookup of the next byte */
                sub_size = ia32_table_lookup( &buf[1], buf_len - 1, table,
                                              raw_insn, prefixes );

                // short-circuit lookup on invalid insn
                if (sub_size == INVALID_INSN) return INVALID_INSN;

                /* a bit of a hack for branch hints */
                if ( prefix & BRANCH_HINT_MASK ) {
                        if ( INS_GROUP((*raw_insn)->mnem_flag) == INS_EXEC ) {
                                /* segment override prefixes are invalid for
                                 * all branch instructions, so delete them */
                                prefix &= ~PREFIX_REG_MASK;
                        } else {
                                prefix &= ~BRANCH_HINT_MASK;
                        }
                }

                /* apply prefix to instruction */
                /* TODO: implement something enforcing prefix groups */
                (*prefixes) |= prefix;
        }

        /* if this lookup was in a ModR/M table, then an opcode byte is
         * NOT consumed: subtract accordingly. NOTE that if none of the
         * operands used the ModR/M, then we need to consume the byte
         * here, but ONLY in the 'top-level' opcode extension table */
        if ( table_desc->type == tbl_ext_ext ) {
                /* extensions-to-extensions never consume a byte */
                --size;
        } else if ( (table_desc->type == tbl_extension ||
                     table_desc->type == tbl_fpu ||
                     table_desc->type == tbl_fpu_ext ) &&
                    /* extensions that have an operand encoded in ModR/M
                     * never consume a byte */
                    (uses_modrm_flag((*raw_insn)->dest_flag) ||
                     uses_modrm_flag((*raw_insn)->src_flag) ) ) {
                --size;
        }

        size += sub_size;

        return size;
}
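
/* Rough example of the recursion above (illustrative only; the table layout
 * itself is defined elsewhere in the opcode tables): decoding "0F AF /r"
 * (IMUL r32, r/m32) first hits the 0F escape entry in the main table, which
 * names a two-byte subtable; the routine recurses with the next byte (AF)
 * and reports two bytes consumed.  Decoding "F3 0F 10 ..." first hits F3, a
 * prefix entry with a subtable: if the subtable lookup yields a valid insn
 * it is treated as an SSE opcode (MOVSS), otherwise F3 is handled as a plain
 * REP prefix and the lookup restarts on the following byte in the same
 * table. */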

static size_t handle_insn_suffix( unsigned char *buf, size_t buf_len,
                                  ia32_insn_t *raw_insn, x86_insn_t *insn ) {
        ia32_table_desc_t *table_desc;
        ia32_insn_t *sfx_insn;
        size_t size;
        unsigned int prefixes = 0;

        table_desc = &ia32_tables[raw_insn->table];

        size = ia32_table_lookup( buf, buf_len, raw_insn->table, &sfx_insn,
                                  &prefixes );
        if (size == INVALID_INSN || sfx_insn->mnem_flag == INS_INVALID ) {
                return 0;
        }

        strncpy( insn->mnemonic, sfx_insn->mnemonic, 16 );
        handle_insn_metadata( insn, sfx_insn );

        return 1;
}
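
/* Context for the suffix handling above: AMD 3DNow! instructions are encoded
 * as 0F 0F <ModR/M> [addressing bytes] <suffix>, where the trailing byte --
 * not the opcode -- selects the actual mnemonic (PFADD, PAVGUSB, ...).  The
 * routine above looks that byte up in the suffix table and overwrites the
 * placeholder mnemonic and metadata accordingly. */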

/* invalid instructions are handled by returning 0 [error] from the
 * function, setting the size of the insn to 1 byte, and copying
 * the byte at the start of the invalid insn into the x86_insn_t.
 * if the caller is saving the x86_insn_t for invalid instructions,
 * instead of discarding them, this will maintain a consistent
 * address space in the x86_insn_ts */

/* this function is called by the controlling disassembler, so its name and
 * calling convention cannot be changed */

/* buf points to the loc of the current opcode (start of the
 * instruction) in the instruction stream. The instruction
 * stream is assumed to be a buffer of bytes read directly
 * from the file for the purpose of disassembly; a mem-mapped
 * file is ideal for this.
 * insn points to a code structure to be filled by instr_decode
 * returns the size of the decoded instruction in bytes */
size_t ia32_disasm_addr( unsigned char *buf, size_t buf_len,
                         x86_insn_t *insn ) {
        ia32_insn_t *raw_insn = NULL;
        unsigned int prefixes = 0;
        size_t size, sfx_size;

        if ( (ia32_settings.options & opt_ignore_nulls) && buf_len > 3 &&
             !buf[0] && !buf[1] && !buf[2] && !buf[3]) {
                /* IF IGNORE_NULLS is set AND
                 * first 4 bytes in the instruction stream are NULL
                 * THEN return 0 (END_OF_DISASSEMBLY) */
                /* TODO: set errno */
                MAKE_INVALID( insn, buf );
                return 0;       /* 4 00 bytes in a row? This isn't code! */
        }

        /* Perform recursive table lookup starting with main table (0) */
        size = ia32_table_lookup(buf, buf_len, idx_Main, &raw_insn, &prefixes);
        if ( size == INVALID_INSN || size > buf_len || raw_insn->mnem_flag == INS_INVALID ) {
                MAKE_INVALID( insn, buf );
                /* TODO: set errno */
                return 0;
        }

        /* We now have the opcode itself figured out: we can decode
         * the rest of the instruction. */
        size += ia32_decode_insn( &buf[size], buf_len - size, raw_insn, insn,
                                  prefixes );

        if ( raw_insn->mnem_flag & INS_FLAG_SUFFIX ) {
                /* AMD 3DNow! suffix -- get proper operand type here */
                sfx_size = handle_insn_suffix( &buf[size], buf_len - size,
                                               raw_insn, insn );
                if (! sfx_size ) {
                        /* TODO: set errno */
                        MAKE_INVALID( insn, buf );
                        return 0;
                }

                size += sfx_size;
        }

        if (! size ) {
                /* invalid insn */
                MAKE_INVALID( insn, buf );
                return 0;
        }

        insn->size = size;
        return size;            /* return size of instruction in bytes */
}
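
/* Sketch of a hypothetical caller (the usual entry points are presumably the
 * x86_* wrappers declared in libdis.h; this only illustrates the
 * return-value convention documented above):
 *
 *      x86_insn_t insn;
 *      memset( &insn, 0, sizeof(insn) );
 *      size_t len = ia32_disasm_addr( buf, buf_len, &insn );
 *      if ( len == 0 ) {
 *              // invalid or unreadable instruction: insn was marked invalid
 *              // and covers a single byte
 *      } else {
 *              // 'len' bytes were consumed; insn.size == len
 *      }
 */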