Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
let op_sp = asm.operands[operand_idx].1;
use std::collections::hash_map::Entry;

use rustc_ast::*;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_errors::msg;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_session::errors::feature_err;
use rustc_span::{Span, sym};
use rustc_target::asm;

use super::LoweringContext;
use super::errors::{
    AbiSpecifiedMultipleTimes, AttSyntaxOnlyX86, ClobberAbiNotSupported,
    InlineAsmUnsupportedTarget, InvalidAbiClobberAbi, InvalidAsmTemplateModifierConst,
    InvalidAsmTemplateModifierLabel, InvalidAsmTemplateModifierRegClass,
    InvalidAsmTemplateModifierRegClassSub, InvalidAsmTemplateModifierSym, InvalidRegister,
    InvalidRegisterClass, RegisterClassOnlyClobber, RegisterClassOnlyClobberStable,
    RegisterConflict,
};
use crate::{AllowReturnTypeNotation, ImplTraitContext, ImplTraitPosition, ParamMode};

impl<'hir> LoweringContext<'_, 'hir> {
    /// Lowers an AST `asm!`/`global_asm!` invocation to HIR.
    ///
    /// This performs, in order:
    /// 1. target/feature-gate validation (unsupported target, unstable
    ///    architectures, `att_syntax` on non-x86, `may_unwind`),
    /// 2. parsing and de-duplication of `clobber_abi` ABIs,
    /// 3. lowering of each operand (registers, register classes, exprs,
    ///    `sym`, `const`, labels),
    /// 4. validation of template modifiers against operand register classes,
    /// 5. clobber-only register-class checks and explicit-register conflict
    ///    detection,
    /// 6. synthesis of late-output clobber operands for `clobber_abi`,
    /// 7. feature-gating of label + output operand combinations
    ///    (`asm_goto_with_outputs`).
    ///
    /// Errors are reported via diagnostics; lowering always produces a valid
    /// HIR node (using `Err` register/class placeholders where needed) so
    /// later phases can proceed.
    pub(crate) fn lower_inline_asm(
        &mut self,
        sp: Span,
        asm: &InlineAsm,
    ) -> &'hir hir::InlineAsm<'hir> {
        // Rustdoc needs to support asm! from foreign architectures: don't try
        // lowering the register constraints in this case.
        let asm_arch =
            if self.tcx.sess.opts.actually_rustdoc { None } else { self.tcx.sess.asm_arch };
        if asm_arch.is_none() && !self.tcx.sess.opts.actually_rustdoc {
            self.dcx().emit_err(InlineAsmUnsupportedTarget { span: sp });
        }
        if let Some(asm_arch) = asm_arch {
            // Inline assembly is currently only stable for these architectures.
            // (See also compiletest's `has_asm_support`.)
            let is_stable = matches!(
                asm_arch,
                asm::InlineAsmArch::X86
                    | asm::InlineAsmArch::X86_64
                    | asm::InlineAsmArch::Arm
                    | asm::InlineAsmArch::AArch64
                    | asm::InlineAsmArch::Arm64EC
                    | asm::InlineAsmArch::RiscV32
                    | asm::InlineAsmArch::RiscV64
                    | asm::InlineAsmArch::LoongArch32
                    | asm::InlineAsmArch::LoongArch64
                    | asm::InlineAsmArch::S390x
                    | asm::InlineAsmArch::PowerPC
                    | asm::InlineAsmArch::PowerPC64
            );
            // Unstable architectures need the `asm_experimental_arch` feature,
            // unless the expansion site is allowed to use it internally
            // (e.g. macros marked `#[allow_internal_unstable]`).
            if !is_stable
                && !self.tcx.features().asm_experimental_arch()
                && sp
                    .ctxt()
                    .outer_expn_data()
                    .allow_internal_unstable
                    .filter(|features| features.contains(&sym::asm_experimental_arch))
                    .is_none()
            {
                feature_err(
                    &self.tcx.sess,
                    sym::asm_experimental_arch,
                    sp,
                    msg!("inline assembly is not stable yet on this architecture"),
                )
                .emit();
            }
        }
        let allow_experimental_reg = self.tcx.features().asm_experimental_reg();
        // `att_syntax` only makes sense on x86 targets.
        if asm.options.contains(InlineAsmOptions::ATT_SYNTAX)
            && !matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64))
            && !self.tcx.sess.opts.actually_rustdoc
        {
            self.dcx().emit_err(AttSyntaxOnlyX86 { span: sp });
        }
        // `may_unwind` is gated behind the `asm_unwind` feature.
        if asm.options.contains(InlineAsmOptions::MAY_UNWIND) && !self.tcx.features().asm_unwind() {
            feature_err(
                &self.tcx.sess,
                sym::asm_unwind,
                sp,
                msg!("the `may_unwind` option is unstable"),
            )
            .emit();
        }

        // Parse each `clobber_abi` name, de-duplicating by the resolved ABI
        // (distinct names may map to the same ABI on a given target).
        let mut clobber_abis = FxIndexMap::default();
        if let Some(asm_arch) = asm_arch {
            for (abi_name, abi_span) in &asm.clobber_abis {
                match asm::InlineAsmClobberAbi::parse(
                    asm_arch,
                    &self.tcx.sess.target,
                    &self.tcx.sess.unstable_target_features,
                    *abi_name,
                ) {
                    Ok(abi) => {
                        // If the abi was already in the list, emit an error
                        match clobber_abis.get(&abi) {
                            Some((prev_name, prev_sp)) => {
                                // Multiple different abi names may actually be the same ABI
                                // If the specified ABIs are not the same name, alert the user that they resolve to the same ABI
                                let source_map = self.tcx.sess.source_map();
                                let equivalent = source_map.span_to_snippet(*prev_sp)
                                    != source_map.span_to_snippet(*abi_span);

                                self.dcx().emit_err(AbiSpecifiedMultipleTimes {
                                    abi_span: *abi_span,
                                    prev_name: *prev_name,
                                    prev_span: *prev_sp,
                                    equivalent,
                                });
                            }
                            None => {
                                clobber_abis.insert(abi, (*abi_name, *abi_span));
                            }
                        }
                    }
                    // An empty supported-ABI list means the target supports
                    // no clobber ABIs at all.
                    Err(&[]) => {
                        self.dcx().emit_err(ClobberAbiNotSupported { abi_span: *abi_span });
                    }
                    Err(supported_abis) => {
                        self.dcx().emit_err(InvalidAbiClobberAbi {
                            abi_span: *abi_span,
                            supported_abis: supported_abis.to_vec().into(),
                        });
                    }
                }
            }
        }

        // Lower operands to HIR. We use dummy register classes if an error
        // occurs during lowering because we still need to be able to produce a
        // valid HIR.
        let sess = self.tcx.sess;
        let mut operands: Vec<_> = asm
            .operands
            .iter()
            .map(|(op, op_sp)| {
                // Lower an explicit register or register class, falling back
                // to the `Err` placeholder on parse failure (or when there is
                // no asm arch, i.e. under rustdoc).
                let lower_reg = |&reg: &_| match reg {
                    InlineAsmRegOrRegClass::Reg(reg) => {
                        asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch {
                            asm::InlineAsmReg::parse(asm_arch, reg).unwrap_or_else(|error| {
                                self.dcx().emit_err(InvalidRegister {
                                    op_span: *op_sp,
                                    reg,
                                    error,
                                });
                                asm::InlineAsmReg::Err
                            })
                        } else {
                            asm::InlineAsmReg::Err
                        })
                    }
                    InlineAsmRegOrRegClass::RegClass(reg_class) => {
                        asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch {
                            asm::InlineAsmRegClass::parse(asm_arch, reg_class).unwrap_or_else(
                                |supported_register_classes| {
                                    self.dcx().emit_err(InvalidRegisterClass {
                                        op_span: *op_sp,
                                        reg_class,
                                        supported_register_classes: supported_register_classes
                                            .to_vec()
                                            .into(),
                                    });
                                    asm::InlineAsmRegClass::Err
                                },
                            )
                        } else {
                            asm::InlineAsmRegClass::Err
                        })
                    }
                };

                let op = match op {
                    InlineAsmOperand::In { reg, expr } => hir::InlineAsmOperand::In {
                        reg: lower_reg(reg),
                        expr: self.lower_expr(expr),
                    },
                    InlineAsmOperand::Out { reg, late, expr } => hir::InlineAsmOperand::Out {
                        reg: lower_reg(reg),
                        late: *late,
                        // `_` discard outputs have no expression.
                        expr: expr.as_ref().map(|expr| self.lower_expr(expr)),
                    },
                    InlineAsmOperand::InOut { reg, late, expr } => hir::InlineAsmOperand::InOut {
                        reg: lower_reg(reg),
                        late: *late,
                        expr: self.lower_expr(expr),
                    },
                    InlineAsmOperand::SplitInOut { reg, late, in_expr, out_expr } => {
                        hir::InlineAsmOperand::SplitInOut {
                            reg: lower_reg(reg),
                            late: *late,
                            in_expr: self.lower_expr(in_expr),
                            out_expr: out_expr.as_ref().map(|expr| self.lower_expr(expr)),
                        }
                    }
                    InlineAsmOperand::Const { anon_const } => hir::InlineAsmOperand::Const {
                        anon_const: self.lower_const_block(anon_const),
                    },
                    InlineAsmOperand::Sym { sym } => {
                        // A `sym` that resolves to a static lowers to
                        // `SymStatic`; anything else (e.g. a function) is
                        // lowered as a path expression via `SymFn`.
                        let static_def_id = self
                            .get_partial_res(sym.id)
                            .and_then(|res| res.full_res())
                            .and_then(|res| match res {
                                Res::Def(DefKind::Static { .. }, def_id) => Some(def_id),
                                _ => None,
                            });

                        if let Some(def_id) = static_def_id {
                            let path = self.lower_qpath(
                                sym.id,
                                &sym.qself,
                                &sym.path,
                                ParamMode::Optional,
                                AllowReturnTypeNotation::No,
                                ImplTraitContext::Disallowed(ImplTraitPosition::Path),
                                None,
                            );
                            hir::InlineAsmOperand::SymStatic { path, def_id }
                        } else {
                            // Replace the InlineAsmSym AST node with an
                            // Expr using the name node id.
                            let expr = Expr {
                                id: sym.id,
                                kind: ExprKind::Path(sym.qself.clone(), sym.path.clone()),
                                span: *op_sp,
                                attrs: AttrVec::new(),
                                tokens: None,
                            };

                            hir::InlineAsmOperand::SymFn { expr: self.lower_expr(&expr) }
                        }
                    }
                    InlineAsmOperand::Label { block } => {
                        hir::InlineAsmOperand::Label { block: self.lower_block(block, false) }
                    }
                };
                (op, self.lower_span(*op_sp))
            })
            .collect();

        // Validate template modifiers against the register classes for the operands
        for p in &asm.template {
            if let InlineAsmTemplatePiece::Placeholder {
                operand_idx,
                modifier: Some(modifier),
                span: placeholder_span,
            } = *p
            {
                // NOTE(review): direct indexing — `operand_idx` is presumably
                // validated against the operand count by the asm parser;
                // confirm before changing.
                let op_sp = asm.operands[operand_idx].1;
                match &operands[operand_idx].0 {
                    hir::InlineAsmOperand::In { reg, .. }
                    | hir::InlineAsmOperand::Out { reg, .. }
                    | hir::InlineAsmOperand::InOut { reg, .. }
                    | hir::InlineAsmOperand::SplitInOut { reg, .. } => {
                        let class = reg.reg_class();
                        // A parse error was already reported for `Err`
                        // classes; don't pile on modifier diagnostics.
                        if class == asm::InlineAsmRegClass::Err {
                            continue;
                        }
                        let valid_modifiers = class.valid_modifiers(asm_arch.unwrap());
                        if !valid_modifiers.contains(&modifier) {
                            let sub = if valid_modifiers.is_empty() {
                                InvalidAsmTemplateModifierRegClassSub::DoesNotSupportModifier {
                                    class_name: class.name(),
                                }
                            } else {
                                InvalidAsmTemplateModifierRegClassSub::SupportModifier {
                                    class_name: class.name(),
                                    modifiers: valid_modifiers.to_vec().into(),
                                }
                            };
                            self.dcx().emit_err(InvalidAsmTemplateModifierRegClass {
                                placeholder_span,
                                op_span: op_sp,
                                modifier: modifier.to_string(),
                                sub,
                            });
                        }
                    }
                    // Modifiers are only meaningful on register operands.
                    hir::InlineAsmOperand::Const { .. } => {
                        self.dcx().emit_err(InvalidAsmTemplateModifierConst {
                            placeholder_span,
                            op_span: op_sp,
                        });
                    }
                    hir::InlineAsmOperand::SymFn { .. }
                    | hir::InlineAsmOperand::SymStatic { .. } => {
                        self.dcx().emit_err(InvalidAsmTemplateModifierSym {
                            placeholder_span,
                            op_span: op_sp,
                        });
                    }
                    hir::InlineAsmOperand::Label { .. } => {
                        self.dcx().emit_err(InvalidAsmTemplateModifierLabel {
                            placeholder_span,
                            op_span: op_sp,
                        });
                    }
                }
            }
        }

        // Explicit registers already used as inputs / outputs, keyed by
        // register, with the index of the operand that claimed them.
        let mut used_input_regs = FxHashMap::default();
        let mut used_output_regs = FxHashMap::default();

        for (idx, &(ref op, op_sp)) in operands.iter().enumerate() {
            if let Some(reg) = op.reg() {
                let reg_class = reg.reg_class();
                if reg_class == asm::InlineAsmRegClass::Err {
                    continue;
                }

                // Some register classes can only be used as clobbers. This
                // means that we disallow passing a value in/out of the asm and
                // require that the operand name an explicit register, not a
                // register class.
                if reg_class.is_clobber_only(asm_arch.unwrap(), allow_experimental_reg)
                    && !op.is_clobber()
                {
                    if allow_experimental_reg || reg_class.is_clobber_only(asm_arch.unwrap(), true)
                    {
                        // always clobber-only
                        self.dcx().emit_err(RegisterClassOnlyClobber {
                            op_span: op_sp,
                            reg_class_name: reg_class.name(),
                        });
                    } else {
                        // clobber-only in stable: suggest the
                        // `asm_experimental_reg` feature.
                        self.tcx
                            .sess
                            .create_feature_err(
                                RegisterClassOnlyClobberStable {
                                    op_span: op_sp,
                                    reg_class_name: reg_class.name(),
                                },
                                sym::asm_experimental_reg,
                            )
                            .emit();
                    }
                    continue;
                }

                // Check for conflicts between explicit register operands.
                if let asm::InlineAsmRegOrRegClass::Reg(reg) = reg {
                    let (input, output) = match op {
                        hir::InlineAsmOperand::In { .. } => (true, false),

                        // Late output do not conflict with inputs, but normal outputs do
                        hir::InlineAsmOperand::Out { late, .. } => (!late, true),

                        hir::InlineAsmOperand::InOut { .. }
                        | hir::InlineAsmOperand::SplitInOut { .. } => (true, true),

                        hir::InlineAsmOperand::Const { .. }
                        | hir::InlineAsmOperand::SymFn { .. }
                        | hir::InlineAsmOperand::SymStatic { .. }
                        | hir::InlineAsmOperand::Label { .. } => {
                            unreachable!("{op:?} is not a register operand");
                        }
                    };

                    // Flag to output the error only once per operand
                    let mut skip = false;

                    // Record `r` as used by operand `idx`, or report a
                    // conflict against the operand that already claimed it.
                    let mut check = |used_regs: &mut FxHashMap<asm::InlineAsmReg, usize>,
                                     input,
                                     r: asm::InlineAsmReg| {
                        match used_regs.entry(r) {
                            Entry::Occupied(o) => {
                                if skip {
                                    return;
                                }
                                skip = true;

                                let idx2 = *o.get();
                                let (ref op2, op_sp2) = operands[idx2];

                                // An in/out pair on the same register is only
                                // a conflict for non-late outputs; point the
                                // note at the output operand's span.
                                let in_out = match (op, op2) {
                                    (
                                        hir::InlineAsmOperand::In { .. },
                                        hir::InlineAsmOperand::Out { late, .. },
                                    )
                                    | (
                                        hir::InlineAsmOperand::Out { late, .. },
                                        hir::InlineAsmOperand::In { .. },
                                    ) => {
                                        assert!(!*late);
                                        let out_op_sp = if input { op_sp2 } else { op_sp };
                                        Some(out_op_sp)
                                    }
                                    _ => None,
                                };
                                let reg_str = |idx| -> &str {
                                    // HIR asm doesn't preserve the original alias string of the explicit register,
                                    // so we have to retrieve it from AST
                                    let (op, _): &(InlineAsmOperand, Span) = &asm.operands[idx];
                                    if let Some(ast::InlineAsmRegOrRegClass::Reg(reg_sym)) =
                                        op.reg()
                                    {
                                        reg_sym.as_str()
                                    } else {
                                        unreachable!("{op:?} is not a register operand");
                                    }
                                };

                                self.dcx().emit_err(RegisterConflict {
                                    op_span1: op_sp,
                                    op_span2: op_sp2,
                                    reg1_name: reg_str(idx),
                                    reg2_name: reg_str(idx2),
                                    in_out,
                                });
                            }
                            Entry::Vacant(v) => {
                                // Only claim the exact register named by the
                                // operand, not registers it merely overlaps.
                                if r == reg {
                                    v.insert(idx);
                                }
                            }
                        }
                    };
                    // Conflicts are checked against every register that
                    // overlaps the named one (e.g. sub-registers).
                    let mut overlapping_with = vec![];
                    reg.overlapping_regs(|r| {
                        overlapping_with.push(r);
                    });
                    for r in overlapping_with {
                        if input {
                            check(&mut used_input_regs, true, r);
                        }
                        if output {
                            check(&mut used_output_regs, false, r);
                        }
                    }
                }
            }
        }

        // If a clobber_abi is specified, add the necessary clobbers to the
        // operands list.
        let mut clobbered = FxHashSet::default();
        for (abi, (_, abi_span)) in clobber_abis {
            for &clobber in abi.clobbered_regs() {
                // Don't emit a clobber for a register already clobbered
                if clobbered.contains(&clobber) {
                    continue;
                }

                // Skip the synthetic clobber if any explicit output operand
                // overlaps this register — the output already covers it.
                let mut overlapping_with = vec![];
                clobber.overlapping_regs(|reg| {
                    overlapping_with.push(reg);
                });
                let output_used =
                    overlapping_with.iter().any(|reg| used_output_regs.contains_key(&reg));

                if !output_used {
                    operands.push((
                        hir::InlineAsmOperand::Out {
                            reg: asm::InlineAsmRegOrRegClass::Reg(clobber),
                            late: true,
                            expr: None,
                        },
                        self.lower_span(abi_span),
                    ));
                    clobbered.insert(clobber);
                }
            }
        }

        // Feature gate checking for `asm_goto_with_outputs`.
        if let Some((_, op_sp)) =
            operands.iter().find(|(op, _)| matches!(op, hir::InlineAsmOperand::Label { .. }))
        {
            // Check if an output operand is used.
            let output_operand_used = operands.iter().any(|(op, _)| {
                matches!(
                    op,
                    hir::InlineAsmOperand::Out { expr: Some(_), .. }
                        | hir::InlineAsmOperand::InOut { .. }
                        | hir::InlineAsmOperand::SplitInOut { out_expr: Some(_), .. }
                )
            });
            if output_operand_used && !self.tcx.features().asm_goto_with_outputs() {
                feature_err(
                    sess,
                    sym::asm_goto_with_outputs,
                    *op_sp,
                    msg!("using both label and output operands for inline assembly is unstable"),
                )
                .emit();
            }
        }

        // Move everything into the arena so the HIR node can borrow it for
        // the rest of compilation.
        let operands = self.arena.alloc_from_iter(operands);
        let template = self.arena.alloc_from_iter(asm.template.iter().cloned());
        let template_strs = self.arena.alloc_from_iter(
            asm.template_strs
                .iter()
                .map(|(sym, snippet, span)| (*sym, *snippet, self.lower_span(*span))),
        );
        let line_spans =
            self.arena.alloc_from_iter(asm.line_spans.iter().map(|span| self.lower_span(*span)));
        let hir_asm = hir::InlineAsm {
            asm_macro: asm.asm_macro,
            template,
            template_strs,
            operands,
            options: asm.options,
            line_spans,
        };
        self.arena.alloc(hir_asm)
    }
}
Same data, no extra tab — call code_get_file + code_get_findings over MCP from Claude/Cursor/Copilot.