more logic tweaks
diceroll123 committed Jan 14, 2024
1 parent b574c93 commit 260ad1d
Showing 4 changed files with 139 additions and 50 deletions.
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B033.py

@@ -2,10 +2,21 @@
# Errors.
###
incorrect_set = {"value1", 23, 5, "value1"}
-incorrect_set = {1, 1, 2, 1}
+incorrect_set = {1, 1, 2}
+incorrect_set_multiline = {
+    "value1",
+    23,
+    5,
+    "value1",
+    # B033
+}
+incorrect_set = {1, 1}
+incorrect_set = {1, 1,}

###
# Non-errors.
###
correct_set = {"value1", 23, 5}
correct_set = {5, "5"}
correct_set = {5}
correct_set = {}
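For reference, these fixture cases exercise a first-occurrence-wins scan: the rule keeps the first instance of each value and flags every later repeat. A minimal, self-contained sketch of that scan (an editor's illustration in plain Rust over string elements, not ruff's ComparableExpr machinery; the function name is made up):

use std::collections::HashSet;

/// Return the indices of elements that repeat an earlier element,
/// mirroring the `seen_values` scan in `duplicate_value`.
fn duplicate_indices(elts: &[&str]) -> Vec<usize> {
    let mut seen: HashSet<&str> = HashSet::new();
    let mut duplicates = Vec::new();
    for (index, &elt) in elts.iter().enumerate() {
        // `insert` returns false when the value was already present.
        if !seen.insert(elt) {
            duplicates.push(index);
        }
    }
    duplicates
}

fn main() {
    // Mirrors `incorrect_set = {"value1", 23, 5, "value1"}` above:
    // only the second "value1" (index 3) is flagged.
    assert_eq!(duplicate_indices(&["value1", "23", "5", "value1"]), vec![3]);
}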
crates/ruff_linter/src/checkers/ast/analyze/expression.rs (4 changes: 2 additions & 2 deletions)
@@ -980,9 +980,9 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
                flake8_pie::rules::unnecessary_spread(checker, dict);
            }
        }
-        Expr::Set(ast::ExprSet { elts, range }) => {
+        Expr::Set(_) => {
            if checker.enabled(Rule::DuplicateValue) {
-                flake8_bugbear::rules::duplicate_value(checker, elts, *range);
+                flake8_bugbear::rules::duplicate_value(checker, expr);
            }
        }
        Expr::Yield(_) => {
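With this change the analyzer hands the rule the whole &Expr instead of pre-destructured fields; the rule then destructures what it needs with let-else, as the next file shows. A minimal sketch of that pattern with simplified stand-in types (an editor's illustration, not ruff's actual AST):

// Simplified stand-ins for the AST types; not ruff's definitions.
enum Expr {
    Set(Vec<i64>),
    Other,
}

fn duplicate_value(expr: &Expr) {
    // let-else: bail out early unless the expression is a set literal.
    let Expr::Set(elts) = expr else {
        return;
    };
    println!("set literal with {} element(s)", elts.len());
}

fn main() {
    duplicate_value(&Expr::Set(vec![1, 1, 2])); // prints "set literal with 3 element(s)"
    duplicate_value(&Expr::Other); // no-op
}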
crates/ruff_linter/src/rules/flake8_bugbear/rules/duplicate_value.rs

@@ -1,10 +1,13 @@
+use anyhow::{Context, Result};

+use ruff_python_ast::{Expr, ExprSet};
+use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
use rustc_hash::FxHashSet;

use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::comparable::ComparableExpr;
-use ruff_text_size::{Ranged, TextRange};
+use ruff_text_size::Ranged;

use crate::checkers::ast::Checker;

@@ -44,7 +47,11 @@ impl AlwaysFixableViolation for DuplicateValue {
}

/// B033
-pub(crate) fn duplicate_value(checker: &mut Checker, elts: &[Expr], range: TextRange) {
+pub(crate) fn duplicate_value(checker: &mut Checker, expr: &Expr) {
+    let Expr::Set(ExprSet { elts, .. }) = expr else {
+        return;
+    };

    let mut seen_values: FxHashSet<ComparableExpr> = FxHashSet::default();
    let mut duplicate_indices: Vec<usize> = Vec::new();
    let mut unique_indices: Vec<usize> = Vec::new();
@@ -63,32 +70,62 @@ pub(crate) fn duplicate_value(checker: &mut Checker, elts: &[Expr], range: TextRange) {
        }
    }

-    if duplicate_indices.is_empty() {
-        return;
-    }
-
-    let fix = Fix::safe_edit(Edit::range_replacement(
-        checker.generator().expr(&Expr::Set(ExprSet {
-            elts: unique_indices
-                .into_iter()
-                .map(|index| elts[index].clone())
-                .collect(),
-            range: TextRange::default(),
-        })),
-        range,
-    ));
-
    for index in duplicate_indices {
        let elt = &elts[index];

        let mut diagnostic = Diagnostic::new(
            DuplicateValue {
                value: checker.generator().expr(elt),
            },
            elt.range(),
        );

-        diagnostic.set_fix(fix.clone());
+        diagnostic.try_set_fix(|| {
+            remove_member(elt, elts, checker.locator().contents()).map(Fix::safe_edit)
+        });

        checker.diagnostics.push(diagnostic);
    }
}

+fn remove_member(expr: &Expr, elts: &[Expr], source: &str) -> Result<Edit> {
+    let (before, after): (Vec<_>, Vec<_>) = elts
+        .iter()
+        .map(Ranged::range)
+        .filter(|range| expr.range() != *range)
+        .partition(|range| range.start() < expr.start());
+
+    if !after.is_empty() {
+        // Case 1: expr is _not_ the last node, so delete from the start of the
+        // expr to the end of the subsequent comma.
+        let mut tokenizer = SimpleTokenizer::starts_at(expr.end(), source);
+
+        // Find the trailing comma.
+        tokenizer
+            .find(|token| token.kind == SimpleTokenKind::Comma)
+            .context("Unable to find trailing comma")?;
+
+        // Find the next non-whitespace token.
+        let next = tokenizer
+            .find(|token| {
+                token.kind != SimpleTokenKind::Whitespace && token.kind != SimpleTokenKind::Newline
+            })
+            .context("Unable to find next token")?;
+
+        Ok(Edit::deletion(expr.start(), next.start()))
+    } else if let Some(previous) = before.iter().map(Ranged::end).max() {
+        // Case 2: expr is the last node, so delete from the start of the
+        // previous comma to the end of the expr.
+        let mut tokenizer = SimpleTokenizer::starts_at(previous, source);
+
+        // Find the trailing comma.
+        let comma = tokenizer
+            .find(|token| token.kind == SimpleTokenKind::Comma)
+            .context("Unable to find trailing comma")?;
+
+        Ok(Edit::deletion(comma.start(), expr.end()))
+    } else {
+        // Case 3: expr is the only node, so delete it
+        Ok(Edit::range_deletion(expr.range()))
+    }
+}
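To make the three cases concrete, here is an editor's self-contained sketch of the same removal logic using plain string offsets in place of ruff's TextRange and SimpleTokenizer APIs (all names here are illustrative, not ruff's):

// `elts` holds (start, end) byte ranges of the set's elements in `source`.
fn removal_span(source: &str, elts: &[(usize, usize)], target: usize) -> (usize, usize) {
    let (start, end) = elts[target];
    if target + 1 < elts.len() {
        // Case 1: not the last element, so delete from the element's start,
        // past the trailing comma, up to the next non-whitespace character.
        let comma = end + source[end..].find(',').expect("trailing comma");
        let next = source[comma + 1..]
            .find(|c: char| !c.is_whitespace())
            .map_or(comma + 1, |offset| comma + 1 + offset);
        (start, next)
    } else if target > 0 {
        // Case 2: last element, so delete from the previous comma to the
        // element's end.
        let (_, prev_end) = elts[target - 1];
        let comma = prev_end + source[prev_end..].find(',').expect("separating comma");
        (comma, end)
    } else {
        // Case 3: only element, so delete just the element itself.
        (start, end)
    }
}

fn main() {
    // `{1, 1, 2}`: removing the duplicate `1` at index 1 deletes `1, `,
    // yielding `{1, 2}`, the safe fix shown in the snapshot below.
    let source = "{1, 1, 2}";
    let elts = [(1, 2), (4, 5), (7, 8)];
    let (start, end) = removal_span(source, &elts, 1);
    assert_eq!(format!("{}{}", &source[..start], &source[end..]), "{1, 2}");
}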
crates/ruff_linter/src/rules/flake8_bugbear/snapshots/ruff_linter__rules__flake8_bugbear__tests__B033_B033.py.snap

@@ -7,7 +7,8 @@ B033.py:4:35: B033 [*] Sets should not contain duplicate item `"value1"`
3 | ###
4 | incorrect_set = {"value1", 23, 5, "value1"}
| ^^^^^^^^ B033
-5 | incorrect_set = {1, 1, 2, 1}
+5 | incorrect_set = {1, 1, 2}
+6 | incorrect_set_multiline = {
|
= help: Remove duplicate item `"value1"`

@@ -17,50 +18,90 @@ B033.py:4:35: B033 [*] Sets should not contain duplicate item `"value1"`
3 3 | ###
4 |-incorrect_set = {"value1", 23, 5, "value1"}
4 |+incorrect_set = {"value1", 23, 5}
-5 5 | incorrect_set = {1, 1, 2, 1}
-6 6 |
-7 7 | ###
+5 5 | incorrect_set = {1, 1, 2}
+6 6 | incorrect_set_multiline = {
+7 7 | "value1",

B033.py:5:21: B033 [*] Sets should not contain duplicate item `1`
|
3 | ###
4 | incorrect_set = {"value1", 23, 5, "value1"}
-5 | incorrect_set = {1, 1, 2, 1}
+5 | incorrect_set = {1, 1, 2}
| ^ B033
-6 |
-7 | ###
+6 | incorrect_set_multiline = {
+7 | "value1",
|
= help: Remove duplicate item `1`

Safe fix
2 2 | # Errors.
3 3 | ###
4 4 | incorrect_set = {"value1", 23, 5, "value1"}
-5 |-incorrect_set = {1, 1, 2, 1}
+5 |-incorrect_set = {1, 1, 2}
5 |+incorrect_set = {1, 2}
-6 6 |
-7 7 | ###
-8 8 | # Non-errors.
+6 6 | incorrect_set_multiline = {
+7 7 | "value1",
+8 8 | 23,

-B033.py:5:27: B033 [*] Sets should not contain duplicate item `1`
-|
-3 | ###
-4 | incorrect_set = {"value1", 23, 5, "value1"}
-5 | incorrect_set = {1, 1, 2, 1}
-| ^ B033
-6 |
-7 | ###
-|
-= help: Remove duplicate item `1`
+B033.py:10:5: B033 [*] Sets should not contain duplicate item `"value1"`
+|
+8 | 23,
+9 | 5,
+10 | "value1",
+| ^^^^^^^^ B033
+11 | # B033
+12 | }
+|
+= help: Remove duplicate item `"value1"`

Safe fix
-2 2 | # Errors.
-3 3 | ###
-4 4 | incorrect_set = {"value1", 23, 5, "value1"}
-5 |-incorrect_set = {1, 1, 2, 1}
-5 |+incorrect_set = {1, 2}
-6 6 |
-7 7 | ###
-8 8 | # Non-errors.
+7 7 | "value1",
+8 8 | 23,
+9 9 | 5,
+10 |- "value1",
+11 10 | # B033
+12 11 | }
+13 12 | incorrect_set = {1, 1}

+B033.py:13:21: B033 [*] Sets should not contain duplicate item `1`
+|
+11 | # B033
+12 | }
+13 | incorrect_set = {1, 1}
+| ^ B033
+14 | incorrect_set = {1, 1,}
+|
+= help: Remove duplicate item `1`
+
+Safe fix
+10 10 | "value1",
+11 11 | # B033
+12 12 | }
+13 |-incorrect_set = {1, 1}
+13 |+incorrect_set = {1}
+14 14 | incorrect_set = {1, 1,}
+15 15 |
+16 16 | ###

+B033.py:14:21: B033 [*] Sets should not contain duplicate item `1`
+|
+12 | }
+13 | incorrect_set = {1, 1}
+14 | incorrect_set = {1, 1,}
+| ^ B033
+15 |
+16 | ###
+|
+= help: Remove duplicate item `1`
+
+Safe fix
+11 11 | # B033
+12 12 | }
+13 13 | incorrect_set = {1, 1}
+14 |-incorrect_set = {1, 1,}
+14 |+incorrect_set = {1,}
+15 15 |
+16 16 | ###
+17 17 | # Non-errors.

