Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion dtest_setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,14 @@ def retry_till_success(fun, *args, **kwargs):
def default_ignore_log_patterns():
    """Return the default set of log-error regexes that dtests should ignore.

    Returns:
        list[str]: regex patterns matched against node log lines; lines
        matching any pattern are not treated as test failures.

    Note: a NEW list is built on every call so that individual tests can
    append their own patterns without corrupting the shared default set.
    """
    # to allow tests to append to the list, make sure to create a new list as the output
    # to this function, else multiple tests could corrupt the default set
    return ['failed: Connection reset by peer',
            r'Invalid or unsupported protocol version \(5\)',
            # See python-driver, SHA a7295e103023e12152fc0940906071b18356def3
            # cassandra/__init__.py
            r'Invalid or unsupported protocol version \(65\)',  # DSE_V1
            r'Invalid or unsupported protocol version \(66\)',  # DSE_V2
            'Beta version of the protocol used',
            ]


class DTestSetup(object):
Expand Down
3 changes: 3 additions & 0 deletions hintedhandoff_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,9 @@ class TestHintedHandoff(Tester):
@ported_to_in_jvm('4.0')
@pytest.mark.no_vnodes
def test_hintedhandoff_decom(self):
self.fixture_dtest_setup.ignore_log_patterns = [
'Could not update repaired ranges.*Giving up'
]
self.cluster.populate(4).start()
[node1, node2, node3, node4] = self.cluster.nodelist()
session = self.patient_cql_connection(node1)
Expand Down
2 changes: 2 additions & 0 deletions materialized_views_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -342,13 +342,15 @@ def test_populate_mv_after_insert_wide_rows(self):
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
session.cluster.control_connection.wait_for_schema_agreement()

for i in range(5):
for j in range(10000):
session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))

session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()

logger.debug("wait for view to build")
self._wait_for_view("ks", "t_by_v")
Expand Down
14 changes: 14 additions & 0 deletions repair_tests/deprecated_repair_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,13 @@ def test_force_repair_range_async_1(self):
Collection<String> hosts, boolean fullRepair,
String... columnFamilies)
"""
# when giving token ranges, there needs to be logic to make sure we have partitions for
# those tokens... which self._deprecated_repair_jmx does not do... for this reason most
# runs will not actually trigger repair and will abort (we check logging, which will happen
# still).
self.fixture_dtest_setup.ignore_log_patterns = [
'Nothing to repair for'
]
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you provide a bit of info when we see this case and why it is ok to ignore it, please?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the log we generate when we skip repair due to no data (or no tables, etc.); this test does not write data, so skipping repair is the expected behavior

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

added docs explaining why; these tests run repairs over a token range but don't make sure there are partitions in that range, so the repair aborts.

The test checks logging, which happens after we parse the arguments and print them; whether the repair itself actually runs afterwards doesn't matter to these tests.

opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,boolean,java.util.Collection,java.util.Collection,boolean,[Ljava.lang.String;)",
["0", "1000", "ks", True, ["dc1"], [], False, ["cf"]])
assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
Expand Down Expand Up @@ -122,6 +129,13 @@ def test_force_repair_range_async_3(self):
boolean isLocal, boolean fullRepair,
String... columnFamilies)
"""
# when giving token ranges, there needs to be logic to make sure we have partitions for
# those tokens... which self._deprecated_repair_jmx does not do... for this reason most
# runs will not actually trigger repair and will abort (we check logging, which will happen
# still).
self.fixture_dtest_setup.ignore_log_patterns = [
'Nothing to repair for'
]
opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,boolean,boolean,boolean,[Ljava.lang.String;)",
["0", "1000", "ks", True, True, True, ["cf"]])
assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
Expand Down