Commit e501e1d

gfyoung authored and jreback committed

MAINT: Remove assertIn from testing (#16101)

1 parent 12f0762 commit e501e1d

29 files changed, +81 -87 lines
pandas/tests/frame/test_alter_axes.py (1 addition, 1 deletion)

@@ -138,7 +138,7 @@ def test_set_index_nonuniq(self):
                         'E': np.random.randn(5)})
         with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
             df.set_index('A', verify_integrity=True, inplace=True)
-        self.assertIn('A', df)
+        assert 'A' in df

     def test_set_index_bug(self):
         # GH1590

pandas/tests/frame/test_axis_select_reindex.py (1 addition, 1 deletion)

@@ -734,7 +734,7 @@ def test_filter_regex_search(self):
         # regex
         filtered = fcopy.filter(regex='[A]+')
         self.assertEqual(len(filtered.columns), 2)
-        self.assertIn('AA', filtered)
+        assert 'AA' in filtered

         # doesn't have to be at beginning
         df = DataFrame({'aBBa': [1, 2],

pandas/tests/frame/test_convert_to.py (3 additions, 3 deletions)

@@ -156,16 +156,16 @@ def test_to_records_index_name(self):
         df = DataFrame(np.random.randn(3, 3))
         df.index.name = 'X'
         rs = df.to_records()
-        self.assertIn('X', rs.dtype.fields)
+        assert 'X' in rs.dtype.fields

         df = DataFrame(np.random.randn(3, 3))
         rs = df.to_records()
-        self.assertIn('index', rs.dtype.fields)
+        assert 'index' in rs.dtype.fields

         df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
         df.index.names = ['A', None]
         rs = df.to_records()
-        self.assertIn('level_0', rs.dtype.fields)
+        assert 'level_0' in rs.dtype.fields

     def test_to_records_with_unicode_index(self):
         # GH13172

pandas/tests/frame/test_indexing.py (2 additions, 2 deletions)

@@ -422,7 +422,7 @@ def test_setitem(self):
         # not sure what else to do here
         series = self.frame['A'][::2]
         self.frame['col5'] = series
-        self.assertIn('col5', self.frame)
+        assert 'col5' in self.frame

         self.assertEqual(len(series), 15)
         self.assertEqual(len(self.frame), 30)
@@ -600,7 +600,7 @@ def test_setitem_corner(self):
                        index=np.arange(3))
         del df['B']
         df['B'] = [1., 2., 3.]
-        self.assertIn('B', df)
+        assert 'B' in df
         self.assertEqual(len(df.columns), 2)

         df['A'] = 'beginning'

pandas/tests/frame/test_operators.py (1 addition, 1 deletion)

@@ -831,7 +831,7 @@ def test_combineSeries(self):

         for key, s in compat.iteritems(self.frame):
             assert_series_equal(larger_added[key], s + series[key])
-        self.assertIn('E', larger_added)
+        assert 'E' in larger_added
         self.assertTrue(np.isnan(larger_added['E']).all())

         # vs mix (upcast) as needed

pandas/tests/frame/test_repr_info.py (1 addition, 1 deletion)

@@ -171,7 +171,7 @@ def test_repr_column_name_unicode_truncation_bug(self):
                            ' the File through the code..')})

         result = repr(df)
-        self.assertIn('StringCol', result)
+        assert 'StringCol' in result

     def test_latex_repr(self):
         result = r"""\begin{tabular}{llll}

pandas/tests/frame/test_to_csv.py (2 additions, 2 deletions)

@@ -909,7 +909,7 @@ def test_to_csv_compression_gzip(self):
             text = f.read().decode('utf8')
             f.close()
             for col in df.columns:
-                self.assertIn(col, text)
+                assert col in text

     def test_to_csv_compression_bz2(self):
         # GH7615
@@ -932,7 +932,7 @@ def test_to_csv_compression_bz2(self):
             text = f.read().decode('utf8')
             f.close()
             for col in df.columns:
-                self.assertIn(col, text)
+                assert col in text

     def test_to_csv_compression_xz(self):
         # GH11852

pandas/tests/groupby/test_groupby.py (16 additions, 16 deletions)

@@ -2483,14 +2483,14 @@ def test_groupby_series_with_name(self):
         result = self.df.groupby(self.df['A']).mean()
         result2 = self.df.groupby(self.df['A'], as_index=False).mean()
         self.assertEqual(result.index.name, 'A')
-        self.assertIn('A', result2)
+        assert 'A' in result2

         result = self.df.groupby([self.df['A'], self.df['B']]).mean()
         result2 = self.df.groupby([self.df['A'], self.df['B']],
                                   as_index=False).mean()
         self.assertEqual(result.index.names, ('A', 'B'))
-        self.assertIn('A', result2)
-        self.assertIn('B', result2)
+        assert 'A' in result2
+        assert 'B' in result2

     def test_seriesgroupby_name_attr(self):
         # GH 6265
@@ -3357,10 +3357,10 @@ def test_groupby_with_small_elem(self):
                         'change': [1234, 5678]},
                        index=pd.DatetimeIndex(['2014-09-10', '2013-10-10']))
         grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
-        self.assertEqual(len(grouped.groups), 2)
-        self.assertEqual(grouped.ngroups, 2)
-        self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
+        assert len(grouped.groups) == 2
+        assert grouped.ngroups == 2
+        assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
+        assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups

         res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
         tm.assert_frame_equal(res, df.iloc[[0], :])
@@ -3372,10 +3372,10 @@ def test_groupby_with_small_elem(self):
                        index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
                                                '2014-09-15']))
         grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
-        self.assertEqual(len(grouped.groups), 2)
-        self.assertEqual(grouped.ngroups, 2)
-        self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
+        assert len(grouped.groups) == 2
+        assert grouped.ngroups == 2
+        assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
+        assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups

         res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
         tm.assert_frame_equal(res, df.iloc[[0, 2], :])
@@ -3388,11 +3388,11 @@ def test_groupby_with_small_elem(self):
                        index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
                                                '2014-08-05']))
         grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
-        self.assertEqual(len(grouped.groups), 3)
-        self.assertEqual(grouped.ngroups, 3)
-        self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2014-08-31'), 'start'), grouped.groups)
+        assert len(grouped.groups) == 3
+        assert grouped.ngroups == 3
+        assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
+        assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups
+        assert (pd.Timestamp('2014-08-31'), 'start') in grouped.groups

         res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
         tm.assert_frame_equal(res, df.iloc[[0], :])

pandas/tests/indexes/datetimes/test_datetime.py (1 addition, 1 deletion)

@@ -101,7 +101,7 @@ def test_reasonable_keyerror(self):
         try:
             index.get_loc('1/1/2000')
         except KeyError as e:
-            self.assertIn('2000', str(e))
+            assert '2000' in str(e)

     def test_roundtrip_pickle_with_tz(self):

pandas/tests/indexes/datetimes/test_ops.py (1 addition, 1 deletion)

@@ -632,7 +632,7 @@ def test_nonunique_contains(self):
         for idx in map(DatetimeIndex,
                        ([0, 1, 0], [0, 0, -1], [0, -1, -1],
                         ['2015', '2015', '2016'], ['2015', '2015', '2014'])):
-            tm.assertIn(idx[0], idx)
+            assert idx[0] in idx

     def test_order(self):
         # with freq

pandas/tests/indexes/test_base.py (4 additions, 4 deletions)

@@ -857,10 +857,10 @@ def test_add_string(self):
     def test_iadd_string(self):
         index = pd.Index(['a', 'b', 'c'])
         # doesn't fail test unless there is a check before `+=`
-        self.assertIn('a', index)
+        assert 'a' in index

         index += '_x'
-        self.assertIn('a_x', index)
+        assert 'a_x' in index

     def test_difference(self):

@@ -963,8 +963,8 @@ def test_summary(self):
         ind = Index(['{other}%s', "~:{range}:0"], name='A')
         result = ind.summary()
         # shouldn't be formatted accidentally.
-        self.assertIn('~:{range}:0', result)
-        self.assertIn('{other}%s', result)
+        assert '~:{range}:0' in result
+        assert '{other}%s' in result

     def test_format(self):
         self._check_method_works(Index.format)

pandas/tests/indexes/test_multi.py (2 additions, 2 deletions)

@@ -1597,8 +1597,8 @@ def test_union(self):
         # other = Index(['A', 'B', 'C'])

         # result = other.union(self.index)
-        # self.assertIn(('foo', 'one'), result)
-        # self.assertIn('B', result)
+        # assert ('foo', 'one') in result
+        # assert 'B' in result

         # result2 = self.index.union(other)
         # self.assertTrue(result.equals(result2))

pandas/tests/indexes/timedeltas/test_ops.py (1 addition, 1 deletion)

@@ -561,7 +561,7 @@ def test_nonunique_contains(self):
         for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
                                         ['00:01:00', '00:01:00', '00:02:00'],
                                         ['00:01:00', '00:01:00', '00:00:01'])):
-            tm.assertIn(idx[0], idx)
+            assert idx[0] in idx

     def test_unknown_attribute(self):
         # see gh-9680

pandas/tests/indexing/test_chaining_and_caching.py (4 additions, 4 deletions)

@@ -373,15 +373,15 @@ def test_cache_updating(self):
         df['A'] # cache series
         with catch_warnings(record=True):
             df.ix["Hello Friend"] = df.ix[0]
-        self.assertIn("Hello Friend", df['A'].index)
-        self.assertIn("Hello Friend", df['B'].index)
+        assert "Hello Friend" in df['A'].index
+        assert "Hello Friend" in df['B'].index

         with catch_warnings(record=True):
             panel = tm.makePanel()
             panel.ix[0] # get first item into cache
             panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
-            self.assertIn("A+1", panel.ix[0].columns)
-            self.assertIn("A+1", panel.ix[1].columns)
+            assert "A+1" in panel.ix[0].columns
+            assert "A+1" in panel.ix[1].columns

         # 5216
         # make sure that we don't try to set a dead cache

pandas/tests/io/formats/test_format.py (4 additions, 4 deletions)

@@ -959,7 +959,7 @@ def test_wide_repr_named(self):
                 self.assertTrue(len(wider_repr) < len(wide_repr))

             for line in wide_repr.splitlines()[1::13]:
-                self.assertIn('DataFrame Index', line)
+                assert 'DataFrame Index' in line

         reset_option('display.expand_frame_repr')

@@ -981,7 +981,7 @@ def test_wide_repr_multiindex(self):
                 self.assertTrue(len(wider_repr) < len(wide_repr))

             for line in wide_repr.splitlines()[1::13]:
-                self.assertIn('Level 0 Level 1', line)
+                assert 'Level 0 Level 1' in line

         reset_option('display.expand_frame_repr')

@@ -1875,9 +1875,9 @@ def test_float_trim_zeros(self):
             if line.startswith('dtype:'):
                 continue
             if _three_digit_exp():
-                self.assertIn('+010', line)
+                assert '+010' in line
             else:
-                self.assertIn('+10', line)
+                assert '+10' in line

     def test_datetimeindex(self):

pandas/tests/io/parser/c_parser_only.py (1 addition, 1 deletion)

@@ -33,7 +33,7 @@ def test_buffer_overflow(self):
             try:
                 self.read_table(StringIO(malf))
             except Exception as err:
-                self.assertIn(cperr, str(err))
+                assert cperr in str(err)

     def test_buffer_rd_bytes(self):
         # see gh-12098: src->buffer in the C parser can be freed twice leading

pandas/tests/io/parser/parse_dates.py (6 additions, 6 deletions)

@@ -135,7 +135,7 @@ def test_multiple_date_cols_int_cast(self):
         # it works!
         df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
                            date_parser=conv.parse_date_time)
-        self.assertIn('nominal', df)
+        assert 'nominal' in df

     def test_multiple_date_col_timestamp_parse(self):
         data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
@@ -530,7 +530,7 @@ def test_parse_date_time(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_date_time)
-        self.assertIn('date_time', df)
+        assert 'date_time' in df
         self.assertEqual(df.date_time.loc[0], datetime(2001, 1, 5, 10, 0, 0))

         data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
@@ -558,7 +558,7 @@ def test_parse_date_fields(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_date_fields)
-        self.assertIn('ymd', df)
+        assert 'ymd' in df
         self.assertEqual(df.ymd.loc[0], datetime(2001, 1, 10))

     def test_datetime_six_col(self):
@@ -585,7 +585,7 @@ def test_datetime_six_col(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_all_fields)
-        self.assertIn('ymdHMS', df)
+        assert 'ymdHMS' in df
         self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0))

     def test_datetime_fractional_seconds(self):
@@ -598,7 +598,7 @@ def test_datetime_fractional_seconds(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_all_fields)
-        self.assertIn('ymdHMS', df)
+        assert 'ymdHMS' in df
         self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0,
                                                     microsecond=123456))
         self.assertEqual(df.ymdHMS.loc[1], datetime(2001, 1, 5, 10, 0, 0,
@@ -611,7 +611,7 @@ def test_generic(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=dateconverter)
-        self.assertIn('ym', df)
+        assert 'ym' in df
         self.assertEqual(df.ym.loc[0], date(2001, 1, 1))

     def test_dateparser_resolution_if_not_ns(self):

pandas/tests/io/test_html.py (2 additions, 2 deletions)

@@ -566,10 +566,10 @@ def test_gold_canyon(self):
         with open(self.banklist_data, 'r') as f:
             raw_text = f.read()

-        self.assertIn(gc, raw_text)
+        assert gc in raw_text
         df = self.read_html(self.banklist_data, 'Gold Canyon',
                             attrs={'id': 'table'})[0]
-        self.assertIn(gc, df.to_string())
+        assert gc in df.to_string()

     def test_different_number_of_rows(self):
         expected = """<table border="1" class="dataframe">

pandas/tests/reshape/test_join.py (5 additions, 5 deletions)

@@ -153,15 +153,15 @@ def test_handle_overlap(self):
         joined = merge(self.df, self.df2, on='key2',
                        suffixes=['.foo', '.bar'])

-        self.assertIn('key1.foo', joined)
-        self.assertIn('key1.bar', joined)
+        assert 'key1.foo' in joined
+        assert 'key1.bar' in joined

     def test_handle_overlap_arbitrary_key(self):
         joined = merge(self.df, self.df2,
                        left_on='key2', right_on='key1',
                        suffixes=['.foo', '.bar'])
-        self.assertIn('key1.foo', joined)
-        self.assertIn('key2.bar', joined)
+        assert 'key1.foo' in joined
+        assert 'key2.bar' in joined

     def test_join_on(self):
         target = self.target
@@ -251,7 +251,7 @@ def test_join_with_len0(self):
         # nothing to merge
         merged = self.target.join(self.source.reindex([]), on='C')
         for col in self.source:
-            self.assertIn(col, merged)
+            assert col in merged
             self.assertTrue(merged[col].isnull().all())

         merged2 = self.target.join(self.source.reindex([]), on='C',

pandas/tests/reshape/test_merge.py (2 additions, 2 deletions)

@@ -128,8 +128,8 @@ def test_merge_overlap(self):
         merged = merge(self.left, self.left, on='key')
         exp_len = (self.left['key'].value_counts() ** 2).sum()
         self.assertEqual(len(merged), exp_len)
-        self.assertIn('v1_x', merged)
-        self.assertIn('v1_y', merged)
+        assert 'v1_x' in merged
+        assert 'v1_y' in merged

     def test_merge_different_column_key_names(self):
         left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
