Commit 2ffe23d

remove unnecessary dependencies
1 parent 1db03fd commit 2ffe23d

6 files changed: +7 −111 lines

competitions/kaggle/Cryo-ET/1st_place_solution/README.md

Lines changed: 1 addition & 1 deletion

@@ -66,7 +66,7 @@ We provide three different configurations which differ only in the used backbone
 
 ```python train.py -C cfg_resnet34 --output_dir WHATEVERISYOUROUTPUTDIR```
 
-This will save checkpoints under the specified WHATEVERISYOUROUTPUTDIR.
+This will save checkpoints under the specified WHATEVERISYOUROUTPUTDIR when training is finished.
 
 By default models are trained using bfloat16 which requires a GPU capable of that. Alternatively you can set ```cfg.bf16=False``` or overwrite as flag ```--bf16 False``` when running ```train.py ```.
 
 ### Replicating 1st place solution (segmentation part)
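The bf16 switch mentioned in this hunk is just a config attribute, so it can be flipped either in the config file (`cfg.bf16=False`) or from the command line (`python train.py -C cfg_resnet34 --bf16 False --output_dir WHATEVERISYOUROUTPUTDIR`). The snippet below is a minimal, hedged sketch of how such a flag is typically consumed via torch.autocast; the actual call sites in train.py are not part of this diff, so the function name and shapes here are illustrative only.

```python
import torch

def forward_step(model, batch, cfg):
    # Illustrative sketch: run the forward pass in bfloat16 when cfg.bf16 is
    # set, otherwise leave autocast disabled. bf16 autocast requires a GPU
    # that supports it, matching the README's note.
    with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=cfg.bf16):
        return model(batch)
```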

competitions/kaggle/Cryo-ET/1st_place_solution/metrics/metric_1.py

Lines changed: 1 addition & 53 deletions

@@ -1,15 +1,7 @@
 import numpy as np
 import torch
 from sklearn.metrics import roc_auc_score
-
-"""
-Derived from:
-https://github.com/cellcanvas/album-catalog/blob/main/solutions/copick/compare-picks/solution.py
-"""
-
-import numpy as np
 import pandas as pd
-
 from scipy.spatial import KDTree
 
 
@@ -156,13 +148,6 @@ def calc_metric(cfg, pp_out, val_df, pre="val"):
     submission['experiment'] = solution['experiment'].unique()[0]
     submission['id'] = range(len(submission))
 
-    # score003 = score(
-    #     solution.copy(),
-    #     submission[submission['conf']>0.03].copy(),
-    #     row_id_column_name = 'id',
-    #     distance_multiplier=0.5,
-    #     beta=4)[0]
-    # print('score003',score003)
 
     best_ths = []
     for p in particles:
@@ -196,42 +181,5 @@ def calc_metric(cfg, pp_out, val_df, pre="val"):
     result['score'] = score_pp
     # print(result)
     return result
-    # # if isinstance(pred_df,list):
-    # #     pred_df,gt_df = pred_df
-    # # else:
-    # #     gt_df = None
-
-    # y_true = val_df['score'].values
-    # y_pred = val_data['preds'].cpu().numpy()
-    # score = get_score(y_true.flatten(), y_pred.flatten())
-    # # print(score)
-
-    # # df['score'] = df['location'].apply(ast.literal_eval)
-    # # df['span'] = df['location'].apply(location_to_span)
-    # # spans_true = df['span'].values
-
-    # # df_pred = pred_df.copy()
-    # # # df_pred['location'] = df_pred['location'].apply(ast.literal_eval)
-    # # df_pred['span'] = df_pred['pred_location'].apply(pred_location_to_span)
-    # # spans_pred = df_pred['span'].values
-
-    # # score = span_micro_f1(spans_pred, spans_true)
-
-    # if hasattr(cfg, "neptune_run"):
-    #     cfg.neptune_run[f"{pre}/score/"].log(score, step=cfg.curr_step)
-    # print(f"{pre} score: {score:.6}")
-    # # else:
-    # #     return score
-
-    # # if gt_df is not None:
-    # #     df_pred = gt_df.copy()
-    # #     df_pred['span'] = df_pred['pred_location'].apply(pred_location_to_span)
-    # #     spans_pred = df_pred['span'].values
-
-    # # score = span_micro_f1(spans_pred, spans_true)
-
-    # # if hasattr(cfg, "neptune_run"):
-    # #     cfg.neptune_run[f"{pre}/score_debug/"].log(score, step=cfg.curr_step)
-    # # # print(f"{pre} score_debug: {score:.6}")
-    # return score
+
 
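For context on what stays in metric_1.py after this cleanup: the kept imports (pandas, scipy's KDTree) and the deleted `score003` comment (`distance_multiplier=0.5`, `beta=4`) point at a pick-matching metric that pairs predicted particle centres with ground-truth centres inside a per-particle match radius and scores them with an F-beta that weights recall heavily. The sketch below illustrates that general idea only; the function names and the exact matching rules are assumptions, not the repository's code.

```python
import numpy as np
from scipy.spatial import KDTree

def match_picks(gt_xyz: np.ndarray, pred_xyz: np.ndarray, radius: float):
    """Return (tp, fp, fn) for one particle type at a given match radius."""
    if len(gt_xyz) == 0:
        return 0, len(pred_xyz), 0
    if len(pred_xyz) == 0:
        return 0, 0, len(gt_xyz)
    tree = KDTree(gt_xyz)
    matched, tp = set(), 0
    for p in pred_xyz:
        # all ground-truth points within `radius` of this prediction
        for idx in tree.query_ball_point(p, r=radius):
            if idx not in matched:   # each ground-truth pick matches at most once
                matched.add(idx)
                tp += 1
                break
    return tp, len(pred_xyz) - tp, len(gt_xyz) - tp

def fbeta(tp: int, fp: int, fn: int, beta: float = 4.0) -> float:
    precision = tp / max(tp + fp, 1)
    recall = tp / max(tp + fn, 1)
    denom = beta**2 * precision + recall
    return (1 + beta**2) * precision * recall / denom if denom > 0 else 0.0
```

The per-particle thresholds (`best_ths`) that the kept code searches over are presumably confidence cut-offs applied to the submission before such a matching step.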

competitions/kaggle/Cryo-ET/1st_place_solution/postprocess/pp_1.py

Lines changed: 0 additions & 5 deletions

@@ -1,5 +1,3 @@
-
-
 import pandas as pd
 import torch
 from torch.nn import functional as F
@@ -8,8 +6,6 @@
 import torch
 from torch import nn
 
-
-
 def simple_nms(scores, nms_radius: int):
     """ Fast Non-maximum suppression to remove nearby points """
     assert(nms_radius >= 0)
@@ -54,7 +50,6 @@ def post_process_pipeline(cfg, val_data, val_df):
         pred_df_ = pd.DataFrame(xyz.cpu().numpy(),columns=['x','y','z'])
         pred_df_['particle_type'] = p
         pred_df_['conf'] = conf.cpu().numpy()
-        # pred_df_['experiment'] = experiments[fold]
         pred_df += [pred_df_]
     pred_df = pd.concat(pred_df)
     pred_df = pred_df[(pred_df['x']<6300) & (pred_df['y']<6300)& (pred_df['z']<1840) & (pred_df['conf']>0.01)].copy()
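The simple_nms helper kept in this file (only its signature and docstring appear in the hunk above) is the standard max-pooling trick for keeping only local maxima of a score map. Below is a minimal sketch of that idea for a 3D score volume; it is an assumption about the general technique, not a copy of the repository's implementation, and the name simple_nms_sketch is invented for illustration.

```python
import torch
from torch.nn import functional as F

def simple_nms_sketch(scores: torch.Tensor, nms_radius: int) -> torch.Tensor:
    """Zero out scores that are not the local maximum within nms_radius (3D)."""
    assert nms_radius >= 0
    # scores is assumed to be shaped (N, C, D, H, W)
    pooled = F.max_pool3d(
        scores, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius
    )
    max_mask = scores == pooled  # True only at local maxima
    return torch.where(max_mask, scores, torch.zeros_like(scores))
```

Surviving peaks can then be thresholded and converted to x/y/z picks, which is consistent with the `conf>0.01` and volume-bound filtering on `pred_df` shown above.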
Lines changed: 0 additions & 8 deletions

@@ -1,10 +1,2 @@
-kaggle
-optuna
-boto3
-neptune
 zarr
-albumentations==1.4.21
-opencv-python==4.5.5.64
-timm==1.0.11
 monai==1.4.0
-mrcfile
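After this change the pinned dependency list for the solution shrinks to zarr and monai==1.4.0; the other eight packages (kaggle, optuna, boto3, neptune, albumentations, opencv-python, timm, mrcfile) are dropped. A quick, purely illustrative sanity check that the remaining pins resolve in an environment:

```python
# Illustrative check only: confirm the two remaining packages import and that
# monai matches the 1.4.0 pin kept in this file.
import monai
import zarr

print("monai", monai.__version__)  # expected: 1.4.0
print("zarr", zarr.__version__)
```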

competitions/kaggle/Cryo-ET/1st_place_solution/train.py

Lines changed: 5 additions & 25 deletions

@@ -29,8 +29,6 @@
 from utils import (
     get_optimizer,
     get_scheduler,
-    setup_neptune,
-    upload_s3,
 )
 
 
@@ -120,9 +118,7 @@ def run_eval(model, val_dataloader, cfg, pre="val", curr_epoch=0):
         loss = np.mean(losses)
 
         print(f"Mean {pre}_{k}", loss)
-        if cfg.neptune_run:
-            if not math.isinf(loss) and not math.isnan(loss):
-                cfg.neptune_run[f"{pre}/{k}"].log(loss, step=cfg.curr_step)
+
 
 
     if (cfg.local_rank == 0) and (cfg.calc_metric) and (((curr_epoch + 1) % cfg.calc_metric_epochs) == 0):
@@ -135,9 +131,7 @@ def run_eval(model, val_dataloader, cfg, pre="val", curr_epoch=0):
 
         for k, v in val_score.items():
             print(f"{pre}_{k}: {v:.3f}")
-            if cfg.neptune_run:
-                if not math.isinf(v) and not math.isnan(v):
-                    cfg.neptune_run[f"{pre}/{k}"].log(v, step=cfg.curr_step)
+
 
     if cfg.distributed:
         torch.distributed.barrier()
@@ -191,8 +185,7 @@ def train(cfg):
 
     set_seed(cfg.seed)
 
-    if cfg.local_rank == 0:
-        cfg.neptune_run = setup_neptune(cfg)
+
 
     train_df, val_df, test_df = get_data(cfg)
 
@@ -347,17 +340,7 @@ def train(cfg):
             if cfg.local_rank == 0 and cfg.curr_step % cfg.batch_size == 0:
 
                 loss_names = [key for key in output_dict if 'loss' in key]
-                if cfg.neptune_run:
-                    for l in loss_names:
-                        v = output_dict[l].item()
-                        if not math.isinf(v) and not math.isnan(v):
-                            cfg.neptune_run[f"train/{l}"].log(value=v, step=cfg.curr_step)
-                    cfg.neptune_run["lr"].log(value=optimizer.param_groups[0]["lr"], step=cfg.curr_step)
-                    if total_grad_norm is not None:
-                        cfg.neptune_run["total_grad_norm"].log(value=total_grad_norm.item(), step=cfg.curr_step)
-                        cfg.neptune_run["total_grad_norm_after_clip"].log(value=total_grad_norm_after_clip.item(), step=cfg.curr_step)
-                    if total_weight_norm is not None:
-                        cfg.neptune_run["total_weight_norm"].log(value=total_weight_norm.item(), step=cfg.curr_step)
+
                 progress_bar.set_description(f"loss: {np.mean(losses[-10:]):.4f}")
 
             if cfg.eval_steps != 0:
@@ -426,14 +409,11 @@
     parser = argparse.ArgumentParser(description="")
 
     parser.add_argument("-C", "--config", help="config filename")
-    parser.add_argument("-D", "--debug", action='store_true', help="debugging True/ False")
     parser_args, other_args = parser.parse_known_args(sys.argv)
 
     cfg = copy(importlib.import_module(parser_args.config).cfg)
 
-    if parser_args.debug:
-        print('debug mode')
-        cfg.neptune_connection_mode = 'debug'
+
 
 
     # overwrite params in config with additional args
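The __main__ block shown above keeps the repository's config pattern: `-C` names a Python config module, `parse_known_args` collects whatever flags the parser does not know, and those leftovers are used to overwrite attributes on `cfg` (the kept comment "overwrite params in config with additional args" marks that step; its body lies outside this hunk). The sketch below reproduces the lines visible in the diff and adds an illustrative override loop — the loop and its type handling are assumptions, not the repository's exact code.

```python
import argparse
import importlib
import sys
from copy import copy

parser = argparse.ArgumentParser(description="")
parser.add_argument("-C", "--config", help="config filename")
parser_args, other_args = parser.parse_known_args(sys.argv)

cfg = copy(importlib.import_module(parser_args.config).cfg)

# Illustrative override loop: pair leftover "--name value" tokens and copy them
# onto cfg, casting to the type of the existing attribute (bools handled by hand).
# other_args[0] is the script path itself because sys.argv was passed verbatim.
for name, value in zip(other_args[1::2], other_args[2::2]):
    name = name.lstrip("-")
    if hasattr(cfg, name):
        old = getattr(cfg, name)
        if isinstance(old, bool):
            setattr(cfg, name, value.lower() in ("true", "1"))
        elif old is not None:
            setattr(cfg, name, type(old)(value))
        else:
            setattr(cfg, name, value)
```

With a loop like this, `--bf16 False` from the README ends up as `cfg.bf16 = False` without touching the config file.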

competitions/kaggle/Cryo-ET/1st_place_solution/utils.py

Lines changed: 0 additions & 19 deletions

@@ -1,18 +1,14 @@
-
 import random
 import os
 import numpy as np
 import pandas as pd
 import torch
 from torch.utils.data import Sampler, RandomSampler, SequentialSampler, DataLoader, WeightedRandomSampler
 from torch import nn, optim
-# from torch.optim import AdamW
 from torch.optim.lr_scheduler import LambdaLR
 from torch.optim import lr_scheduler
 import importlib
 import math
-import neptune
-from neptune.utils import stringify_unsupported
 
 import logging
 import pickle
@@ -469,22 +465,7 @@ def get_scheduler(cfg, optimizer, total_steps):
     return scheduler
 
 
-def setup_neptune(cfg):
-    neptune_run = None
-    if cfg.neptune_project:
-        neptune_run = neptune.init_run(
-            project=cfg.neptune_project,
-            # tags=cfg.tags,
-            mode=cfg.neptune_connection_mode,
-            capture_stdout=False,
-            capture_stderr=False,
-            source_files=[f'models/{cfg.model}.py',f'data/{cfg.dataset}.py',f'configs/{cfg.name}.py']
-        )
-
-
-        neptune_run["cfg"] = stringify_unsupported(cfg.__dict__)
 
-    return neptune_run
 
 
 def read_df(fn):
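utils.py keeps get_optimizer and get_scheduler (and the LambdaLR / lr_scheduler imports); only the Neptune helper disappears. For readers unfamiliar with the pattern, here is a generic warmup-plus-cosine schedule built with LambdaLR, of the kind such a helper commonly returns — illustrative only, since the actual options get_scheduler supports are not visible in this diff.

```python
import math
import torch
from torch.optim.lr_scheduler import LambdaLR

def warmup_cosine_lambda(warmup_steps: int, total_steps: int):
    """Linear warmup followed by a cosine decay to zero, as a LambdaLR factor."""
    def fn(step: int) -> float:
        if step < warmup_steps:
            return step / max(1, warmup_steps)
        progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
        return 0.5 * (1.0 + math.cos(math.pi * progress))
    return fn

# Minimal usage example with a toy model and optimizer.
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = LambdaLR(optimizer, lr_lambda=warmup_cosine_lambda(100, 10_000))
```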
