Frida #569

10 changes: 5 additions & 5 deletions configs/im_osnet_x1_0_softmax_256x128_amsgrad_cosine.yaml
@@ -20,16 +20,16 @@ loss:
 train:
   optim: 'amsgrad'
   lr: 0.0015
-  max_epoch: 250
-  batch_size: 64
+  max_epoch: 50 # 250
+  batch_size: 32 # 64
   fixbase_epoch: 10
   open_layers: ['classifier']
   lr_scheduler: 'cosine'

 test:
-  batch_size: 300
+  batch_size: 100 # 300
   dist_metric: 'euclidean'
   normalize_feature: False
   evaluate: False
-  eval_freq: -1
-  rerank: False
+  eval_freq: 5 # -1
+  rerank: False
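
For context, these YAML values are overlaid on the defaults from scripts/default_config.py via yacs. A minimal sketch of that merge, mirroring what scripts/main.py does (the import path is an assumption about how scripts/ is put on sys.path):

```python
# Sketch only: load the defaults and overlay the edited YAML, as scripts/main.py does.
import torch
from default_config import get_default_config  # assumes scripts/ is on sys.path

cfg = get_default_config()
cfg.use_gpu = torch.cuda.is_available()
cfg.merge_from_file('configs/im_osnet_x1_0_softmax_256x128_amsgrad_cosine.yaml')

print(cfg.train.max_epoch)  # 50 after this change (was 250)
print(cfg.test.eval_freq)   # 5: evaluate every 5 epochs instead of only after training
```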
10 changes: 5 additions & 5 deletions scripts/default_config.py
@@ -8,7 +8,7 @@ def get_default_config():
     cfg.model = CN()
     cfg.model.name = 'resnet50'
     cfg.model.pretrained = True # automatically load pretrained model weights if available
-    cfg.model.load_weights = '' # path to model weights
+    cfg.model.load_weights = 'log/osnet_x1_0_market1501_softmax_cosinelr/model.pth.tar-250' # path to model weights
     cfg.model.resume = '' # path to checkpoint for resume training

     # data
@@ -47,7 +47,7 @@ def get_default_config():

     # video reid setting
     cfg.video = CN()
-    cfg.video.seq_len = 15 # number of images to sample in a tracklet
+    cfg.video.seq_len = 15 # 15 # number of images to sample in a tracklet
     cfg.video.sample_method = 'evenly' # how to sample images from a tracklet
     cfg.video.pooling_method = 'avg' # how to pool features over a tracklet

@@ -56,7 +56,7 @@ def get_default_config():
     cfg.train.optim = 'adam'
     cfg.train.lr = 0.0003
     cfg.train.weight_decay = 5e-4
-    cfg.train.max_epoch = 60
+    cfg.train.max_epoch = 60
     cfg.train.start_epoch = 0
     cfg.train.batch_size = 32
     cfg.train.fixbase_epoch = 0 # number of epochs to fix base layers
@@ -99,8 +99,8 @@ def get_default_config():
     cfg.test.dist_metric = 'euclidean' # distance metric, ['euclidean', 'cosine']
     cfg.test.normalize_feature = False # normalize feature vectors before computing distance
     cfg.test.ranks = [1, 5, 10, 20] # cmc ranks
-    cfg.test.evaluate = False # test only
-    cfg.test.eval_freq = -1 # evaluation frequency (-1 means to only test after training)
+    cfg.test.evaluate = False # True: test only -- False: train and test
+    cfg.test.eval_freq = 1 # -1 # evaluation frequency (-1 means to only test after training)
     cfg.test.start_eval = 0 # start to evaluate after a specific epoch
     cfg.test.rerank = False # use person re-ranking
     cfg.test.visrank = False # visualize ranked results (only available when cfg.test.evaluate=True)
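The two behavioural edits in this file are pre-loading a Market-1501 checkpoint and evaluating every epoch instead of only at the end. A hedged sketch of how these fields are typically consumed through the torchreid API (the dataset root, save_dir and the engine construction below are illustrative, not taken from this diff):

```python
# Illustrative only: how load_weights and eval_freq usually reach the engine.
import torchreid
from torchreid.utils import load_pretrained_weights

datamanager = torchreid.data.ImageDataManager(root='reid-data', sources='market1501')
model = torchreid.models.build_model(
    name='osnet_x1_0', num_classes=datamanager.num_train_pids, loss='softmax'
)
load_pretrained_weights(model, 'log/osnet_x1_0_market1501_softmax_cosinelr/model.pth.tar-250')
model = model.cuda()

optimizer = torchreid.optim.build_optimizer(model, optim='adam', lr=0.0003)
engine = torchreid.engine.ImageSoftmaxEngine(datamanager, model, optimizer=optimizer)

# eval_freq=1 evaluates after every epoch; -1 would evaluate only once, after training.
engine.run(save_dir='log/osnet_x1_0', max_epoch=60, eval_freq=1, test_only=False)
```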
12 changes: 11 additions & 1 deletion scripts/main.py
@@ -16,8 +16,8 @@
     get_default_config, lr_scheduler_kwargs
 )

-
 def build_datamanager(cfg):
+    print(f"Building data manager for data type: {cfg.data.type}")
     if cfg.data.type == 'image':
         return torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))
     else:
@@ -121,6 +121,11 @@ def main():
     parser.add_argument(
         '--root', type=str, default='', help='path to data root'
     )
+
+    parser.add_argument(
+        '--data-type', type=str, help="data type: 'image' or 'video'"
+    )
+
     parser.add_argument(
         'opts',
         default=None,
@@ -131,9 +136,14 @@

     cfg = get_default_config()
     cfg.use_gpu = torch.cuda.is_available()
+
     if args.config_file:
         cfg.merge_from_file(args.config_file)
     reset_config(cfg, args)
+
+    if args.data_type:
+        cfg.data.type = args.data_type
+
     cfg.merge_from_list(args.opts)
     set_random_seed(cfg.train.seed)
     check_cfg(cfg)
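A small sketch of what the new flag does once main() runs: the parsed value simply overrides cfg.data.type before the remaining command-line opts are merged. The import path is an assumption about how scripts/ is made importable:

```python
# Sketch only: the --data-type override in isolation.
import argparse
from default_config import get_default_config  # assumes scripts/ is on sys.path

parser = argparse.ArgumentParser()
parser.add_argument('--data-type', type=str, help="data type: 'image' or 'video'")
args = parser.parse_args(['--data-type', 'video'])

cfg = get_default_config()
if args.data_type:
    cfg.data.type = args.data_type  # build_datamanager() will now pick VideoDataManager

print(cfg.data.type)  # 'video'
```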
22 changes: 22 additions & 0 deletions scripts/register_dataset.py
@@ -0,0 +1,22 @@


from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import sys
import os
import os.path as osp
import torchreid
from torchreid.data.datasets.video.FRIDA import FRIDA
from torchreid.data.datasets.image.FRIDAimg import FRIDAimg

try:
    torchreid.data.register_video_dataset('FRIDA', FRIDA)
except Exception as e:
    print(e)

# try:
#     torchreid.data.register_image_dataset('FRIDAimg', FRIDAimg)
# except Exception as e:
#     print(e)
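
Once the registration above has run, 'FRIDA' can be used like any built-in source. A hedged usage sketch: the root path is a placeholder, and it assumes the video FRIDA class accepts a root keyword like the image variant added in this PR:

```python
# Sketch only: consume the registered dataset through the standard data manager.
import torchreid
import register_dataset  # running this module performs the registration above

datamanager = torchreid.data.VideoDataManager(
    root='reid-data',
    sources='FRIDA',
    seq_len=15,
    sample_method='evenly'
)
```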
29 changes: 29 additions & 0 deletions testamany.py
@@ -0,0 +1,29 @@
from torchreid.utils import FeatureExtractor

market1501_model = 'log/osnet_x1_0_market1501_softmax_cosinelr/model/model.pth.tar-250'
frida_model = 'log/osnet_x1_0_market1501_softmax_cosinelr/model/frida_model.pth.tar-3'

extractor_1 = FeatureExtractor(
    model_name='osnet_x1_0',
    model_path=market1501_model,
    device='cuda'
)

extractor_2 = FeatureExtractor(
    model_name='osnet_x1_0',
    model_path=frida_model,
    device='cuda'
)

image_list = [
    '../FRIDA/BBs/Segment_1/0001155/Camera_1/person_01.jpg',
    '../FRIDA/BBs/Segment_1/0001155/Camera_1/person_02.jpg',
    '../FRIDA/BBs/Segment_1/0001155/Camera_1/person_03.jpg'
]

features_market = extractor_1(image_list)
features_frida = extractor_2(image_list)
print("Features MARKET1501")
print(features_market)
print("Features FRIDA")
print(features_frida)
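
A possible follow-up to the script above: the extractor returns one feature vector per crop, so the two checkpoints can be compared directly, for example with per-image cosine similarity (a sketch, not part of this diff):

```python
import torch.nn.functional as F

# L2-normalise, then take the dot product row by row: one similarity per input image.
market_norm = F.normalize(features_market, p=2, dim=1)
frida_norm = F.normalize(features_frida, p=2, dim=1)
per_image_similarity = (market_norm * frida_norm).sum(dim=1)
print(per_image_similarity)  # values in [-1, 1]
```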
14 changes: 8 additions & 6 deletions torchreid/data/datasets/__init__.py
@@ -2,9 +2,9 @@

 from .image import (
     GRID, PRID, CUHK01, CUHK02, CUHK03, MSMT17, CUHKSYSU, VIPeR, SenseReID,
-    Market1501, DukeMTMCreID, University1652, iLIDS
+    Market1501, DukeMTMCreID, University1652, iLIDS, FRIDAimg
 )
-from .video import PRID2011, Mars, DukeMTMCVidReID, iLIDSVID
+from .video import PRID2011, Mars, DukeMTMCVidReID, iLIDSVID, FRIDA
 from .dataset import Dataset, ImageDataset, VideoDataset

 __image_datasets = {
@@ -20,14 +20,16 @@
     'prid': PRID,
     'cuhk02': CUHK02,
     'university1652': University1652,
-    'cuhksysu': CUHKSYSU
+    'cuhksysu': CUHKSYSU,
+    'FRIDAimg': FRIDAimg
 }

 __video_datasets = {
     'mars': Mars,
     'ilidsvid': iLIDSVID,
     'prid2011': PRID2011,
-    'dukemtmcvidreid': DukeMTMCVidReID
+    'dukemtmcvidreid': DukeMTMCVidReID,
+    'FRIDA': FRIDA
 }


@@ -37,7 +39,7 @@ def init_image_dataset(name, **kwargs):
     if name not in avai_datasets:
         raise ValueError(
             'Invalid dataset name. Received "{}", '
-            'but expected to be one of {}'.format(name, avai_datasets)
+            'but expected to be one of image datasets: {}'.format(name, avai_datasets)
         )
     return __image_datasets[name](**kwargs)

@@ -48,7 +50,7 @@ def init_video_dataset(name, **kwargs):
     if name not in avai_datasets:
         raise ValueError(
             'Invalid dataset name. Received "{}", '
-            'but expected to be one of {}'.format(name, avai_datasets)
+            'but expected to be one of video datasets: {}'.format(name, avai_datasets)
         )
     return __video_datasets[name](**kwargs)

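A hedged example of the lookups these dictionaries back, using the newly added names; the root path is a placeholder, the FRIDA files must exist on disk, and it assumes the video FRIDA class mirrors the image class constructor:

```python
from torchreid.data.datasets import init_image_dataset, init_video_dataset

# Unknown names now fail with the more specific "image datasets" / "video datasets" message.
frida_images = init_image_dataset('FRIDAimg', root='reid-data')
frida_videos = init_video_dataset('FRIDA', root='reid-data')
```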
4 changes: 3 additions & 1 deletion torchreid/data/datasets/dataset.py
@@ -324,8 +324,10 @@ def __init__(self, train, query, gallery, **kwargs):
     def __getitem__(self, index):
         img_path, pid, camid, dsetid = self.data[index]
         img = read_image(img_path)
+
         if self.transform is not None:
-            img = self._transform_image(self.transform, self.k_tfm, img)
+            if img is not None:
+                img = self._transform_image(self.transform, self.k_tfm, img)
         item = {
             'img': img,
             'pid': pid,
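Not part of this diff, but relevant to the guard above: if read_image() really can return None, the item still reaches the DataLoader with img=None and will fail at collation, so a fallback may be needed. A minimal sketch with placeholder dimensions:

```python
from PIL import Image

def image_or_placeholder(img, width=128, height=256):
    """Return the loaded image, or a blank RGB crop when loading failed."""
    if img is None:
        return Image.new('RGB', (width, height))
    return img
```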
175 changes: 175 additions & 0 deletions torchreid/data/datasets/image/FRIDAimg.py
@@ -0,0 +1,175 @@

from __future__ import absolute_import, division, print_function

import os
import os.path as osp
import json
import random
import shutil
import warnings
from collections import defaultdict

from ..dataset import ImageDataset


class FRIDAimg(ImageDataset):
    """FRIDA dataset (image version).

    Args:
        root (str): path to the directory containing the FRIDA ``Annotations`` and ``BBs`` folders.
    """

    data_dir = ''
    data_url = None

    def __init__(self, root='', **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.data_dir = osp.join(self.root, self.data_dir)
        self.train_dirs = [f"Segment_{i + 1}" for i in range(4)]  # FRIDA has 4 segments
        self.cameras = ['Camera_1', 'Camera_2', 'Camera_3']  # FRIDA has 3 cameras
        self._check_before_run()

        train, test, num_train_tracklets, num_test_tracklets, num_train_pids, num_test_pids, selected_persons_test = \
            self._process_data(self.train_dirs, min_seq_len=0, num_train_ids=10)

        self.train = train
        self.test = test
        self.num_train_pids = num_train_pids
        self.num_test_pids = num_test_pids
        self.num_train_cams = len(self.cameras)
        self.num_test_cams = len(self.cameras)
        self.num_train_vids = num_train_tracklets
        self.num_test_vids = num_test_tracklets

        print(f"First 3 tracklets in train IMAGES: {self.train[:3]}")

        query, gallery, tracklet_query, tracklet_gallery, num_query_pids, num_gallery_pids = \
            self._create_query_gallery(self.test, selected_persons_test)
        self.query = query
        self.gallery = gallery
        print(f"First 3 tracklets in query IMAGES: {self.query[:3]}")
        print(f"First 3 tracklets in gallery IMAGES: {self.gallery[:3]}")

        num_query_tracklets = tracklet_query
        num_gallery_tracklets = tracklet_gallery

        super(FRIDAimg, self).__init__(train, query, gallery, **kwargs)

    def _check_before_run(self):
        """Check if all files are available before going deeper."""
        if not os.path.exists(self.data_dir):
            raise RuntimeError("'{}' is not available".format(self.data_dir))

    def _process_data(self, dirnames, min_seq_len=0, num_train_ids=10):
        tracklets_train = []
        tracklets_test = []

        pid_container = list(range(1, 21))

        # Randomly shuffle the list of person IDs
        # random.shuffle(pid_container)

        random.seed(15)

        # Select num_train_ids persons for training and keep the rest for testing
        selected_persons_train = random.sample(pid_container, num_train_ids)  # pid_container[:num_train_ids]
        selected_persons_test = [pid for pid in pid_container if pid not in selected_persons_train]  # pid_container[num_train_ids:]

        print("selected_persons_train: ", selected_persons_train)
        print("selected_persons_test: ", selected_persons_test)

        # Define label_dict for the training set
        labelset = selected_persons_train
        label_dict = {label: index for index, label in enumerate(labelset)}

        # Keep track of person IDs already added to the training and testing sets
        added_persons_train = set()
        added_persons_test = set()

        for segment in dirnames:
            for camera in self.cameras:
                json_file = os.path.join(self.data_dir, 'Annotations', segment, camera, 'data2.json')
                with open(json_file, 'r') as f:
                    data = json.load(f)

                for person_info in data:
                    img_id = person_info['image_id']
                    pid = person_info['person_id']
                    person_id = f'person_{str(pid).zfill(2)}'  # Convert integer ID to zero-padded string

                    for camera_idx in range(len(self.cameras)):
                        camera_name = f'Camera_{camera_idx + 1}'
                        img_path = os.path.join(self.data_dir, 'BBs', segment, img_id, camera_name, f'{person_id}.jpg')

                        if os.path.exists(img_path):
                            if pid in selected_persons_train:
                                if len(tracklets_train) < 100000:
                                    if pid not in added_persons_train:
                                        tracklet = (img_path, label_dict[pid], camera_idx)
                                        tracklets_train.append(tracklet)
                                        added_persons_train.add(pid)
                                    elif pid in added_persons_train and len(added_persons_train) == len(selected_persons_train):
                                        tracklet = (img_path, label_dict[pid], camera_idx)
                                        tracklets_train.append(tracklet)
                            elif pid in selected_persons_test:
                                # if len(tracklets_test) < 80000:
                                if pid not in added_persons_test:
                                    tracklet = (img_path, pid, camera_idx)
                                    tracklets_test.append(tracklet)
                                    added_persons_test.add(pid)
                                elif pid in added_persons_test and len(added_persons_test) == len(selected_persons_test):
                                    tracklet = (img_path, pid, camera_idx)
                                    tracklets_test.append(tracklet)

        num_train_tracklets = len(tracklets_train)
        num_test_tracklets = len(tracklets_test)
        num_train_pids = len(added_persons_train)
        num_test_pids = len(added_persons_test)

        return tracklets_train, tracklets_test, num_train_tracklets, num_test_tracklets, \
            num_train_pids, num_test_pids, selected_persons_test

    def _create_query_gallery(self, tracklets, selected_persons):
        query = []
        gallery = []
        tracklet_query, tracklet_gallery = 0, 0
        num_query_pids, num_gallery_pids = set(), set()

        for tracklet in tracklets:
            img_path, person_id, camera_idx = tracklet
            if person_id == 5 and camera_idx != 0:
                print(" REQUIRED TRACKLET FOUND!")

            if camera_idx == 0:  # Camera A (query)
                if tracklet_query < 15000:
                    query.append(tracklet)
                    tracklet_query += 1
                    num_query_pids.add(person_id)
            else:  # Other cameras (gallery)
                if tracklet_gallery < 30000:
                    gallery.append(tracklet)
                    tracklet_gallery += 1
                    num_gallery_pids.add(person_id)

        print("num_query_pids: ", list(num_query_pids))
        print("num_gallery_pids: ", list(num_gallery_pids))
        print("selected persons: ", selected_persons)

        num_query_pids = len(list(num_query_pids))
        num_gallery_pids = len(list(num_gallery_pids))

        return query, gallery, tracklet_query, tracklet_gallery, num_query_pids, num_gallery_pids
1 change: 1 addition & 0 deletions torchreid/data/datasets/image/__init__.py
@@ -13,3 +13,4 @@
 from .market1501 import Market1501
 from .dukemtmcreid import DukeMTMCreID
 from .university1652 import University1652
+from .FRIDAimg import FRIDAimg
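
With the class exported here, a hedged smoke test for the new dataset; the root path is a placeholder and must contain the Annotations/ and BBs/ folders referenced above:

```python
from torchreid.data.datasets.image.FRIDAimg import FRIDAimg

dataset = FRIDAimg(root='../FRIDA')  # placeholder path
print(dataset.num_train_pids, len(dataset.query), len(dataset.gallery))
```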