12 changes: 6 additions & 6 deletions chapter_computer-vision/semantic-segmentation-and-dataset.md
@@ -447,7 +447,7 @@ voc_test = VOCSegDataset(False, crop_size, voc_dir)
 batch_size = 64
 train_iter = gluon.data.DataLoader(voc_train, batch_size, shuffle=True,
                                    last_batch='discard',
-                                   num_workers=d2l.get_dataloader_workers())
+                                   num_workers=0)
 for X, Y in train_iter:
     print(X.shape)
     print(Y.shape)
@@ -459,7 +459,7 @@ for X, Y in train_iter:
 batch_size = 64
 train_iter = torch.utils.data.DataLoader(voc_train, batch_size, shuffle=True,
                                          drop_last=True,
-                                         num_workers=d2l.get_dataloader_workers())
+                                         num_workers=0)
 for X, Y in train_iter:
     print(X.shape)
     print(Y.shape)
@@ -472,7 +472,7 @@ batch_size = 64
 train_iter = paddle.io.DataLoader(voc_train, batch_size=batch_size, shuffle=True,
                                   drop_last=True,
                                   return_list=True,
-                                  num_workers=d2l.get_dataloader_workers())
+                                  num_workers=0)
 for X, Y in train_iter:
     print(X.shape)
     print(Y.shape)
@@ -490,7 +490,7 @@ def load_data_voc(batch_size, crop_size):
     """Load the VOC semantic segmentation dataset."""
     voc_dir = d2l.download_extract('voc2012', os.path.join(
         'VOCdevkit', 'VOC2012'))
-    num_workers = d2l.get_dataloader_workers()
+    num_workers = 0
     train_iter = gluon.data.DataLoader(
         VOCSegDataset(True, crop_size, voc_dir), batch_size,
         shuffle=True, last_batch='discard', num_workers=num_workers)
@@ -507,7 +507,7 @@ def load_data_voc(batch_size, crop_size):
     """Load the VOC semantic segmentation dataset."""
     voc_dir = d2l.download_extract('voc2012', os.path.join(
         'VOCdevkit', 'VOC2012'))
-    num_workers = d2l.get_dataloader_workers()
+    num_workers = 0
     train_iter = torch.utils.data.DataLoader(
         VOCSegDataset(True, crop_size, voc_dir), batch_size,
         shuffle=True, drop_last=True, num_workers=num_workers)
@@ -524,7 +524,7 @@ def load_data_voc(batch_size, crop_size):
     """Load the VOC semantic segmentation dataset."""
     voc_dir = d2l.download_extract('voc2012', os.path.join(
         'VOCdevkit', 'VOC2012'))
-    num_workers = d2l.get_dataloader_workers()
+    num_workers = 0
     train_iter = paddle.io.DataLoader(
         VOCSegDataset(True, crop_size, voc_dir), batch_size=batch_size,
         shuffle=True, return_list=True, drop_last=True, num_workers=num_workers)
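
Hardcoding `num_workers=0` disables multiprocess data loading: `d2l.get_dataloader_workers()` returns 4, and spawning worker processes is a common source of hangs and shared-memory errors on Windows, macOS, and constrained notebook or container environments. The trade-off is a slower input pipeline where workers do run reliably. A middle-ground sketch (`safe_num_workers` is a hypothetical helper, not part of d2l or this patch):

```python
import os
import sys

def safe_num_workers(preferred=4):
    """Hypothetical helper: use worker processes only where they tend to be
    reliable; otherwise load data in the main process (num_workers=0)."""
    # Platforms that spawn (rather than fork) worker processes are the usual
    # source of DataLoader deadlocks, so fall back to single-process loading.
    if sys.platform in ('win32', 'darwin'):
        return 0
    return min(preferred, os.cpu_count() or 1)

# Then, instead of pinning 0 everywhere:
# train_iter = torch.utils.data.DataLoader(voc_train, batch_size, shuffle=True,
#                                          drop_last=True,
#                                          num_workers=safe_num_workers())
```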
8 changes: 4 additions & 4 deletions (English version of the same chapter; file path not shown in the diff)
@@ -383,7 +383,7 @@ Different from in image classification or object detection, labels here are three
 batch_size = 64
 train_iter = gluon.data.DataLoader(voc_train, batch_size, shuffle=True,
                                    last_batch='discard',
-                                   num_workers=d2l.get_dataloader_workers())
+                                   num_workers=0)
 for X, Y in train_iter:
     print(X.shape)
     print(Y.shape)
@@ -395,7 +395,7 @@ for X, Y in train_iter:
 batch_size = 64
 train_iter = torch.utils.data.DataLoader(voc_train, batch_size, shuffle=True,
                                          drop_last=True,
-                                         num_workers=d2l.get_dataloader_workers())
+                                         num_workers=0)
 for X, Y in train_iter:
     print(X.shape)
     print(Y.shape)
@@ -414,7 +414,7 @@ def load_data_voc(batch_size, crop_size):
     """Load the VOC semantic segmentation dataset."""
     voc_dir = d2l.download_extract('voc2012', os.path.join(
         'VOCdevkit', 'VOC2012'))
-    num_workers = d2l.get_dataloader_workers()
+    num_workers = 0
     train_iter = gluon.data.DataLoader(
         VOCSegDataset(True, crop_size, voc_dir), batch_size,
         shuffle=True, last_batch='discard', num_workers=num_workers)
@@ -431,7 +431,7 @@ def load_data_voc(batch_size, crop_size):
     """Load the VOC semantic segmentation dataset."""
     voc_dir = d2l.download_extract('voc2012', os.path.join(
         'VOCdevkit', 'VOC2012'))
-    num_workers = d2l.get_dataloader_workers()
+    num_workers = 0
     train_iter = torch.utils.data.DataLoader(
         VOCSegDataset(True, crop_size, voc_dir), batch_size,
         shuffle=True, drop_last=True, num_workers=num_workers)
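
For reference, a usage sketch of the patched `load_data_voc` (PyTorch tab; the `(320, 480)` crop size is the one the book uses in its fully convolutional network section):

```python
# The first call downloads and caches VOC2012, then returns train/test iterators.
train_iter, test_iter = load_data_voc(batch_size=64, crop_size=(320, 480))
for X, Y in train_iter:
    print(X.shape)  # torch.Size([64, 3, 320, 480])
    print(Y.shape)  # torch.Size([64, 320, 480])
    break
```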
13 changes: 8 additions & 5 deletions chapter_recurrent-modern/seq2seq.md
@@ -609,17 +609,20 @@ class MaskedSoftmaxCELoss(gluon.loss.SoftmaxCELoss):
 ```{.python .input}
 #@tab pytorch
 #@save
-class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
+class MaskedSoftmaxCELoss(nn.Module):
     """The softmax cross-entropy loss with masks."""
     # pred shape: (batch_size, num_steps, vocab_size)
     # label shape: (batch_size, num_steps)
     # valid_len shape: (batch_size,)
+    def __init__(self, **kwargs):
+        super(MaskedSoftmaxCELoss, self).__init__(**kwargs)
+        # Hold an nn.CrossEntropyLoss instance as a member of this class;
+        # reduction='none' returns the unaggregated per-step loss so it can be masked
+        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')
     def forward(self, pred, label, valid_len):
         weights = torch.ones_like(label)
         weights = sequence_mask(weights, valid_len)
-        self.reduction='none'
-        unweighted_loss = super(MaskedSoftmaxCELoss, self).forward(
-            pred.permute(0, 2, 1), label)
+        unweighted_loss = self.cross_entropy(pred.permute(0, 2, 1), label)
         weighted_loss = (unweighted_loss * weights).mean(dim=1)
         return weighted_loss
 ```
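
The rewrite is behavior-preserving but cleaner: the old version subclassed `nn.CrossEntropyLoss` and mutated `self.reduction` inside `forward`, while the new one holds a `reduction='none'` loss as a member and never mutates shared state. A quick check (a sketch assuming the patched `MaskedSoftmaxCELoss` above is in scope; `sequence_mask` is reproduced here to match the chapter's helper):

```python
import torch

def sequence_mask(X, valid_len, value=0):
    """Zero out entries past each sequence's valid length
    (same behavior as the chapter's sequence_mask)."""
    maxlen = X.size(1)
    mask = torch.arange(maxlen, dtype=torch.float32,
                        device=X.device)[None, :] < valid_len[:, None]
    X[~mask] = value
    return X

# Uniform logits over a vocabulary of 10: every valid step costs log(10).
loss = MaskedSoftmaxCELoss()
print(loss(torch.ones(3, 4, 10), torch.ones((3, 4), dtype=torch.long),
           torch.tensor([4, 2, 0])))
# tensor([2.3026, 1.1513, 0.0000]) -- steps past valid_len contribute nothing,
# so each sequence's mean loss shrinks with its valid length.
```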
@@ -1120,4 +1123,4 @@ for eng, fra in zip(engs, fras):
 
 :begin_tab:`paddle`
 [Discussions](https://discuss.d2l.ai/t/11838)
-:end_tab:
\ No newline at end of file
+:end_tab: