Softmax Loss

Aug. 9, 2019, 10:27 p.m.

read: 54

0x00 交叉熵Loss(Softmax Loss)

import torch
import torch.nn as nn
import torch.nn.functional as F

# 自己实现一个 Softmax Loss（交叉熵损失，LogSoftmaxLoss）
# 可以看到它与 nn.CrossEntropyLoss 的计算结果一致
class LogSoftmaxLoss:
    """Hand-written softmax cross-entropy loss.

    Given raw logits ``input`` of shape ``(batch, num_classes)`` and integer
    class labels ``label`` of shape ``(batch,)``, ``__call__`` returns the
    mean negative log-likelihood — the same quantity as
    ``nn.CrossEntropyLoss`` with default settings.
    """

    def _onehot_encoding_(self, label, num_classes):
        """Return a ``(batch, num_classes)`` float one-hot tensor for ``label``."""
        label = label.reshape(-1, 1)
        result = torch.zeros(label.shape[0], num_classes)
        # scatter_ writes 1 at column `label[i]` of each row i.
        return result.scatter_(1, label, 1)

    def __call__(self, input, label):
        num_classes = input.size(-1)
        label_one_hot = self._onehot_encoding_(label, num_classes)
        # Numerically stable log-softmax: subtract the per-row max before
        # exponentiating so exp() cannot overflow.  (The original computed
        # exp(input) directly, which overflows to inf for logits >~ 88 and
        # turns the loss into nan.)  Subtracting a constant per row leaves
        # the softmax — and hence the loss — unchanged.
        shifted = input - input.max(dim=-1, keepdim=True).values
        log_softmax = shifted - torch.log(torch.exp(shifted).sum(-1, keepdim=True))
        # Negative log-probability of the true class, averaged over the batch.
        nll = -(log_softmax * label_one_hot).sum(-1)
        return nll.mean()

# Sanity check: the hand-written loss should agree with nn.CrossEntropyLoss
# on the same random logits and labels.
batch_size = 10
output_size = 64
classes_size = 128

# Simulate a network head: random features projected through a random
# weight matrix to per-class logits.  Named `logits` rather than `input`
# to avoid shadowing the builtin.
net_out = torch.rand(batch_size, output_size)
weight = torch.rand(output_size, classes_size)
logits = torch.mm(net_out, weight)

# Random integer class targets in [0, classes_size).
label = torch.empty(batch_size, dtype=torch.long).random_(classes_size)

ce_loss = nn.CrossEntropyLoss()
output_1 = ce_loss(logits, label)

ls_loss = LogSoftmaxLoss()
output_2 = ls_loss(logits, label)

# The two printed values should match (up to float precision).
print(output_1)
print(output_2)

# 修改之后的代码：把线性分类层（nn.Linear）放进 Loss 内部，输入为特征而非 logits
class LogSoftmaxLoss:
    """Softmax cross-entropy loss with a built-in linear classifier head.

    ``__call__`` first projects ``input`` of shape ``(batch, input_size)``
    through an ``nn.Linear(input_size, num_classes)`` to per-class logits,
    then returns the mean negative log-likelihood of the integer labels —
    equivalent to ``nn.CrossEntropyLoss`` applied to ``self.fc(input)``.
    """

    def __init__(self, input_size, num_classes):
        # The classifier head whose output feeds the softmax.
        self.fc = nn.Linear(input_size, num_classes)
        self.num_classes = num_classes

    def _onehot_encoding_(self, label, num_classes):
        """Return a ``(batch, num_classes)`` float one-hot tensor for ``label``."""
        label = label.reshape(-1, 1)
        result = torch.zeros(label.shape[0], num_classes)
        # scatter_ writes 1 at column `label[i]` of each row i.
        return result.scatter_(1, label, 1)

    def __call__(self, input, label):
        fc_out = self.fc(input)
        label_one_hot = self._onehot_encoding_(label, self.num_classes)
        # Numerically stable log-softmax: subtract the per-row max before
        # exponentiating so exp() cannot overflow for large logits (the
        # original exp(fc_out) produces inf/nan once logits exceed ~88).
        shifted = fc_out - fc_out.max(dim=-1, keepdim=True).values
        log_softmax = shifted - torch.log(torch.exp(shifted).sum(-1, keepdim=True))
        # Negative log-probability of the true class, averaged over the batch.
        nll = -(log_softmax * label_one_hot).sum(-1)
        return nll.mean()

0x01 A-Softmax Loss

SSH 蜜罐数据收集计划

数据来自ssh蜜罐收集:共23639条,下面是部分数据 root/qyeee1234 root/cnnic root/utterly root/broke root/963256478...

浏览器UA标识收集计划

文章标题:浏览器UA标识收集计划文章内容:浏览器UA标识收集计划 以下UA从某网站的真实访问数据筛选去重而来包含操作系统信息的UA(1163条) Mozilla/5.0 (iPhone; CPU...

推荐使用 Firefox 访问此站点 | 友情链接: 张鹏的博客  Web布拉格  杨洋的博客   李号的博客   魏文成博客 | Developed by zhangpeng | Copyright © 2018-2019 hupeng.me