{"id":406,"date":"2022-08-14T15:20:16","date_gmt":"2022-08-14T07:20:16","guid":{"rendered":"http:\/\/www.gislxz.top\/?p=406"},"modified":"2022-08-14T16:15:07","modified_gmt":"2022-08-14T08:15:07","slug":"%e6%b7%b1%e5%ba%a6%e5%ad%a6%e4%b9%a0%e7%ac%94%e8%ae%b0%ef%bc%8811%ef%bc%89","status":"publish","type":"post","link":"https:\/\/www.gislxz.com\/index.php\/2022\/08\/14\/%e6%b7%b1%e5%ba%a6%e5%ad%a6%e4%b9%a0%e7%ac%94%e8%ae%b0%ef%bc%8811%ef%bc%89\/","title":{"rendered":"\u6df1\u5ea6\u5b66\u4e60\u7b14\u8bb0\uff0811\uff09"},"content":{"rendered":"\n<p><a href=\"https:\/\/aistudio.baidu.com\/aistudio\/projectdetail\/1613144\" target=\"_blank\"  rel=\"nofollow\" >\u5b98\u65b9\u6559\u7a0b\u56fe\u50cf\u5206\u7c7b<\/a>\u8fd9\u4e00\u7ae0\u5185\u5bb9\u5f88\u591a\uff0c\u6211\u4eec\u62c6\u6210\u4e09\u90e8\u5206\u6765\u770b<\/p>\n\n\n\n<p>\u8fd9\u4e00\u90e8\u5206\u5f15\u5165\u65b0\u7684\u6570\u636e\u96c6\u773c\u75be\u8bc6\u522b\u6570\u636e\u96c6iChallenge-PM\uff0c\u5e76\u590d\u73b0AlexNet\u6765\u8fdb\u884c\u8bc6\u522b\u3002\u540c\u6837\u4e5f\u662fpaddle\u548ctorch\u5bf9\u6bd4\u5b9e\u73b0\u3002<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">iChallenge-PM\u6570\u636e\u96c6<\/h2>\n\n\n\n<p>iChallenge-PM\u4e2d\u65e2\u6709\u75c5\u7406\u6027\u8fd1\u89c6\u60a3\u8005\u7684\u773c\u5e95\u56fe\u7247\uff0c\u4e5f\u6709\u975e\u75c5\u7406\u6027\u8fd1\u89c6\u60a3\u8005\u7684\u56fe\u7247\uff0c\u547d\u540d\u89c4\u5219\u5982\u4e0b\uff1a<\/p>\n\n\n\n<ul class=\"wp-block-list\"><li>\u75c5\u7406\u6027\u8fd1\u89c6\uff08PM\uff09\uff1a\u6587\u4ef6\u540d\u4ee5P\u5f00\u5934<\/li><li>\u975e\u75c5\u7406\u6027\u8fd1\u89c6\uff08non-PM\uff09\uff1a<ul><li>\u9ad8\u5ea6\u8fd1\u89c6\uff08high 
myopia\uff09\uff1a\u6587\u4ef6\u540d\u4ee5H\u5f00\u5934<\/li><li>\u6b63\u5e38\u773c\u775b\uff08normal\uff09\uff1a\u6587\u4ef6\u540d\u4ee5N\u5f00\u5934<\/li><\/ul><\/li><\/ul>\n\n\n\n<p>\u540c\u6837\u6211\u4eec\u4e5f\u9700\u8981\u5199\u4e00\u4e2adata_loader\u3002\u8fd9\u5957\u6570\u636e\u4e0e\u4e4b\u524d\u7684mnist\uff08\u4ee5json\u683c\u5f0f\u5b58\u50a8\uff09\u4e0d\u4e00\u6837\uff0c\u662f\u4ee5\u56fe\u50cf\u5b58\u50a8\u7684\uff0c\u800clabel\u5728\u6587\u4ef6\u540d\u4e2d\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>import cv2\nimport random\nimport numpy as np\nimport os\n\n# \u5bf9\u8bfb\u5165\u7684\u56fe\u50cf\u6570\u636e\u8fdb\u884c\u9884\u5904\u7406\ndef transform_img(img):\n    # \u5c06\u56fe\u7247\u5c3a\u5bf8\u7f29\u653e\u9053 224x224\n    img = cv2.resize(img, (224, 224))\n    # \u8bfb\u5165\u7684\u56fe\u50cf\u6570\u636e\u683c\u5f0f\u662f&#91;H, W, C]\n    # \u4f7f\u7528\u8f6c\u7f6e\u64cd\u4f5c\u5c06\u5176\u53d8\u6210&#91;C, H, W]\n    img = np.transpose(img, (2,0,1))\n    img = img.astype('float32')\n    # \u5c06\u6570\u636e\u8303\u56f4\u8c03\u6574\u5230&#91;-1.0, 1.0]\u4e4b\u95f4\n    img = img \/ 255.\n    img = img * 2.0 - 1.0\n    return img\n\n# \u5b9a\u4e49\u8bad\u7ec3\u96c6\u6570\u636e\u8bfb\u53d6\u5668\ndef data_loader(datadir, batch_size=10, mode = 'train'):\n    # \u5c06datadir\u76ee\u5f55\u4e0b\u7684\u6587\u4ef6\u5217\u51fa\u6765\uff0c\u6bcf\u6761\u6587\u4ef6\u90fd\u8981\u8bfb\u5165\n    filenames = os.listdir(datadir)\n    def reader():\n        if mode == 'train':\n            # \u8bad\u7ec3\u65f6\u968f\u673a\u6253\u4e71\u6570\u636e\u987a\u5e8f\n            random.shuffle(filenames)\n        batch_imgs = &#91;]\n        batch_labels = &#91;]\n        for name in filenames:\n            filepath = os.path.join(datadir, name)\n            img = cv2.imread(filepath)\n            img = transform_img(img)\n            if name&#91;0] == 'H' or name&#91;0] == 'N':\n                # 
H\u5f00\u5934\u7684\u6587\u4ef6\u540d\u8868\u793a\u9ad8\u5ea6\u8fd1\u4f3c\uff0cN\u5f00\u5934\u7684\u6587\u4ef6\u540d\u8868\u793a\u6b63\u5e38\u89c6\u529b\n                # \u9ad8\u5ea6\u8fd1\u89c6\u548c\u6b63\u5e38\u89c6\u529b\u7684\u6837\u672c\uff0c\u90fd\u4e0d\u662f\u75c5\u7406\u6027\u7684\uff0c\u5c5e\u4e8e\u8d1f\u6837\u672c\uff0c\u6807\u7b7e\u4e3a0\n                label = 0\n            elif name&#91;0] == 'P':\n                # P\u5f00\u5934\u7684\u662f\u75c5\u7406\u6027\u8fd1\u89c6\uff0c\u5c5e\u4e8e\u6b63\u6837\u672c\uff0c\u6807\u7b7e\u4e3a1\n                label = 1\n            else:\n                raise('Not excepted file name')\n            # \u6bcf\u8bfb\u53d6\u4e00\u4e2a\u6837\u672c\u7684\u6570\u636e\uff0c\u5c31\u5c06\u5176\u653e\u5165\u6570\u636e\u5217\u8868\u4e2d\n            batch_imgs.append(img)\n            batch_labels.append(label)\n            if len(batch_imgs) == batch_size:\n                # \u5f53\u6570\u636e\u5217\u8868\u7684\u957f\u5ea6\u7b49\u4e8ebatch_size\u7684\u65f6\u5019\uff0c\n                # \u628a\u8fd9\u4e9b\u6570\u636e\u5f53\u4f5c\u4e00\u4e2amini-batch\uff0c\u5e76\u4f5c\u4e3a\u6570\u636e\u751f\u6210\u5668\u7684\u4e00\u4e2a\u8f93\u51fa\n                imgs_array = np.array(batch_imgs).astype('float32')\n                labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)\n                yield imgs_array, labels_array\n                batch_imgs = &#91;]\n                batch_labels = &#91;]\n\n        if len(batch_imgs) &gt; 0:\n            # \u5269\u4f59\u6837\u672c\u6570\u76ee\u4e0d\u8db3\u4e00\u4e2abatch_size\u7684\u6570\u636e\uff0c\u4e00\u8d77\u6253\u5305\u6210\u4e00\u4e2amini-batch\n            imgs_array = np.array(batch_imgs).astype('float32')\n            labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)\n            yield imgs_array, labels_array\n\n    return reader\n\n# \u5b9a\u4e49\u9a8c\u8bc1\u96c6\u6570\u636e\u8bfb\u53d6\u5668\ndef valid_data_loader(datadir, csvfile, 
batch_size=10, mode='valid'):\n    # \u8bad\u7ec3\u96c6\u8bfb\u53d6\u65f6\u901a\u8fc7\u6587\u4ef6\u540d\u6765\u786e\u5b9a\u6837\u672c\u6807\u7b7e\uff0c\u9a8c\u8bc1\u96c6\u5219\u901a\u8fc7csvfile\u6765\u8bfb\u53d6\u6bcf\u4e2a\u56fe\u7247\u5bf9\u5e94\u7684\u6807\u7b7e\n    # \u8bf7\u67e5\u770b\u89e3\u538b\u540e\u7684\u9a8c\u8bc1\u96c6\u6807\u7b7e\u6570\u636e\uff0c\u89c2\u5bdfcsvfile\u6587\u4ef6\u91cc\u9762\u6240\u5305\u542b\u7684\u5185\u5bb9\n    # csvfile\u6587\u4ef6\u6240\u5305\u542b\u7684\u5185\u5bb9\u683c\u5f0f\u5982\u4e0b\uff0c\u6bcf\u4e00\u884c\u4ee3\u8868\u4e00\u4e2a\u6837\u672c\uff0c\n    # \u5176\u4e2d\u7b2c\u4e00\u5217\u662f\u56fe\u7247id\uff0c\u7b2c\u4e8c\u5217\u662f\u6587\u4ef6\u540d\uff0c\u7b2c\u4e09\u5217\u662f\u56fe\u7247\u6807\u7b7e\uff0c\n    # \u7b2c\u56db\u5217\u548c\u7b2c\u4e94\u5217\u662fFovea\u7684\u5750\u6807\uff0c\u4e0e\u5206\u7c7b\u4efb\u52a1\u65e0\u5173\n    # ID,imgName,Label,Fovea_X,Fovea_Y\n    # 1,V0001.jpg,0,1157.74,1019.87\n    # 2,V0002.jpg,1,1285.82,1080.47\n    # \u6253\u5f00\u5305\u542b\u9a8c\u8bc1\u96c6\u6807\u7b7e\u7684csvfile\uff0c\u5e76\u8bfb\u5165\u5176\u4e2d\u7684\u5185\u5bb9\n    filelists = open(csvfile).readlines()\n    def reader():\n        batch_imgs = &#91;]\n        batch_labels = &#91;]\n        for line in filelists&#91;1:]:\n            line = line.strip().split(',')\n            name = line&#91;1]\n            label = int(line&#91;2])\n            # \u6839\u636e\u56fe\u7247\u6587\u4ef6\u540d\u52a0\u8f7d\u56fe\u7247\uff0c\u5e76\u5bf9\u56fe\u50cf\u6570\u636e\u4f5c\u9884\u5904\u7406\n            filepath = os.path.join(datadir, name)\n            img = cv2.imread(filepath)\n            img = transform_img(img)\n            # \u6bcf\u8bfb\u53d6\u4e00\u4e2a\u6837\u672c\u7684\u6570\u636e\uff0c\u5c31\u5c06\u5176\u653e\u5165\u6570\u636e\u5217\u8868\u4e2d\n            batch_imgs.append(img)\n            batch_labels.append(label)\n            if len(batch_imgs) == batch_size:\n                # 
\u5f53\u6570\u636e\u5217\u8868\u7684\u957f\u5ea6\u7b49\u4e8ebatch_size\u7684\u65f6\u5019\uff0c\n                # \u628a\u8fd9\u4e9b\u6570\u636e\u5f53\u4f5c\u4e00\u4e2amini-batch\uff0c\u5e76\u4f5c\u4e3a\u6570\u636e\u751f\u6210\u5668\u7684\u4e00\u4e2a\u8f93\u51fa\n                imgs_array = np.array(batch_imgs).astype('float32')\n                labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)\n                yield imgs_array, labels_array\n                batch_imgs = &#91;]\n                batch_labels = &#91;]\n\n        if len(batch_imgs) &gt; 0:\n            # \u5269\u4f59\u6837\u672c\u6570\u76ee\u4e0d\u8db3\u4e00\u4e2abatch_size\u7684\u6570\u636e\uff0c\u4e00\u8d77\u6253\u5305\u6210\u4e00\u4e2amini-batch\n            imgs_array = np.array(batch_imgs).astype('float32')\n            labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)\n            yield imgs_array, labels_array\n\n    return reader\n\n# \u67e5\u770b\u6570\u636e\u5f62\u72b6\nDATADIR = '\/home\/aistudio\/work\/palm\/PALM-Training400\/PALM-Training400'\ntrain_loader = data_loader(DATADIR, \n                           batch_size=10, mode='train')\ndata_reader = train_loader()\ndata = next(data_reader)\ndata&#91;0].shape, data&#91;1].shape\n\neval_loader = data_loader(DATADIR, \n                           batch_size=10, mode='eval')\ndata_reader = eval_loader()\ndata = next(data_reader)\ndata&#91;0].shape, data&#91;1].shape<\/code><\/pre>\n\n\n\n<p>\u8fd9\u91cc\u5c45\u7136\u51fd\u6570\u5d4c\u5957\u5b9a\u4e49\uff0c\u6211\u4e0d\u662f\u592a\u7406\u89e3\uff0c\u53ef\u80fd\u8fd9\u5c31\u662f\u5927\u4f6c\u7684coding\u4e60\u60ef\u5427<\/p>\n\n\n\n<p>\u8bad\u7ec3\u8fc7\u7a0b<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code># -*- coding: utf-8 -*-\n# LeNet \u8bc6\u522b\u773c\u75be\u56fe\u7247\nimport os\nimport random\nimport paddle\nimport numpy as np\n\nDATADIR = '\/home\/aistudio\/work\/palm\/PALM-Training400\/PALM-Training400'\nDATADIR2 = 
'\/home\/aistudio\/work\/palm\/PALM-Validation400'\nCSVFILE = '\/home\/aistudio\/labels.csv'\n\n# \u5b9a\u4e49\u8bad\u7ec3\u8fc7\u7a0b\ndef train_pm(model, optimizer):\n    # \u5f00\u542f0\u53f7GPU\u8bad\u7ec3\n    use_gpu = True\n    paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')\n\n    print('start training ... ')\n    model.train()\n    epoch_num = 5\n    # \u5b9a\u4e49\u6570\u636e\u8bfb\u53d6\u5668\uff0c\u8bad\u7ec3\u6570\u636e\u8bfb\u53d6\u5668\u548c\u9a8c\u8bc1\u6570\u636e\u8bfb\u53d6\u5668\n    train_loader = data_loader(DATADIR, batch_size=10, mode='train')\n    valid_loader = valid_data_loader(DATADIR2, CSVFILE)\n    for epoch in range(epoch_num):\n        for batch_id, data in enumerate(train_loader()):\n            x_data, y_data = data\n            img = paddle.to_tensor(x_data)\n            label = paddle.to_tensor(y_data)\n            # \u8fd0\u884c\u6a21\u578b\u524d\u5411\u8ba1\u7b97\uff0c\u5f97\u5230\u9884\u6d4b\u503c\n            logits = model(img)\n            loss = F.binary_cross_entropy_with_logits(logits, label)\n            avg_loss = paddle.mean(loss)\n\n            if batch_id % 10 == 0:\n                print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.numpy()))\n            # \u53cd\u5411\u4f20\u64ad\uff0c\u66f4\u65b0\u6743\u91cd\uff0c\u6e05\u9664\u68af\u5ea6\n            avg_loss.backward()\n            optimizer.step()\n            optimizer.clear_grad()\n\n        model.eval()\n        accuracies = &#91;]\n        losses = &#91;]\n        for batch_id, data in enumerate(valid_loader()):\n            x_data, y_data = data\n            img = paddle.to_tensor(x_data)\n            label = paddle.to_tensor(y_data)\n            # \u8fd0\u884c\u6a21\u578b\u524d\u5411\u8ba1\u7b97\uff0c\u5f97\u5230\u9884\u6d4b\u503c\n            logits = model(img)\n            # \u4e8c\u5206\u7c7b\uff0csigmoid\u8ba1\u7b97\u540e\u7684\u7ed3\u679c\u4ee50.5\u4e3a\u9608\u503c\u5206\u4e24\u4e2a\u7c7b\u522b\n       
     # \u8ba1\u7b97sigmoid\u540e\u7684\u9884\u6d4b\u6982\u7387\uff0c\u8fdb\u884closs\u8ba1\u7b97\n            pred = F.sigmoid(logits)\n            loss = F.binary_cross_entropy_with_logits(logits, label)\n            # \u8ba1\u7b97\u9884\u6d4b\u6982\u7387\u5c0f\u4e8e0.5\u7684\u7c7b\u522b\n            pred2 = pred * (-1.0) + 1.0\n            # \u5f97\u5230\u4e24\u4e2a\u7c7b\u522b\u7684\u9884\u6d4b\u6982\u7387\uff0c\u5e76\u6cbf\u7b2c\u4e00\u4e2a\u7ef4\u5ea6\u7ea7\u8054\n            pred = paddle.concat(&#91;pred2, pred], axis=1)\n            acc = paddle.metric.accuracy(pred, paddle.cast(label, dtype='int64'))\n\n            accuracies.append(acc.numpy())\n            losses.append(loss.numpy())\n        print(\"&#91;validation] accuracy\/loss: {}\/{}\".format(np.mean(accuracies), np.mean(losses)))\n        model.train()\n\n        paddle.save(model.state_dict(), 'palm.pdparams')\n        paddle.save(optimizer.state_dict(), 'palm.pdopt')\n\n\n# \u5b9a\u4e49\u8bc4\u4f30\u8fc7\u7a0b\ndef evaluation(model, params_file_path):\n\n    # \u5f00\u542f0\u53f7GPU\u9884\u4f30\n    use_gpu = True\n    paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')\n\n    print('start evaluation .......')\n\n    #\u52a0\u8f7d\u6a21\u578b\u53c2\u6570\n    model_state_dict = paddle.load(params_file_path)\n    model.load_dict(model_state_dict)\n\n    model.eval()\n    eval_loader = data_loader(DATADIR, \n                        batch_size=10, mode='eval')\n\n    acc_set = &#91;]\n    avg_loss_set = &#91;]\n    for batch_id, data in enumerate(eval_loader()):\n        x_data, y_data = data\n        img = paddle.to_tensor(x_data)\n        label = paddle.to_tensor(y_data)\n        y_data = y_data.astype(np.int64)\n        label_64 = paddle.to_tensor(y_data)\n        # \u8ba1\u7b97\u9884\u6d4b\u548c\u7cbe\u5ea6\n        prediction, acc = model(img, label_64)\n        # \u8ba1\u7b97\u635f\u5931\u51fd\u6570\u503c\n        loss = F.binary_cross_entropy_with_logits(prediction, label)\n  
      avg_loss = paddle.mean(loss)\n        acc_set.append(float(acc.numpy()))\n        avg_loss_set.append(float(avg_loss.numpy()))\n    # \u6c42\u5e73\u5747\u7cbe\u5ea6\n    acc_val_mean = np.array(acc_set).mean()\n    avg_loss_val_mean = np.array(avg_loss_set).mean()\n\n    print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))<\/code><\/pre>\n\n\n\n<p>\u5b9a\u4e49LeNet\uff0c\u7531\u4e8e\u8f93\u5165\u7684\u4e0d\u518d\u662f \u5355\u901a\u9053\u7070\u5ea6\u56fe\u7247\uff0c\u6240\u4ee5\u7b2c\u4e00\u5c42\u5377\u79ef\u7684in_channels\u6539\u62103\uff0c\u6700\u540e\u4e09\u5c42\u5377\u79ef\u540e\u4e5f\u4e0d\u662f\u5f97\u5230120\u00d71\u00d71\u7684\u7ed3\u679c\u800c\u662f120\u00d750\u00d750\uff0c\u6240\u4ee5\u7ebf\u6027\u5c42\u7684\u8f93\u5165\u53d8\u6210\u4e86300000\uff0c\u53ef\u60f3\u800c\u77e5\u9884\u6d4b\u7ed3\u679c\u4e0d\u4f1a\u7406\u60f3\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code># -*- coding:utf-8 -*-\n\n# \u5bfc\u5165\u9700\u8981\u7684\u5305\nimport paddle\nimport numpy as np\nfrom paddle.nn import Conv2D, MaxPool2D, Linear, Dropout\nimport paddle.nn.functional as F\n\n# \u5b9a\u4e49 LeNet \u7f51\u7edc\u7ed3\u6784\nclass LeNet(paddle.nn.Layer):\n    def __init__(self, num_classes=1):\n        super(LeNet, self).__init__()\n\n        # \u521b\u5efa\u5377\u79ef\u548c\u6c60\u5316\u5c42\u5757\uff0c\u6bcf\u4e2a\u5377\u79ef\u5c42\u4f7f\u7528Sigmoid\u6fc0\u6d3b\u51fd\u6570\uff0c\u540e\u9762\u8ddf\u7740\u4e00\u4e2a2x2\u7684\u6c60\u5316\n        self.conv1 = Conv2D(in_channels=3, out_channels=6, kernel_size=5)\n        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)\n        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)\n        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)\n        # \u521b\u5efa\u7b2c3\u4e2a\u5377\u79ef\u5c42\n        self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)\n        # 
\u521b\u5efa\u5168\u8fde\u63a5\u5c42\uff0c\u7b2c\u4e00\u4e2a\u5168\u8fde\u63a5\u5c42\u7684\u8f93\u51fa\u795e\u7ecf\u5143\u4e2a\u6570\u4e3a64\n        self.fc1 = Linear(in_features=300000, out_features=64)\n        # \u7b2c\u4e8c\u4e2a\u5168\u8fde\u63a5\u5c42\u8f93\u51fa\u795e\u7ecf\u5143\u4e2a\u6570\u4e3a\u5206\u7c7b\u6807\u7b7e\u7684\u7c7b\u522b\u6570\n        self.fc2 = Linear(in_features=64, out_features=num_classes)\n\n    # \u7f51\u7edc\u7684\u524d\u5411\u8ba1\u7b97\u8fc7\u7a0b\n    def forward(self, x, label=None):\n        x = self.conv1(x)\n        x = F.sigmoid(x)\n        x = self.max_pool1(x)\n        x = self.conv2(x)\n        x = F.sigmoid(x)\n        x = self.max_pool2(x)\n        x = self.conv3(x)\n        x = F.sigmoid(x)\n        x = paddle.reshape(x, &#91;x.shape&#91;0], -1])\n        x = self.fc1(x)\n        x = F.sigmoid(x)\n        x = self.fc2(x)\n        if label is not None:\n            acc = paddle.metric.accuracy(input=x, label=label)\n            return x, acc\n        else:\n            return x<\/code><\/pre>\n\n\n\n<pre class=\"wp-block-code\"><code># \u521b\u5efa\u6a21\u578b\nmodel = LeNet(num_classes=1)\n# \u542f\u52a8\u8bad\u7ec3\u8fc7\u7a0b\nopt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9, parameters=model.parameters())\ntrain_pm(model, optimizer=opt)\nevaluation(model, params_file_path=\"palm.pdparams\")\n\nstart training ... 
\nepoch: 0, batch_id: 0, loss is: &#91;0.50568914]\nepoch: 0, batch_id: 10, loss is: &#91;0.6118419]\nepoch: 0, batch_id: 20, loss is: &#91;0.66790974]\nepoch: 0, batch_id: 30, loss is: &#91;0.68811446]\n&#91;validation] accuracy\/loss: 0.4725000262260437\/0.6936749219894409\nepoch: 1, batch_id: 0, loss is: &#91;0.69677556]\nepoch: 1, batch_id: 10, loss is: &#91;0.7022215]\nepoch: 1, batch_id: 20, loss is: &#91;0.68310237]\nepoch: 1, batch_id: 30, loss is: &#91;0.70623994]\n&#91;validation] accuracy\/loss: 0.5275000333786011\/0.6917704343795776\nepoch: 2, batch_id: 0, loss is: &#91;0.6938783]\nepoch: 2, batch_id: 10, loss is: &#91;0.693455]\nepoch: 2, batch_id: 20, loss is: &#91;0.6738912]\nepoch: 2, batch_id: 30, loss is: &#91;0.68051213]\n&#91;validation] accuracy\/loss: 0.5275000333786011\/0.69181889295578\nepoch: 3, batch_id: 0, loss is: &#91;0.6810262]\nepoch: 3, batch_id: 10, loss is: &#91;0.7284224]\nepoch: 3, batch_id: 20, loss is: &#91;0.6831607]\nepoch: 3, batch_id: 30, loss is: &#91;0.7108837]\n&#91;validation] accuracy\/loss: 0.5275000333786011\/0.691852867603302\nepoch: 4, batch_id: 0, loss is: &#91;0.69604385]\nepoch: 4, batch_id: 10, loss is: &#91;0.69544876]\nepoch: 4, batch_id: 20, loss is: &#91;0.7562486]\nepoch: 4, batch_id: 30, loss is: &#91;0.69045544]\n&#91;validation] accuracy\/loss: 0.5275000333786011\/0.6916558146476746\nstart evaluation .......\nloss=0.6911752998828888, 
acc=0.4675000052899122\n<\/code><\/pre>\n\n\n\n<p>\u901a\u8fc7\u8fd0\u884c\u7ed3\u679c\u53ef\u4ee5\u770b\u51fa\uff0c\u5728\u773c\u75be\u7b5b\u67e5\u6570\u636e\u96c6iChallenge-PM\u4e0a\uff0cLeNet\u7684loss\u5f88\u96be\u4e0b\u964d\uff0c\u6a21\u578b\u6ca1\u6709\u6536\u655b\u3002\u8fd9\u662f\u56e0\u4e3aMNIST\u6570\u636e\u96c6\u7684\u56fe\u7247\u5c3a\u5bf8\u6bd4\u8f83\u5c0f\uff0828\u00d72828\\times2828\u00d728\uff09\uff0c\u4f46\u662f\u773c\u75be\u7b5b\u67e5\u6570\u636e\u96c6\u56fe\u7247\u5c3a\u5bf8\u6bd4\u8f83\u5927\uff08\u539f\u59cb\u56fe\u7247\u5c3a\u5bf8\u7ea6\u4e3a2000\u00d720002000 \\times 20002000\u00d72000\uff0c\u7ecf\u8fc7\u7f29\u653e\u4e4b\u540e\u53d8\u6210224\u00d7224224 \\times 224224\u00d7224\uff09\uff0cLeNet\u6a21\u578b\u5f88\u96be\u8fdb\u884c\u6709\u6548\u5206\u7c7b\u3002\u8fd9\u8bf4\u660e\u5728\u56fe\u7247\u5c3a\u5bf8\u6bd4\u8f83\u5927\u65f6\uff0cLeNet\u5728\u56fe\u50cf\u5206\u7c7b\u4efb\u52a1\u4e0a\u5b58\u5728\u5c40\u9650\u6027\u3002<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">AlexNet<\/h2>\n\n\n\n<p>\u901a\u8fc7\u4e0a\u9762\u7684\u5b9e\u9645\u8bad\u7ec3\u53ef\u4ee5\u770b\u5230\uff0c\u867d\u7136LeNet\u5728\u624b\u5199\u6570\u5b57\u8bc6\u522b\u6570\u636e\u96c6\u4e0a\u53d6\u5f97\u4e86\u5f88\u597d\u7684\u7ed3\u679c\uff0c\u4f46\u5728\u66f4\u5927\u7684\u6570\u636e\u96c6\u4e0a\u8868\u73b0\u5374\u5e76\u4e0d\u597d\u3002\u81ea\u4ece1998\u5e74LeNet\u95ee\u4e16\u4ee5\u6765\uff0c\u63a5\u4e0b\u6765\u5341\u51e0\u5e74\u7684\u65f6\u95f4\u91cc\uff0c\u795e\u7ecf\u7f51\u7edc\u5e76\u6ca1\u6709\u5728\u8ba1\u7b97\u673a\u89c6\u89c9\u9886\u57df\u53d6\u5f97\u5f88\u597d\u7684\u7ed3\u679c\uff0c\u53cd\u800c\u4e00\u5ea6\u88ab\u5176\u5b83\u7b97\u6cd5\u6240\u8d85\u8d8a\u3002\u539f\u56e0\u4e3b\u8981\u6709\u4e24\u65b9\u9762\uff0c\u4e00\u662f\u795e\u7ecf\u7f51\u7edc\u7684\u8ba1\u7b97\u6bd4\u8f83\u590d\u6742\uff0c\u5bf9\u5f53\u65f6\u8ba1\u7b97\u673a\u7684\u7b97\u529b\u6765\u8bf4\uff0c\u8bad\u7ec3\u795e\u7ecf\u7f51\u7edc\u662f\u4ef6\u975e\u5e38\u8017\u65f6\u7684\u4e8b\u60c5\uff1b\u53e6\u4e00
\u65b9\u9762\uff0c\u5f53\u65f6\u8fd8\u6ca1\u6709\u4e13\u95e8\u9488\u5bf9\u795e\u7ecf\u7f51\u7edc\u505a\u7b97\u6cd5\u548c\u8bad\u7ec3\u6280\u5de7\u7684\u4f18\u5316\uff0c\u795e\u7ecf\u7f51\u7edc\u7684\u6536\u655b\u662f\u4ef6\u975e\u5e38\u56f0\u96be\u7684\u4e8b\u60c5\u3002<\/p>\n\n\n\n<p>\u968f\u7740\u6280\u672f\u7684\u8fdb\u6b65\u548c\u53d1\u5c55\uff0c\u8ba1\u7b97\u673a\u7684\u7b97\u529b\u8d8a\u6765\u8d8a\u5f3a\u5927\uff0c\u5c24\u5176\u662f\u5728GPU\u5e76\u884c\u8ba1\u7b97\u80fd\u529b\u7684\u63a8\u52a8\u4e0b\uff0c\u590d\u6742\u795e\u7ecf\u7f51\u7edc\u7684\u8ba1\u7b97\u4e5f\u53d8\u5f97\u66f4\u52a0\u5bb9\u6613\u5b9e\u65bd\u3002\u53e6\u4e00\u65b9\u9762\uff0c\u4e92\u8054\u7f51\u4e0a\u6d8c\u73b0\u51fa\u8d8a\u6765\u8d8a\u591a\u7684\u6570\u636e\uff0c\u6781\u5927\u7684\u4e30\u5bcc\u4e86\u6570\u636e\u5e93\u3002\u540c\u65f6\u4e5f\u6709\u8d8a\u6765\u8d8a\u591a\u7684\u7814\u7a76\u4eba\u5458\u5f00\u59cb\u4e13\u95e8\u9488\u5bf9\u795e\u7ecf\u7f51\u7edc\u505a\u7b97\u6cd5\u548c\u6a21\u578b\u7684\u4f18\u5316\uff0cAlex Krizhevsky\u7b49\u4eba\u63d0\u51fa\u7684AlexNet\u4ee5\u5f88\u5927\u4f18\u52bf\u83b7\u5f97\u4e862012\u5e74ImageNet\u6bd4\u8d5b\u7684\u51a0\u519b\u3002\u8fd9\u4e00\u6210\u679c\u6781\u5927\u7684\u6fc0\u53d1\u4e86\u4ea7\u4e1a\u754c\u5bf9\u795e\u7ecf\u7f51\u7edc\u7684\u5174\u8da3\uff0c\u5f00\u521b\u4e86\u4f7f\u7528\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u89e3\u51b3\u56fe\u50cf\u95ee\u9898\u7684\u9014\u5f84\uff0c\u968f\u540e\u4e5f\u5728\u8fd9\u4e00\u9886\u57df\u6d8c\u73b0\u51fa\u8d8a\u6765\u8d8a\u591a\u7684\u4f18\u79c0\u6210\u679c\u3002<\/p>\n\n\n\n<p>AlexNet\u4e0eLeNet\u76f8\u6bd4\uff0c\u5177\u6709\u66f4\u6df1\u7684\u7f51\u7edc\u7ed3\u6784\uff0c\u5305\u542b5\u5c42\u5377\u79ef\u548c3\u5c42\u5168\u8fde\u63a5\uff0c\u540c\u65f6\u4f7f\u7528\u4e86\u5982\u4e0b\u4e09\u79cd\u65b9\u6cd5\u6539\u8fdb\u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\uff1a<\/p>\n\n\n\n<ul 
class=\"wp-block-list\"><li>\u6570\u636e\u589e\u5e7f\uff1a\u6df1\u5ea6\u5b66\u4e60\u4e2d\u5e38\u7528\u7684\u4e00\u79cd\u5904\u7406\u65b9\u5f0f\uff0c\u901a\u8fc7\u5bf9\u8bad\u7ec3\u968f\u673a\u52a0\u4e00\u4e9b\u53d8\u5316\uff0c\u6bd4\u5982\u5e73\u79fb\u3001\u7f29\u653e\u3001\u88c1\u526a\u3001\u65cb\u8f6c\u3001\u7ffb\u8f6c\u6216\u8005\u589e\u51cf\u4eae\u5ea6\u7b49\uff0c\u4ea7\u751f\u4e00\u7cfb\u5217\u8ddf\u539f\u59cb\u56fe\u7247\u76f8\u4f3c\u4f46\u53c8\u4e0d\u5b8c\u5168\u76f8\u540c\u7684\u6837\u672c\uff0c\u4ece\u800c\u6269\u5927\u8bad\u7ec3\u6570\u636e\u96c6\u3002\u901a\u8fc7\u8fd9\u79cd\u65b9\u5f0f\uff0c\u53ef\u4ee5\u968f\u673a\u6539\u53d8\u8bad\u7ec3\u6837\u672c\uff0c\u907f\u514d\u6a21\u578b\u8fc7\u5ea6\u4f9d\u8d56\u4e8e\u67d0\u4e9b\u5c5e\u6027\uff0c\u80fd\u4ece\u4e00\u5b9a\u7a0b\u5ea6\u4e0a\u6291\u5236\u8fc7\u62df\u5408\u3002<\/li><li>\u4f7f\u7528Dropout\u6291\u5236\u8fc7\u62df\u5408\u3002<\/li><li>\u4f7f\u7528ReLU\u6fc0\u6d3b\u51fd\u6570\u51cf\u5c11\u68af\u5ea6\u6d88\u5931\u73b0\u8c61\u3002<\/li><\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code># -*- coding:utf-8 -*-\n\n# \u5bfc\u5165\u9700\u8981\u7684\u5305\nimport paddle\nimport numpy as np\nfrom paddle.nn import Conv2D, MaxPool2D, Linear, Dropout\n## \u7ec4\u7f51\nimport paddle.nn.functional as F\n\n# \u5b9a\u4e49 AlexNet \u7f51\u7edc\u7ed3\u6784\nclass AlexNet(paddle.nn.Layer):\n    def __init__(self, num_classes=1):\n        super(AlexNet, self).__init__()\n        # AlexNet\u4e0eLeNet\u4e00\u6837\u4e5f\u4f1a\u540c\u65f6\u4f7f\u7528\u5377\u79ef\u548c\u6c60\u5316\u5c42\u63d0\u53d6\u56fe\u50cf\u7279\u5f81\n        # \u4e0eLeNet\u4e0d\u540c\u7684\u662f\u6fc0\u6d3b\u51fd\u6570\u6362\u6210\u4e86\u2018relu\u2019\n        self.conv1 = Conv2D(in_channels=3, out_channels=96, kernel_size=11, stride=4, padding=5)\n        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)\n        self.conv2 = Conv2D(in_channels=96, out_channels=256, kernel_size=5, stride=1, padding=2)\n        self.max_pool2 = MaxPool2D(kernel_size=2, 
stride=2)\n        self.conv3 = Conv2D(in_channels=256, out_channels=384, kernel_size=3, stride=1, padding=1)\n        self.conv4 = Conv2D(in_channels=384, out_channels=384, kernel_size=3, stride=1, padding=1)\n        self.conv5 = Conv2D(in_channels=384, out_channels=256, kernel_size=3, stride=1, padding=1)\n        self.max_pool5 = MaxPool2D(kernel_size=2, stride=2)\n\n        self.fc1 = Linear(in_features=12544, out_features=4096)\n        self.drop_ratio1 = 0.5\n        self.drop1 = Dropout(self.drop_ratio1)\n        self.fc2 = Linear(in_features=4096, out_features=4096)\n        self.drop_ratio2 = 0.5\n        self.drop2 = Dropout(self.drop_ratio2)\n        self.fc3 = Linear(in_features=4096, out_features=num_classes)\n    \n    def forward(self, x):\n        x = self.conv1(x)\n        x = F.relu(x)\n        x = self.max_pool1(x)\n        x = self.conv2(x)\n        x = F.relu(x)\n        x = self.max_pool2(x)\n        x = self.conv3(x)\n        x = F.relu(x)\n        x = self.conv4(x)\n        x = F.relu(x)\n        x = self.conv5(x)\n        x = F.relu(x)\n        x = self.max_pool5(x)\n        x = paddle.reshape(x, &#91;x.shape&#91;0], -1])\n        x = self.fc1(x)\n        x = F.relu(x)\n        # \u5728\u5168\u8fde\u63a5\u4e4b\u540e\u4f7f\u7528dropout\u6291\u5236\u8fc7\u62df\u5408\n        x = self.drop1(x)\n        x = self.fc2(x)\n        x = F.relu(x)\n        # \u5728\u5168\u8fde\u63a5\u4e4b\u540e\u4f7f\u7528dropout\u6291\u5236\u8fc7\u62df\u5408\n        x = self.drop2(x)\n        x = self.fc3(x)\n        return x<\/code><\/pre>\n\n\n\n<pre class=\"wp-block-code\"><code># \u521b\u5efa\u6a21\u578b\nmodel = AlexNet()\n# \u542f\u52a8\u8bad\u7ec3\u8fc7\u7a0b\nopt = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())\n\ntrain_pm(model, optimizer=opt)<\/code><\/pre>\n\n\n\n<h2 
class=\"wp-block-heading\">pytorch\u5b9e\u73b0<\/h2>\n\n\n\n<p>\u6539\u5199\u6210pytorch\u7248\u672c\u65f6\u9047\u5230\u5982\u4e0b\u51e0\u4e2a\u95ee\u9898<\/p>\n\n\n\n<ul class=\"wp-block-list\"><li>\u51fd\u6570\u63a5\u53e3\u540d\u79f0\u4e0d\u4e00\u81f4<\/li><li>\u8c03\u7528gpu\u65b9\u5f0f\u4e0d\u4e00\u81f4<\/li><li>accuracy\u8ba1\u7b97\u65b9\u5f0f\u4e0d\u4e00\u6837<\/li><li>\u6570\u636e\u8bfb\u53d6\u62a5\u9519<\/li><\/ul>\n\n\n\n<p>\u524d\u4e09\u4e2a\u95ee\u9898\u524d\u4e24\u7ae0\u90fd\u63d0\u53ca\u4e86\uff0c\u4e0d\u8fc7\u8fd9\u6b21\u7684acc\u6211\u4f7f\u7528\u4e86round\u548ctorch.eq\u51fd\u6570\u6765\u5b9e\u73b0\u66f4\u4e3a\u65b9\u4fbf\uff0c\u56e0\u4e3a\u5206\u7c7b\u7ed3\u679c\u5c31\u662f0\u548c1<\/p>\n\n\n\n<p>\u5728\u6bcf\u4e2aepoch\u6700\u540e\u8ba1\u7b97acc\u65f6\u53d1\u751f\u4e86\u6570\u636e\u52a0\u8f7d\u51fd\u6570\u7684\u62a5\u9519\uff0c\u6309\u9053\u7406\u4f7f\u7528paddle\u548cpytorch\u6765\u5b9e\u73b0\u548c\u6570\u636e\u8bfb\u53d6\u51fd\u6570\u662f\u6ca1\u6709\u5173\u7cfb\u7684\uff0c\u4e3a\u4ec0\u4e48\u4f1a\u62a5\u9519\u5462\uff1f\u68c0\u67e5\u4e00\u4e0b\u53d1\u73b0\u662fcsv\u6587\u4ef6\u6700\u540e\u4e00\u884c\u51fa\u73b0\u4e86\u9519\u8bef\uff0c\u8fd9\u79cd\u4e8b\u60c5\u4e0d\u662f\u7b2c\u4e00\u6b21\u89c1\u5230\u4e86\uff0c\u4ee5\u540e\u8f93\u51facsv\u7684\u65f6\u5019\u90fd\u8981\u68c0\u67e5\u6700\u540e\u4e00\u884c\u662f\u4e0d\u662f\u591a\u4e86\u4e00\u884c\u56de\u8f66\u6216\u8005\u591a\u4e86\u9017\u53f7\u7b49\u7b49\u3002<\/p>\n\n\n\n<p>\u53e6\u5916\u56e0\u4e3a\u6211\u7684\u663e\u5361\u662f3060laptop\u6240\u4ee5\u8981\u628abatchsize\u8c03\u5c0f\u4e00\u70b9\uff0c\u8981\u4e0d\u7136\u7206\u663e\u5b58\u4e86<\/p>\n\n\n\n<p>\u53d1\u73b0\u8bad\u7ec3\u65f6\u6709\u65f6\u4f1a\u5361\u5728\u4e0b\u9762\u8fd9\u4e2aacc\uff0c\u5e94\u8be5\u662f\u56f0\u5728\u67d0\u4e2a\u5c40\u90e8\u6700\u4f18\u4e86\uff0c\u91cd\u65b0\u8bad\u7ec3\u4e00\u4e0b\u624d\u884c<\/p>\n\n\n\n<pre class=\"wp-block-preformatted\">[validation] accuracy\/loss: 
0.5275000333786011\/0.6918381452560425<\/pre>\n\n\n\n<p>\u6700\u540e\u8bc4\u4f30\u8fc7\u7a0b\uff0ctorch\u7684model\u5e76\u4e0d\u80fd\u76f4\u63a5\u8f93\u51faacc\uff0c\u6240\u4ee5\u8ddfeval\u8fc7\u7a0b\u4e00\u6837\u8ba1\u7b97acc\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>\/\/\u6570\u636e\u8bfb\u53d6\u51fd\u6570\u540cpaddle\n<\/code><\/pre>\n\n\n\n<pre class=\"wp-block-code\"><code># -*- coding: utf-8 -*-\n# \u5bfc\u5165\u9700\u8981\u7684\u5305\nimport torch\nimport numpy as np\nfrom torch.nn import Conv2d, MaxPool2d, Linear, Dropout\nfrom torch import nn,optim\nimport torch.nn.functional as F\nimport os\nimport random\nimport numpy as np\n\nDATADIR = r'E:\\DL_DATA\\iChallenge-PM\\PALM-Training400\\PALM-Training400'\nDATADIR2 = r'E:\\DL_DATA\\iChallenge-PM\\PALM-Validation400'\nCSVFILE = r'E:\\DL_DATA\\iChallenge-PM\\labels.csv'\n\n# \u5b9a\u4e49\u8bad\u7ec3\u8fc7\u7a0b\ndef train_pm(model, optimizer):\n    print('start training ... ')\n    model.cuda()\n    model.train(mode=True)\n    epoch_num = 5\n    # \u5b9a\u4e49\u6570\u636e\u8bfb\u53d6\u5668\uff0c\u8bad\u7ec3\u6570\u636e\u8bfb\u53d6\u5668\u548c\u9a8c\u8bc1\u6570\u636e\u8bfb\u53d6\u5668\n    train_loader = data_loader(DATADIR, batch_size=6,mode='train')\n    valid_loader = valid_data_loader(DATADIR2, CSVFILE)\n    for epoch in range(epoch_num):\n        for batch_id, data in enumerate(train_loader()):\n            x_data, y_data = data\n            img = torch.tensor(x_data).cuda()\n            label = torch.tensor(y_data).cuda()\n            # \u8fd0\u884c\u6a21\u578b\u524d\u5411\u8ba1\u7b97\uff0c\u5f97\u5230\u9884\u6d4b\u503c\n            logits = model(img)\n            loss = F.binary_cross_entropy_with_logits(logits, label).cuda()\n            avg_loss = torch.mean(loss)\n\n            if batch_id % 10 == 0:\n                print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.cpu().detach().numpy()))\n            # 
\u53cd\u5411\u4f20\u64ad\uff0c\u66f4\u65b0\u6743\u91cd\uff0c\u6e05\u9664\u68af\u5ea6\n            avg_loss.backward()\n            optimizer.step()\n            optimizer.zero_grad()\n\n        model.eval()\n        accuracies = &#91;]\n        losses = &#91;]\n        for batch_id, data in enumerate(valid_loader()):\n            x_data, y_data = data\n            img = torch.tensor(x_data).cuda()\n            label = torch.tensor(y_data).cuda()\n            # \u8fd0\u884c\u6a21\u578b\u524d\u5411\u8ba1\u7b97\uff0c\u5f97\u5230\u9884\u6d4b\u503c\n            logits = model(img)\n            # \u4e8c\u5206\u7c7b\uff0csigmoid\u8ba1\u7b97\u540e\u7684\u7ed3\u679c\u4ee50.5\u4e3a\u9608\u503c\u5206\u4e24\u4e2a\u7c7b\u522b\n            # \u8ba1\u7b97sigmoid\u540e\u7684\u9884\u6d4b\u6982\u7387\uff0c\u8fdb\u884closs\u8ba1\u7b97\n            pred = torch.sigmoid(logits)\n            loss = F.binary_cross_entropy_with_logits(logits, label)\n            pred_label=pred.round().squeeze(dim=-1)\n            acc=torch.eq(pred_label,label.squeeze(dim=-1)).float().mean()\n            accuracies.append(acc.cpu().detach().numpy())\n            losses.append(loss.cpu().detach().numpy())\n        print(\"&#91;validation] accuracy\/loss: {}\/{}\".format(np.mean(accuracies), np.mean(losses)))\n        model.train()\n        torch.save(model.state_dict(), 'AlexNet.pdparams')\n        torch.cuda.empty_cache()\n\n# \u5b9a\u4e49\u8bc4\u4f30\u8fc7\u7a0b\ndef evaluation(model, params_file_path):\n\n    # \u5f00\u542f0\u53f7GPU\u9884\u4f30\n    model.cuda()\n    print('start evaluation .......')\n\n    #\u52a0\u8f7d\u6a21\u578b\u53c2\u6570\n    model.load_state_dict(torch.load(params_file_path))\n\n    model.eval()\n    eval_loader = data_loader(DATADIR, \n                        batch_size=10, mode='eval')\n    \n    acc_set = &#91;]\n    avg_loss_set = &#91;]\n    for batch_id, data in enumerate(eval_loader()):\n        x_data, y_data = data\n        img = torch.tensor(x_data).cuda()\n        
label = torch.tensor(y_data).cuda()\n        y_data = y_data.astype(np.int64)\n        label_64 = torch.tensor(y_data)\n        # \u8ba1\u7b97\u9884\u6d4b\u548c\u7cbe\u5ea6\n        logits = model(img)\n        # \u8ba1\u7b97\u635f\u5931\u51fd\u6570\u503c\n        pred = torch.sigmoid(logits)\n        loss = F.binary_cross_entropy_with_logits(logits, label)\n        pred_label=pred.round().squeeze(dim=-1)\n        acc=torch.eq(pred_label,label.squeeze(dim=-1)).float().mean()\n        avg_loss = torch.mean(loss)\n        acc_set.append(float(acc.cpu().detach().numpy()))\n        avg_loss_set.append(float(avg_loss.cpu().detach().numpy()))\n    # \u6c42\u5e73\u5747\u7cbe\u5ea6\n    acc_val_mean = np.array(acc_set).mean()\n    avg_loss_val_mean = np.array(avg_loss_set).mean()\n\n    print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))\n\n# \u5b9a\u4e49 AlexNet \u7f51\u7edc\u7ed3\u6784\nclass AlexNet(nn.Module):\n    def __init__(self, num_classes=1):\n        super(AlexNet, self).__init__()\n        # AlexNet\u4e0eLeNet\u4e00\u6837\u4e5f\u4f1a\u540c\u65f6\u4f7f\u7528\u5377\u79ef\u548c\u6c60\u5316\u5c42\u63d0\u53d6\u56fe\u50cf\u7279\u5f81\n        # \u4e0eLeNet\u4e0d\u540c\u7684\u662f\u6fc0\u6d3b\u51fd\u6570\u6362\u6210\u4e86\u2018relu\u2019\n        self.conv1 = Conv2d(in_channels=3, out_channels=96, kernel_size=11, stride=4, padding=5)\n        self.max_pool1 = MaxPool2d(kernel_size=2, stride=2)\n        self.conv2 = Conv2d(in_channels=96, out_channels=256, kernel_size=5, stride=1, padding=2)\n        self.max_pool2 = MaxPool2d(kernel_size=2, stride=2)\n        self.conv3 = Conv2d(in_channels=256, out_channels=384, kernel_size=3, stride=1, padding=1)\n        self.conv4 = Conv2d(in_channels=384, out_channels=384, kernel_size=3, stride=1, padding=1)\n        self.conv5 = Conv2d(in_channels=384, out_channels=256, kernel_size=3, stride=1, padding=1)\n        self.max_pool5 = MaxPool2d(kernel_size=2, stride=2)\n\n        self.fc1 = Linear(in_features=12544, 
out_features=4096)\n        self.drop_ratio1 = 0.5\n        self.drop1 = Dropout(self.drop_ratio1)\n        self.fc2 = Linear(in_features=4096, out_features=4096)\n        self.drop_ratio2 = 0.5\n        self.drop2 = Dropout(self.drop_ratio2)\n        self.fc3 = Linear(in_features=4096, out_features=num_classes)\n    \n    def forward(self, x):\n        x = self.conv1(x)\n        x = F.relu(x)\n        x = self.max_pool1(x)\n        x = self.conv2(x)\n        x = F.relu(x)\n        x = self.max_pool2(x)\n        x = self.conv3(x)\n        x = F.relu(x)\n        x = self.conv4(x)\n        x = F.relu(x)\n        x = self.conv5(x)\n        x = F.relu(x)\n        x = self.max_pool5(x)\n        x = torch.reshape(x, &#91;x.shape&#91;0], -1])\n        x = self.fc1(x)\n        x = F.relu(x)\n        # \u5728\u5168\u8fde\u63a5\u4e4b\u540e\u4f7f\u7528dropout\u6291\u5236\u8fc7\u62df\u5408\n        x = self.drop1(x)\n        x = self.fc2(x)\n        x = F.relu(x)\n        # \u5728\u5168\u8fde\u63a5\u4e4b\u540e\u4f7f\u7528dropout\u6291\u5236\u8fc7\u62df\u5408\n        x = self.drop2(x)\n        x = self.fc3(x)\n        return x\n\n# \u521b\u5efa\u6a21\u578b\nmodel = AlexNet(num_classes=1)\n# \u542f\u52a8\u8bad\u7ec3\u8fc7\u7a0b\nopt = optim.Adam(lr=0.001, params=model.parameters())\ntrain_pm(model, optimizer=opt)\nevaluation(model, params_file_path=\"AlexNet.pdparams\")<\/code><\/pre>\n\n\n\n<pre class=\"wp-block-preformatted\">start training ... 
\nepoch: 0, batch_id: 0, loss is: 0.6922525763511658\nepoch: 0, batch_id: 10, loss is: 0.7914059162139893\nepoch: 0, batch_id: 20, loss is: 0.7226778864860535\nepoch: 0, batch_id: 30, loss is: 0.5134126543998718\nepoch: 0, batch_id: 40, loss is: 0.6680147647857666\nepoch: 0, batch_id: 50, loss is: 0.09653188288211823\nepoch: 0, batch_id: 60, loss is: 2.5109710693359375\n[validation] accuracy\/loss: 0.5649999976158142\/0.911982536315918\nepoch: 1, batch_id: 0, loss is: 1.3442769050598145\nepoch: 1, batch_id: 10, loss is: 0.6957622766494751\nepoch: 1, batch_id: 20, loss is: 0.19908615946769714\nepoch: 1, batch_id: 30, loss is: 0.6529017686843872\nepoch: 1, batch_id: 40, loss is: 0.4269561767578125\nepoch: 1, batch_id: 50, loss is: 0.1539572924375534\nepoch: 1, batch_id: 60, loss is: 0.38534554839134216\n[validation] accuracy\/loss: 0.9024999737739563\/0.3134824335575104\nepoch: 2, batch_id: 0, loss is: 0.1563398689031601\nepoch: 2, batch_id: 10, loss is: 0.6282495260238647\nepoch: 2, batch_id: 20, loss is: 0.3328627049922943\nepoch: 2, batch_id: 30, loss is: 0.12043571472167969\nepoch: 2, batch_id: 40, loss is: 0.33190542459487915\nepoch: 2, batch_id: 50, loss is: 0.16366970539093018\nepoch: 2, batch_id: 60, loss is: 0.3514131009578705\n[validation] accuracy\/loss: 0.8675000071525574\/0.3479907512664795\nepoch: 3, batch_id: 0, loss is: 0.47956809401512146\nepoch: 3, batch_id: 10, loss is: 0.07707101106643677\nepoch: 3, batch_id: 20, loss is: 0.21194897592067719\nepoch: 3, batch_id: 30, loss is: 0.11190598458051682\nepoch: 3, batch_id: 40, loss is: 0.004018225707113743\nepoch: 3, batch_id: 50, loss is: 0.5130555033683777\nepoch: 3, batch_id: 60, loss is: 0.38815149664878845\n[validation] accuracy\/loss: 0.9200000762939453\/0.28641197085380554\nepoch: 4, batch_id: 0, loss is: 0.40335366129875183\nepoch: 4, batch_id: 10, loss is: 0.3855357766151428\nepoch: 4, batch_id: 20, loss is: 0.16769567131996155\nepoch: 4, batch_id: 30, loss is: 0.6962148547172546\nepoch: 4, 
batch_id: 40, loss is: 1.2973268032073975\nepoch: 4, batch_id: 50, loss is: 0.15464788675308228\nepoch: 4, batch_id: 60, loss is: 0.012425403110682964\n[validation] accuracy\/loss: 0.8999999761581421\/0.2859897017478943\nstart evaluation .......\nloss=0.32200332209467886, acc=0.8800000078976155<\/pre>\n\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter size-large\"><img loading=\"lazy\" decoding=\"async\" width=\"1024\" height=\"576\" src=\"http:\/\/www.gislxz.com\/wp-content\/uploads\/2022\/08\/IMG_20220814_021426-1024x576.png\" alt=\"\" class=\"wp-image-410\" srcset=\"https:\/\/www.gislxz.com\/wp-content\/uploads\/2022\/08\/IMG_20220814_021426-1024x576.png 1024w, https:\/\/www.gislxz.com\/wp-content\/uploads\/2022\/08\/IMG_20220814_021426-300x169.png 300w, https:\/\/www.gislxz.com\/wp-content\/uploads\/2022\/08\/IMG_20220814_021426-768x432.png 768w, https:\/\/www.gislxz.com\/wp-content\/uploads\/2022\/08\/IMG_20220814_021426-1536x864.png 1536w, https:\/\/www.gislxz.com\/wp-content\/uploads\/2022\/08\/IMG_20220814_021426-2048x1152.png 2048w\" sizes=\"auto, (max-width: 1024px) 100vw, 1024px\" \/><figcaption>engage 
kiss\u8fd9\u4e00\u96c6\u4e5f\u592a\u641e\u4e86<\/figcaption><\/figure><\/div>\n","protected":false},"excerpt":{"rendered":"<p>\u773c\u75be\u8bc6\u522b\u6570\u636e\u96c6iChallenge-PM\u4e0eAlexNet<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"_jetpack_memberships_contains_paid_content":false,"footnotes":""},"categories":[21],"tags":[],"class_list":["post-406","post","type-post","status-publish","format-standard","hentry","category-21"],"jetpack_featured_media_url":"","jetpack_sharing_enabled":true,"_links":{"self":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts\/406","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/comments?post=406"}],"version-history":[{"count":3,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts\/406\/revisions"}],"predecessor-version":[{"id":414,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts\/406\/revisions\/414"}],"wp:attachment":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/media?parent=406"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/categories?post=406"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/tags?post=406"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}