{"id":328,"date":"2022-07-29T20:08:38","date_gmt":"2022-07-29T12:08:38","guid":{"rendered":"http:\/\/www.gislxz.top\/?p=328"},"modified":"2022-07-29T20:08:40","modified_gmt":"2022-07-29T12:08:40","slug":"%e6%b7%b1%e5%ba%a6%e5%ad%a6%e4%b9%a0%e7%ac%94%e8%ae%b0%ef%bc%882%ef%bc%89","status":"publish","type":"post","link":"https:\/\/www.gislxz.com\/index.php\/2022\/07\/29\/%e6%b7%b1%e5%ba%a6%e5%ad%a6%e4%b9%a0%e7%ac%94%e8%ae%b0%ef%bc%882%ef%bc%89\/","title":{"rendered":"\u6df1\u5ea6\u5b66\u4e60\u7b14\u8bb0\uff082\uff09"},"content":{"rendered":"\n<p>\u5b98\u65b9\u6559\u7a0b\u4e2d\u4e0a\u4e00\u7ae0\u7528numpy\u4ece\u5e95\u5c42\u5b9e\u73b0\u4e86\u57fa\u4e8e\u591a\u5143\u7ebf\u6027\u56de\u5f52\u7684\u623f\u4ef7\u9884\u6d4b\u6a21\u578b\uff0c\u8fd9\u4e00\u7ae0\u4f7f\u7528paddle\u6846\u67b6\u91cd\u5199\u3002<\/p>\n\n\n\n<p><a href=\"https:\/\/aistudio.baidu.com\/aistudio\/projectdetail\/1587351\" target=\"_blank\"  rel=\"nofollow\" >\u4f7f\u7528\u98de\u6868\u91cd\u5199\u623f\u4ef7\u9884\u6d4b\u6a21\u578b<\/a><\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>#\u52a0\u8f7d\u98de\u6868\u3001Numpy\u548c\u76f8\u5173\u7c7b\u5e93\r\nimport paddle\r\nfrom paddle.nn import Linear\r\nimport paddle.nn.functional as F\r\nimport numpy as np\r\nimport os\r\nimport random\n\n#\u52a0\u8f7d\u6570\u636e\u51fd\u6570\u4e0e\u4e0a\u4e00\u7ae0\u57fa\u672c\u4e00\u81f4\ndef load_data():\r\n    # \u4ece\u6587\u4ef6\u5bfc\u5165\u6570\u636e\r\n    datafile = '.\/work\/housing.data'\r\n    data = np.fromfile(datafile, sep=' ', dtype=np.float32)\r\n\r\n    # \u6bcf\u6761\u6570\u636e\u5305\u62ec14\u9879\uff0c\u5176\u4e2d\u524d\u976213\u9879\u662f\u5f71\u54cd\u56e0\u7d20\uff0c\u7b2c14\u9879\u662f\u76f8\u5e94\u7684\u623f\u5c4b\u4ef7\u683c\u4e2d\u4f4d\u6570\r\n    feature_names = &#91; 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', \\\r\n                      'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]\r\n    feature_num = len(feature_names)\r\n\r\n    # 
\u5c06\u539f\u59cb\u6570\u636e\u8fdb\u884cReshape\uff0c\u53d8\u6210&#91;N, 14]\u8fd9\u6837\u7684\u5f62\u72b6\r\n    data = data.reshape(&#91;data.shape&#91;0] \/\/ feature_num, feature_num])\r\n\r\n    # \u5c06\u539f\u6570\u636e\u96c6\u62c6\u5206\u6210\u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\r\n    # \u8fd9\u91cc\u4f7f\u752880%\u7684\u6570\u636e\u505a\u8bad\u7ec3\uff0c20%\u7684\u6570\u636e\u505a\u6d4b\u8bd5\r\n    # \u6d4b\u8bd5\u96c6\u548c\u8bad\u7ec3\u96c6\u5fc5\u987b\u662f\u6ca1\u6709\u4ea4\u96c6\u7684\r\n    ratio = 0.8\r\n    offset = int(data.shape&#91;0] * ratio)\r\n    training_data = data&#91;:offset]\r\n\r\n    # \u8ba1\u7b97train\u6570\u636e\u96c6\u7684\u6700\u5927\u503c\uff0c\u6700\u5c0f\u503c\uff0c\u5e73\u5747\u503c\r\n    maximums, minimums, avgs = training_data.max(axis=0), training_data.min(axis=0), \\\r\n                                 training_data.sum(axis=0) \/ training_data.shape&#91;0]\r\n    \r\n    # \u8bb0\u5f55\u6570\u636e\u7684\u5f52\u4e00\u5316\u53c2\u6570\uff0c\u5728\u9884\u6d4b\u65f6\u5bf9\u6570\u636e\u505a\u5f52\u4e00\u5316\r\n    global max_values\r\n    global min_values\r\n    global avg_values\r\n    max_values = maximums\r\n    min_values = minimums\r\n    avg_values = avgs\r\n\r\n    # \u5bf9\u6570\u636e\u8fdb\u884c\u5f52\u4e00\u5316\u5904\u7406\r\n    for i in range(feature_num):\r\n        data&#91;:, i] = (data&#91;:, i] - avgs&#91;i]) \/ (maximums&#91;i] - minimums&#91;i])\r\n\r\n    # \u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\u7684\u5212\u5206\u6bd4\u4f8b\r\n    training_data = data&#91;:offset]\r\n    test_data = data&#91;offset:]\r\n    return training_data, test_data\n\n#\u4f7f\u7528paddle\u521b\u5efa\u6a21\u578b\u7f51\u7edc\u7c7b\nclass Regressor(paddle.nn.Layer):\r\n\r\n    # self\u4ee3\u8868\u7c7b\u7684\u5b9e\u4f8b\u81ea\u8eab\r\n    def __init__(self):\r\n        # \u521d\u59cb\u5316\u7236\u7c7b\u4e2d\u7684\u4e00\u4e9b\u53c2\u6570\r\n        super(Regressor, self).__init__()\r\n        \r\n        # 
\u5b9a\u4e49\u4e00\u5c42\u5168\u8fde\u63a5\u5c42\uff0c\u8f93\u5165\u7ef4\u5ea6\u662f13\uff0c\u8f93\u51fa\u7ef4\u5ea6\u662f1\r\n        self.fc = Linear(in_features=13, out_features=1)\r\n    \r\n    # \u7f51\u7edc\u7684\u524d\u5411\u8ba1\u7b97\r\n    def forward(self, inputs):\r\n        x = self.fc(inputs)\r\n        return x\n\n#\u8bad\u7ec3\u914d\u7f6e\u4ee3\u7801\n# \u58f0\u660e\u5b9a\u4e49\u597d\u7684\u7ebf\u6027\u56de\u5f52\u6a21\u578b\r\nmodel = Regressor()\r\n# \u5f00\u542f\u6a21\u578b\u8bad\u7ec3\u6a21\u5f0f\r\nmodel.train()\r\n# \u52a0\u8f7d\u6570\u636e\r\ntraining_data, test_data = load_data()\r\n# \u5b9a\u4e49\u4f18\u5316\u7b97\u6cd5\uff0c\u4f7f\u7528\u968f\u673a\u68af\u5ea6\u4e0b\u964dSGD\r\n# \u5b66\u4e60\u7387\u8bbe\u7f6e\u4e3a0.01\r\nopt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())\r\n\n#\u5b9e\u73b0\u8bad\u7ec3\u4ee3\u7801\nEPOCH_NUM = 10   # \u8bbe\u7f6e\u5916\u5c42\u5faa\u73af\u6b21\u6570\r\nBATCH_SIZE = 10  # \u8bbe\u7f6ebatch\u5927\u5c0f\r\n\r\n# \u5b9a\u4e49\u5916\u5c42\u5faa\u73af\r\nfor epoch_id in range(EPOCH_NUM):\r\n    # \u5728\u6bcf\u8f6e\u8fed\u4ee3\u5f00\u59cb\u4e4b\u524d\uff0c\u5c06\u8bad\u7ec3\u6570\u636e\u7684\u987a\u5e8f\u968f\u673a\u7684\u6253\u4e71\r\n    np.random.shuffle(training_data)\r\n    # \u5c06\u8bad\u7ec3\u6570\u636e\u8fdb\u884c\u62c6\u5206\uff0c\u6bcf\u4e2abatch\u5305\u542b10\u6761\u6570\u636e\r\n    mini_batches = &#91;training_data&#91;k:k+BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)]\r\n    # \u5b9a\u4e49\u5185\u5c42\u5faa\u73af\r\n    for iter_id, mini_batch in enumerate(mini_batches):\r\n        x = np.array(mini_batch&#91;:, :-1]) # \u83b7\u5f97\u5f53\u524d\u6279\u6b21\u8bad\u7ec3\u6570\u636e\r\n        y = np.array(mini_batch&#91;:, -1:]) # \u83b7\u5f97\u5f53\u524d\u6279\u6b21\u8bad\u7ec3\u6807\u7b7e\uff08\u771f\u5b9e\u623f\u4ef7\uff09\r\n        # \u5c06numpy\u6570\u636e\u8f6c\u4e3a\u98de\u6868\u52a8\u6001\u56fetensor\u5f62\u5f0f\r\n        house_features = 
paddle.to_tensor(x)\r\n        prices = paddle.to_tensor(y)\r\n        \r\n        # \u524d\u5411\u8ba1\u7b97\r\n        predicts = model(house_features)\r\n        \r\n        # \u8ba1\u7b97\u635f\u5931\r\n        loss = F.square_error_cost(predicts, label=prices)\r\n        avg_loss = paddle.mean(loss)\r\n        if iter_id%20==0:\r\n            print(\"epoch: {}, iter: {}, loss is: {}\".format(epoch_id, iter_id, avg_loss.numpy()))\r\n        \r\n        # \u53cd\u5411\u4f20\u64ad\r\n        avg_loss.backward()\r\n        # \u6700\u5c0f\u5316loss,\u66f4\u65b0\u53c2\u6570\r\n        opt.step()\r\n        # \u6e05\u9664\u68af\u5ea6\r\n        opt.clear_grad()\n\n# \u4fdd\u5b58\u6a21\u578b\u53c2\u6570\uff0c\u6587\u4ef6\u540d\u4e3aLR_model.pdparams\r\npaddle.save(model.state_dict(), 'LR_model.pdparams')\r\nprint(\"\u6a21\u578b\u4fdd\u5b58\u6210\u529f\uff0c\u6a21\u578b\u53c2\u6570\u4fdd\u5b58\u5728LR_model.pdparams\u4e2d\")\n\ndef load_one_example():\r\n    # \u4ece\u4e0a\u8fb9\u5df2\u52a0\u8f7d\u7684\u6d4b\u8bd5\u96c6\u4e2d\uff0c\u968f\u673a\u9009\u62e9\u4e00\u6761\u4f5c\u4e3a\u6d4b\u8bd5\u6570\u636e\r\n    idx = np.random.randint(0, test_data.shape&#91;0])\r\n    idx = -10\r\n    one_data, label = test_data&#91;idx, :-1], test_data&#91;idx, -1]\r\n    # \u4fee\u6539\u8be5\u6761\u6570\u636eshape\u4e3a&#91;1,13]\r\n    one_data =  one_data.reshape(&#91;1,-1])\r\n    return one_data, label\n\n# \u53c2\u6570\u4e3a\u4fdd\u5b58\u6a21\u578b\u53c2\u6570\u7684\u6587\u4ef6\u5730\u5740\r\nmodel_dict = paddle.load('LR_model.pdparams')\r\nmodel.load_dict(model_dict)\r\nmodel.eval()\r\n\r\n# \u53c2\u6570\u4e3a\u6570\u636e\u96c6\u7684\u6587\u4ef6\u5730\u5740\r\none_data, label = load_one_example()\r\n# \u5c06\u6570\u636e\u8f6c\u4e3a\u52a8\u6001\u56fe\u7684variable\u683c\u5f0f \r\none_data = paddle.to_tensor(one_data)\r\npredict = model(one_data)\r\n\r\n# \u5bf9\u7ed3\u679c\u505a\u53cd\u5f52\u4e00\u5316\u5904\u7406\r\npredict = predict * (max_values&#91;-1] - min_values&#91;-1]) + 
avg_values&#91;-1]\r\n# \u5bf9label\u6570\u636e\u505a\u53cd\u5f52\u4e00\u5316\u5904\u7406\r\nlabel = label * (max_values&#91;-1] - min_values&#91;-1]) + avg_values&#91;-1]\r\n\r\nprint(\"Inference result is {}, the corresponding label is {}\".format(predict.numpy(), label))<\/code><\/pre>\n\n\n\n<p>\u5e38\u770b\u5f00\u6e90\u4ee3\u7801\u4ee5\u53ca\u5927\u4f6c\u7684\u4ee3\u7801\u8fd8\u662f\u80fd\u5b66\u5230\u5f88\u591a\u4e1c\u897f\u7684\uff0c\u6bd4\u5982<\/p>\n\n\n\n<p>mini_batches = [training_data[k:k+BATCH_SIZE] <strong>for<\/strong> k <strong>in<\/strong> range(0, len(training_data), BATCH_SIZE)]\u8fd9\u53e5\u5c31\u5f88\u7cbe\u70bc<\/p>\n\n\n\n<p>\u4e0b\u9762\u7528pytorch\u91cd\u65b0\u5199\u4e00\u904d<\/p>\n\n\n\n<p>\u9996\u5148\u5728\u6a21\u5757\u5f15\u5165\u4e2d\u5c31\u6709\u4e00\u4e9b\u533a\u522b\uff0cpytorch\u4e2d\u81ea\u5b9a\u4e49\u7f51\u7edc\u7684\u57fa\u7c7b\u5728nn.Module\u4e2d\uff0c\u540c\u65f6\u4f18\u5316\u5668\u7c7boptim\u9700\u8981\u5bfc\u5165<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>import torch\r\nfrom torch import nn, optim\r\nfrom torch.nn import Module, Linear\r\nimport numpy as np\r\nimport random<\/code><\/pre>\n\n\n\n<p>\u52a0\u8f7d\u6570\u636e\u6ca1\u6709\u533a\u522b<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>def load_data():\r\n    # \u4ece\u6587\u4ef6\u5bfc\u5165\u6570\u636e\r\n    datafile = r'E:\\NLPDATA\\housing.data'\r\n    data = np.fromfile(datafile, sep=' ', dtype=np.float32)\r\n\r\n    # \u6bcf\u6761\u6570\u636e\u5305\u62ec14\u9879\uff0c\u5176\u4e2d\u524d\u976213\u9879\u662f\u5f71\u54cd\u56e0\u7d20\uff0c\u7b2c14\u9879\u662f\u76f8\u5e94\u7684\u623f\u5c4b\u4ef7\u683c\u4e2d\u4f4d\u6570\r\n    feature_names = &#91; 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', \\\r\n                      'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]\r\n    feature_num = len(feature_names)\r\n\r\n    # \u5c06\u539f\u59cb\u6570\u636e\u8fdb\u884cReshape\uff0c\u53d8\u6210&#91;N, 14]\u8fd9\u6837\u7684\u5f62\u72b6\r\n    data = 
data.reshape(&#91;data.shape&#91;0] \/\/ feature_num, feature_num])\r\n\r\n    # \u5c06\u539f\u6570\u636e\u96c6\u62c6\u5206\u6210\u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\r\n    # \u8fd9\u91cc\u4f7f\u752880%\u7684\u6570\u636e\u505a\u8bad\u7ec3\uff0c20%\u7684\u6570\u636e\u505a\u6d4b\u8bd5\r\n    # \u6d4b\u8bd5\u96c6\u548c\u8bad\u7ec3\u96c6\u5fc5\u987b\u662f\u6ca1\u6709\u4ea4\u96c6\u7684\r\n    ratio = 0.8\r\n    offset = int(data.shape&#91;0] * ratio)\r\n    training_data = data&#91;:offset]\r\n\r\n    # \u8ba1\u7b97train\u6570\u636e\u96c6\u7684\u6700\u5927\u503c\uff0c\u6700\u5c0f\u503c\uff0c\u5e73\u5747\u503c\r\n    maximums, minimums, avgs = training_data.max(axis=0), training_data.min(axis=0), \\\r\n                                 training_data.sum(axis=0) \/ training_data.shape&#91;0]\r\n    \r\n    # \u8bb0\u5f55\u6570\u636e\u7684\u5f52\u4e00\u5316\u53c2\u6570\uff0c\u5728\u9884\u6d4b\u65f6\u5bf9\u6570\u636e\u505a\u5f52\u4e00\u5316\r\n    global max_values\r\n    global min_values\r\n    global avg_values\r\n    max_values = maximums\r\n    min_values = minimums\r\n    avg_values = avgs\r\n\r\n    # \u5bf9\u6570\u636e\u8fdb\u884c\u5f52\u4e00\u5316\u5904\u7406\r\n    for i in range(feature_num):\r\n        data&#91;:, i] = (data&#91;:, i] - avgs&#91;i]) \/ (maximums&#91;i] - minimums&#91;i])\r\n\r\n    # \u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\u7684\u5212\u5206\u6bd4\u4f8b\r\n    training_data = data&#91;:offset]\r\n    test_data = data&#91;offset:]\r\n    return training_data, test_data<\/code><\/pre>\n\n\n\n<p>\u7f51\u7edc\u7c7b\u5b9a\u4e49\uff0ctorch\u7ee7\u627f\u7684\u662fmodule\u4f5c\u4e3a\u57fa\u7c7b\uff0c\u8bed\u6cd5\u90fd\u662f\u4e00\u81f4\u7684<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>class Regressor(Module):\r\n\r\n    # self\u4ee3\u8868\u7c7b\u7684\u5b9e\u4f8b\u81ea\u8eab\r\n    def __init__(self):\r\n        # \u521d\u59cb\u5316\u7236\u7c7b\u4e2d\u7684\u4e00\u4e9b\u53c2\u6570\r\n        super(Regressor, self).__init__()\r\n        \r\n      
  # \u5b9a\u4e49\u4e00\u5c42\u5168\u8fde\u63a5\u5c42\uff0c\u8f93\u5165\u7ef4\u5ea6\u662f13\uff0c\u8f93\u51fa\u7ef4\u5ea6\u662f1\r\n        self.fc = Linear(in_features=13, out_features=1)\r\n    \r\n    # \u7f51\u7edc\u7684\u524d\u5411\u8ba1\u7b97\r\n    def forward(self, inputs):\r\n        x = self.fc(inputs)\r\n        return x<\/code><\/pre>\n\n\n\n<p>torch.nn.functional\u4e2d\u6ca1\u6709\u65b9\u5dee\u51fd\u6570\uff08\u4e5f\u6709\u53ef\u80fd\u662f\u6211\u6ca1\u627e\u5230\uff09\uff0c\u8f6c\u7528nn\u4e2d\u7684MSELoss\u5e73\u5747\u65b9\u5dee\u51fd\u6570\u3002\u540c\u65f6\u4f18\u5316\u5668\u53c2\u6570\u4e5f\u6709\u4e00\u5b9a\u533a\u522b\uff0ctorch\u57fa\u672c\u90fd\u662f\u7b80\u5199<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code># \u58f0\u660e\u5b9a\u4e49\u597d\u7684\u7ebf\u6027\u56de\u5f52\u6a21\u578b\r\nmodel = Regressor()\r\n# \u5f00\u542f\u6a21\u578b\u8bad\u7ec3\u6a21\u5f0f\r\nmodel.train()\r\n# \u52a0\u8f7d\u6570\u636e\r\ntraining_data, test_data = load_data()\r\n#\u8bbe\u7f6e\u635f\u5931\u51fd\u6570\r\ncriterion = nn.MSELoss()\r\n# \u5b9a\u4e49\u4f18\u5316\u7b97\u6cd5\uff0c\u4f7f\u7528\u968f\u673a\u68af\u5ea6\u4e0b\u964dSGD\r\n# \u5b66\u4e60\u7387\u8bbe\u7f6e\u4e3a0.01\r\nopt = optim.SGD(lr=0.01, params=model.parameters()) #pytorch\u4e2d\u5b66\u4e60\u7387\u662flr\uff0cpaddle\u662flearning_rate,pytorch\u4e2d\u8981\u8bad\u7ec3\u7684\u53c2\u6570\u662fparams\uff0cpaddle\u662fparameters<\/code><\/pre>\n\n\n\n<p>\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u533a\u522b\u662f\u635f\u5931\u51fd\u6570\u7684\u53c2\u6570\u4e0d\u540c\uff0c\u4ee5\u53catensor\u4e0enumpy\u4e92\u76f8\u8f6c\u6362\u7684\u51fd\u6570\u540d\u4e0d\u540c\u3002\u9664\u6b64\u4ee5\u5916torch\u7684\u6e05\u7a7a\u68af\u5ea6\u662fzero_grad\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>EPOCH_NUM = 10   # \u8bbe\u7f6e\u5916\u5c42\u5faa\u73af\u6b21\u6570\r\nBATCH_SIZE = 10  # \u8bbe\u7f6ebatch\u5927\u5c0f\r\n\r\n# \u5b9a\u4e49\u5916\u5c42\u5faa\u73af\r\nfor epoch_id in range(EPOCH_NUM):\r\n    # 
\u5728\u6bcf\u8f6e\u8fed\u4ee3\u5f00\u59cb\u4e4b\u524d\uff0c\u5c06\u8bad\u7ec3\u6570\u636e\u7684\u987a\u5e8f\u968f\u673a\u7684\u6253\u4e71\r\n    np.random.shuffle(training_data)\r\n    # \u5c06\u8bad\u7ec3\u6570\u636e\u8fdb\u884c\u62c6\u5206\uff0c\u6bcf\u4e2abatch\u5305\u542b10\u6761\u6570\u636e\r\n    mini_batches = &#91;training_data&#91;k:k+BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)]\r\n    # \u5b9a\u4e49\u5185\u5c42\u5faa\u73af\r\n    for iter_id, mini_batch in enumerate(mini_batches):\r\n        x = np.array(mini_batch&#91;:, :-1]) # \u83b7\u5f97\u5f53\u524d\u6279\u6b21\u8bad\u7ec3\u6570\u636e\r\n        y = np.array(mini_batch&#91;:, -1:]) # \u83b7\u5f97\u5f53\u524d\u6279\u6b21\u8bad\u7ec3\u6807\u7b7e\uff08\u771f\u5b9e\u623f\u4ef7\uff09\r\n        # \u5c06numpy\u6570\u636e\u8f6c\u4e3atorch.tensor\u5f62\u5f0f\r\n        house_features = torch.tensor(x)\r\n        prices = torch.tensor(y)\r\n        \r\n        # \u524d\u5411\u8ba1\u7b97\r\n        predicts = model(house_features)\r\n        \r\n        # \u8ba1\u7b97\u635f\u5931\r\n        loss=criterion(predicts,prices)\r\n        avg_loss = torch.mean(loss)\r\n        if iter_id%20==0:\r\n            print(\"epoch: {}, iter: {}, loss is: {}\".format(epoch_id, iter_id, avg_loss.detach().numpy()))\r\n        \r\n        # \u53cd\u5411\u4f20\u64ad\r\n        avg_loss.backward()\r\n        # \u6700\u5c0f\u5316loss,\u66f4\u65b0\u53c2\u6570\r\n        opt.step()\r\n        # \u6e05\u9664\u68af\u5ea6\r\n        opt.zero_grad()<\/code><\/pre>\n\n\n\n<p>\u4fdd\u5b58\u6a21\u578b<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>print(model.state_dict().keys())                                # \u8f93\u51fa\u6a21\u578b\u53c2\u6570\u540d\u79f0\r\r\n# \u4fdd\u5b58\u6a21\u578b\u53c2\u6570\u5230\u8def\u5f84\".\/data\/model_parameter.pkl\"\r\ntorch.save(model.state_dict(), \".\/data\/model_parameter.pkl\")\r\nnew_model = Model()                                                    # 
\u8c03\u7528\u6a21\u578bModel\r\nnew_model.load_state_dict(torch.load(\".\/data\/model_parameter.pkl\"))    # \u52a0\u8f7d\u6a21\u578b\u53c2\u6570     \r\nnew_model.forward(input)                                               # \u8fdb\u884c\u4f7f\u7528<\/code><\/pre>\n","protected":false},"excerpt":{"rendered":"<p>paddle\u4e0epytorch\u5b9e\u73b0\u591a\u5143\u7ebf\u6027\u56de\u5f52<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"_jetpack_memberships_contains_paid_content":false,"footnotes":""},"categories":[21],"tags":[],"class_list":["post-328","post","type-post","status-publish","format-standard","hentry","category-21"],"jetpack_featured_media_url":"","jetpack_sharing_enabled":true,"_links":{"self":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts\/328","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/comments?post=328"}],"version-history":[{"count":1,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts\/328\/revisions"}],"predecessor-version":[{"id":329,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/posts\/328\/revisions\/329"}],"wp:attachment":[{"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/media?parent=328"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/categories?post=328"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.gislxz.com\/index.php\/wp-json\/wp\/v2\/tags?post=328"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}