MATLAB gradient descent for rating-matrix factorization
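For reference, the loop below is (batch) gradient descent on the usual regularized squared-error objective over the observed ratings; this objective is inferred from the gradient lines rather than stated in the original post:

\[
J(U,V) = \sum_{(i,j)\,:\,R_{ij}>0} \big(U_i V_j^\top - R_{ij}\big)^2 + \lambda_u \sum_i \lVert U_i \rVert^2 + \lambda_v \sum_j \lVert V_j \rVert^2,
\qquad
\frac{\partial J}{\partial U_i} = 2\Big[\sum_{j\,:\,R_{ij}>0}\big(U_i V_j^\top - R_{ij}\big)V_j + \lambda_u U_i\Big]
\]

The code drops the common factor of 2, folding it into the step size theta.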

for iter = 1:num_iters
   
    % gradient descent step for the user vectors
    for i = 1:m
       % logical index: 1 where user i has rated the movie, 0 otherwise
       ratedIndex1 = R_training(i,:) ~= 0;
       % U(i,:) * V' is user i's predicted rating for every movie;
       % sumVec1 is the prediction error restricted to the movies user i actually rated
       sumVec1 = ratedIndex1 .* (U(i,:) * V' - R_training(i,:));
       product1 = sumVec1 * V;
       derivative1 = product1 + lambda_u * U(i,:);
       old_U(i,:) = U(i,:) - theta * derivative1;
    end
    
    % gradient descent step for the movie vectors
    for j = 1:n
       ratedIndex2 = R_training(:,j)~=0;
       sumVec2 = ratedIndex2 .* (U * V(j,:)' - R_training(:,j));
       product2 = sumVec2' * U;
       derivative2 = product2 + lambda_v * V(j,:);
       old_V(j,:) = V(j,:) - theta * derivative2;
    end
    
    U = old_U;
    V = old_V;
    RMSE(iter,1) = CompRMSE(train_vec,U,V);
    RMSE(iter,2) = CompRMSE(probe_vec,U,V);

end
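CompRMSE is called in the loop above but its definition is not included in the post. A minimal sketch, assuming train_vec and probe_vec are N x 3 matrices of (user, movie, rating) triples:

function rmse = CompRMSE(vec, U, V)
    % vec : N x 3 matrix, each row is (user index, movie index, observed rating)
    % U   : m x d user factors,  V : n x d movie factors
    pred = sum(U(vec(:,1),:) .* V(vec(:,2),:), 2);  % predicted rating for each triple
    rmse = sqrt(mean((pred - vec(:,3)).^2));        % root-mean-squared error
end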


SGD solution
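Despite the heading, mf_gd below takes full-batch gradient steps, just written in vectorized matrix form. With M the 0/1 observation mask (logitMatrix in the code), the dU line computes

\[
\frac{\partial J}{\partial U} = -2\,(M \circ R)\,V + 2\,\big((U V^\top) \circ M\big)\,V + 2\,\lambda_U U
\]

up to the factor of 2, which is why the update multiplies dU by 2; the same holds for dV with the roles of U and V swapped.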

function [ recItems ] = mf_gd( trainMatrix, featureNumber, maxEpoch, learnRate, lambdaU, lambdaV, k)

%get the size of the training matrix
[userNumber,itemNumber] = size(trainMatrix);

%init user factors and item factors
Ut = 0.01 * randn(userNumber, featureNumber);
Vt = 0.01 * randn(itemNumber, featureNumber);
%logical 0/1 mask of the observed entries
logitMatrix = trainMatrix > 0;

%calculate the gradients of the user factors and item factors
%and use gradient descent to optimize the risk function;
%user factors and item factors are updated alternately in each epoch
for round = 1:maxEpoch
   dU = -(logitMatrix  .* trainMatrix)  * Vt + (Ut * Vt' .* logitMatrix ) * Vt + lambdaU * Ut;
   dV = -(logitMatrix' .* trainMatrix') * Ut + (Vt * Ut' .* logitMatrix') * Ut + lambdaV * Vt;
   Ut = Ut - learnRate * dU * 2;
   Vt = Vt - learnRate * dV * 2;
end

%predict the rating of each item given by each user
predictMatrix = Ut * Vt';

%sort the score of items for each user
[sortedMatrix, sortedItems] = sort(predictMatrix, 2, 'descend');

%get the top-k items for each user
recItems = sortedItems(:, 1:k);
end
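A hypothetical call, with illustrative hyperparameter values that are not from the original post:

% 10 latent features, 200 epochs, learning rate 0.001, regularization 0.1, top-5 recommendations
recItems = mf_gd(trainMatrix, 10, 200, 0.001, 0.1, 0.1, 5);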

  

Original post: https://www.cnblogs.com/hxsyl/p/4899248.html