# Source code for elliot.evaluation.metrics.accuracy.map.map

"""
This is the implementation of the Mean Average Precision metric.
It proceeds from a user-wise computation, and average the values over the users.
"""

__version__ = '0.3.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'

import numpy as np
from elliot.evaluation.metrics.base_metric import BaseMetric


class MAP(BaseMetric):
    r"""
    Mean Average Precision

    This class represents the implementation of the Mean Average Precision recommendation metric.
    Passing 'MAP' to the metrics list will enable the computation of the metric.

    For further details, please refer to the `link
    <http://sdsawtelle.github.io/blog/output/mean-average-precision-MAP-for-recommender-systems.html#MAP-for-Recommender-Algorithms>`_

    Note:
        In this case the normalization factor used is :math:`\frac{1}{\min (m,N)}`, which
        prevents your AP score from being unfairly suppressed when your number of
        recommendations couldn't possibly capture all the correct ones.

    .. math::
        \begin{align*}
        \mathrm{AP@N} &= \frac{1}{\mathrm{min}(m,N)}\sum_{k=1}^N P(k) \cdot rel(k) \\
        \mathrm{MAP@N}& = \frac{1}{|U|}\sum_{u=1}^{|U|}(\mathrm{AP@N})_u
        \end{align*}

    To compute the metric, add it to the config file adopting the following pattern:

    .. code:: yaml

        simple_metrics: [MAP]
    """

    def __init__(self, recommendations, config, params, eval_objects):
        """
        Constructor

        :param recommendations: list of recommendations in the form {user: [(item1,value1),...]}
        :param config: SimpleNameSpace that represents the configuration of the experiment
        :param params: Parameters of the model
        :param eval_objects: list of objects that may be useful for the computation of the different metrics
        """
        super().__init__(recommendations, config, params, eval_objects)
        # Top-N threshold and binary relevance judgements come from the shared evaluation objects.
        self._cutoff = self._evaluation_objects.cutoff
        self._relevance = self._evaluation_objects.relevance.binary_relevance

    @staticmethod
    def name():
        """
        Metric Name Getter
        :return: returns the public name of the metric
        """
        return "MAP"

    @staticmethod
    def __user_ap(user_recommendations, cutoff, user_relevant_items):
        """
        Per User Average Precision (AP@N).

        Implements the formula documented on the class:
        :math:`\\mathrm{AP@N} = \\frac{1}{\\min(m,N)}\\sum_{k=1}^N P(k)\\cdot rel(k)`,
        where ``m`` is the number of relevant items and ``rel(k)`` is 1 iff the item
        at rank ``k`` is relevant.

        BUGFIX: the previous implementation averaged P(k) over every rank k=1..N,
        omitting the rel(k) factor, and normalized by N instead of min(m, N) —
        contradicting the class docstring.

        :param user_recommendations: list of user recommendation in the form [(item1,value1),...]
        :param cutoff: numerical threshold to limit the recommendation list
        :param user_relevant_items: list of user relevant items in the form [item1,...]
        :return: the value of the Average Precision metric for the specific user
        """
        relevant = set(user_relevant_items)  # O(1) membership tests in the rank loop
        hits = 0
        precision_sum = 0.0
        for rank, (item, _) in enumerate(user_recommendations[:cutoff], start=1):
            if item in relevant:
                hits += 1
                precision_sum += hits / rank  # P(k) evaluated only where rel(k) == 1
        norm = min(len(relevant), cutoff)
        # Guard the degenerate no-relevant-items case (eval_user_metric filters it out anyway).
        return precision_sum / norm if norm else 0.0

    @staticmethod
    def __user_precision(user_recommendations, cutoff, user_relevant_items):
        """
        Per User Precision@cutoff.

        :param user_recommendations: list of user recommendation in the form [(item1,value1),...]
        :param cutoff: numerical threshold to limit the recommendation list
        :param user_relevant_items: list of user relevant items in the form [item1,...]
        :return: the value of the Precision metric for the specific user
        """
        return sum(1 for i in user_recommendations[:cutoff] if i[0] in user_relevant_items) / cutoff

    def eval_user_metric(self):
        """
        Evaluation function

        :return: dict {user: AP@N} over users that have at least one relevant item
        """
        return {u: MAP.__user_ap(u_r, self._cutoff, self._relevance.get_user_rel(u))
                for u, u_r in self._recommendations.items()
                if len(self._relevance.get_user_rel(u))}