@article{Haghighatkhah_Meulemans_Speckmann_Urhausen_Verbeek_2022,
  title    = {Obstructing Classification via Projection},
  author   = {Haghighatkhah, Pantea and Meulemans, Wouter and Speckmann, Bettina and Urhausen, Jérôme and Verbeek, Kevin},
  journal  = {Computing in Geometry and Topology},
  year     = {2022},
  month    = oct,
  volume   = {1},
  number   = {1},
  pages    = {2:1--2:21},
  doi      = {10.57717/cgt.v1i1.8},
  url      = {https://www.cgt-journal.org/index.php/cgt/article/view/8},
  abstract = {Machine learning and data mining techniques are effective tools to classify large amounts of data. But they tend to preserve any inherent bias in the data, for example, with regard to gender or race. Removing such bias from data or the learned representations is quite challenging. In this paper we study a geometric problem which models a possible approach for bias removal. Our input is a set of points $P$ in Euclidean space $\mathbb{R}^d$ and each point is labeled with $k$ binary-valued properties. A priori we assume that it is `easy' to classify the data according to each property. Our goal is to obstruct the classification according to one property by a suitable projection to a lower-dimensional Euclidean space $\mathbb{R}^m$ ($m < d$), while classification according to all other properties remains easy. What it means for classification to be easy depends on the classification model used. We first consider classification by linear separability as employed by support vector machines. We use Kirchberger's Theorem to show that, under certain conditions, a simple projection to $\mathbb{R}^{d-1}$ suffices to eliminate the linear separability of one of the properties whilst maintaining the linear separability of the other properties. We also study the problem of maximizing the linear ``inseparability'' of the chosen property. Second, we consider more complex forms of separability and prove a connection between the number of projections required to obstruct classification and the Helly-type properties of such separabilities.},
}