{"@context":{"@language":"en","@vocab":"https://schema.org/","arrayShape":"cr:arrayShape","citeAs":"cr:citeAs","column":"cr:column","conformsTo":"dct:conformsTo","cr":"http://mlcommons.org/croissant/","data":{"@id":"cr:data","@type":"@json"},"dataBiases":"cr:dataBiases","dataCollection":"cr:dataCollection","dataType":{"@id":"cr:dataType","@type":"@vocab"},"dct":"http://purl.org/dc/terms/","extract":"cr:extract","field":"cr:field","fileProperty":"cr:fileProperty","fileObject":"cr:fileObject","fileSet":"cr:fileSet","format":"cr:format","includes":"cr:includes","isArray":"cr:isArray","isLiveDataset":"cr:isLiveDataset","jsonPath":"cr:jsonPath","key":"cr:key","md5":"cr:md5","parentField":"cr:parentField","path":"cr:path","personalSensitiveInformation":"cr:personalSensitiveInformation","recordSet":"cr:recordSet","references":"cr:references","regex":"cr:regex","repeated":"cr:repeated","replace":"cr:replace","sc":"https://schema.org/","separator":"cr:separator","source":"cr:source","subField":"cr:subField","transform":"cr:transform"},"@type":"sc:Dataset","distribution":[{"@type":"cr:FileObject","@id":"repo","name":"repo","description":"The Hugging Face git repository.","contentUrl":"https://huggingface.co/datasets/unibuc-cs/MAVOS-DD/tree/refs%2Fconvert%2Fparquet","encodingFormat":"git+https","sha256":"https://github.com/mlcommons/croissant/issues/80"},{"@type":"cr:FileSet","@id":"parquet-files-for-config-default","containedIn":{"@id":"repo"},"encodingFormat":"application/x-parquet","includes":"default/*/*.parquet"}],"recordSet":[{"@type":"cr:RecordSet","dataType":"cr:Split","key":{"@id":"default_splits/split_name"},"@id":"default_splits","name":"default_splits","description":"Splits for the default config.","field":[{"@type":"cr:Field","@id":"default_splits/split_name","dataType":"sc:Text"}],"data":[{"default_splits/split_name":"train"}]},{"@type":"cr:RecordSet","@id":"default","description":"unibuc-cs/MAVOS-DD - 'default' subset\n\nAdditional information:\n- 1 skipped column: video","field":[{"@type":"cr:Field","@id":"default/split","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-default"},"extract":{"fileProperty":"fullpath"},"transform":{"regex":"default/(?:partial-)?(train)/.+parquet$"}},"references":{"field":{"@id":"default_splits/split_name"}}},{"@type":"cr:Field","@id":"default/label","dataType":"sc:Integer","source":{"fileSet":{"@id":"parquet-files-for-config-default"},"extract":{"column":"label"}}}]}],"conformsTo":"http://mlcommons.org/croissant/1.1","name":"MAVOS-DD","description":"LICENSE: This dataset is released under the CC BY-NC-SA 4.0 license.\nThis repository contains MAVOS-DD an open-set benchmark for multilingual audio-video deepfake detection.\nBelow, you can find the code the obtain the subsets described in the paper: train, validation, open-set model, open-set language and open-set full:\nfrom datasets import Dataset, concatenate_datasets\nmetadata = Dataset('MAVOS-DD')\nmetadata_indomain = metadata.filter(lambda sample: sample['split']=='test' and not… See the full description on the dataset page: https://huggingface.co/datasets/unibuc-cs/MAVOS-DD.","alternateName":["unibuc-cs/MAVOS-DD"],"creator":{"@type":"Organization","name":"Department of Computer Science, University of Bucharest","url":"https://huggingface.co/unibuc-cs"},"keywords":["Arabic","Romanian","English","German","Hindi","Spanish","Russian","10K - 100K","Video","Datasets","Croissant","🇺🇸 Region: US"],"url":"https://huggingface.co/datasets/unibuc-cs/MAVOS-DD"}