{"@context":{"@language":"en","@vocab":"https://schema.org/","arrayShape":"cr:arrayShape","citeAs":"cr:citeAs","column":"cr:column","conformsTo":"dct:conformsTo","cr":"http://mlcommons.org/croissant/","data":{"@id":"cr:data","@type":"@json"},"dataBiases":"cr:dataBiases","dataCollection":"cr:dataCollection","dataType":{"@id":"cr:dataType","@type":"@vocab"},"dct":"http://purl.org/dc/terms/","extract":"cr:extract","field":"cr:field","fileProperty":"cr:fileProperty","fileObject":"cr:fileObject","fileSet":"cr:fileSet","format":"cr:format","includes":"cr:includes","isArray":"cr:isArray","isLiveDataset":"cr:isLiveDataset","jsonPath":"cr:jsonPath","key":"cr:key","md5":"cr:md5","parentField":"cr:parentField","path":"cr:path","personalSensitiveInformation":"cr:personalSensitiveInformation","recordSet":"cr:recordSet","references":"cr:references","regex":"cr:regex","repeated":"cr:repeated","replace":"cr:replace","sc":"https://schema.org/","separator":"cr:separator","source":"cr:source","subField":"cr:subField","transform":"cr:transform"},"@type":"sc:Dataset","distribution":[{"@type":"cr:FileObject","@id":"repo","name":"repo","description":"The Hugging Face git repository.","contentUrl":"https://huggingface.co/datasets/lixinhao/VideoEval/tree/refs%2Fconvert%2Fparquet","encodingFormat":"git+https","sha256":"https://github.com/mlcommons/croissant/issues/80"},{"@type":"cr:FileSet","@id":"parquet-files-for-config-default","containedIn":{"@id":"repo"},"encodingFormat":"application/x-parquet","includes":"default/*/*.parquet"}],"recordSet":[{"@type":"cr:RecordSet","dataType":"cr:Split","key":{"@id":"default_splits/split_name"},"@id":"default_splits","name":"default_splits","description":"Splits for the default config.","field":[{"@type":"cr:Field","@id":"default_splits/split_name","dataType":"sc:Text"}],"data":[{"default_splits/split_name":"train"},{"default_splits/split_name":"validation"},{"default_splits/split_name":"test"}]},{"@type":"cr:RecordSet","@id":"default","description":"lixinhao/VideoEval - 'default' subset\n\nAdditional information:\n- 3 splits: train, validation, test","field":[{"@type":"cr:Field","@id":"default/split","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-default"},"extract":{"fileProperty":"fullpath"},"transform":{"regex":"default/(?:partial-)?(train|validation|test)/.+parquet$"}},"references":{"field":{"@id":"default_splits/split_name"}}},{"@type":"cr:Field","@id":"default/text","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-default"},"extract":{"column":"text"}}}]}],"conformsTo":"http://mlcommons.org/croissant/1.1","name":"VideoEval","description":"\n\t\n\t\t\n\t\tDataset Card for VideoEval\n\t\n\n\n\t\n\t\t\n\t\tVidTAB\n\t\n\n\n\t\n\t\t\n\t\tAction Recognition in Dark\n\t\n\nYou could download all videos from ARID at https://opendatalab.com/OpenDataLab/Action_Recognition_in_the_Dark.\nYou just need to use the mp4 video in the video folder and then use the annotations we provided.\n\n\t\n\t\t\n\t\n\t\n\t\tAction Recognition in Long Video\n\t\n\nYou could download all videos from Breakfast at https://serre-lab.clps.brown.edu/resource/breakfast-actions-dataset/.\nYou just need to use the mp4… See the full description on the dataset page: https://huggingface.co/datasets/lixinhao/VideoEval.","alternateName":["lixinhao/VideoEval"],"creator":{"@type":"Person","name":"Xinhao Li","url":"https://huggingface.co/lixinhao"},"keywords":["feature-extraction","apache-2.0","10K - 100K","text","Text","Datasets","Croissant","arxiv:2407.06491","🇺🇸 Region: US"],"license":"https://choosealicense.com/licenses/apache-2.0/","url":"https://huggingface.co/datasets/lixinhao/VideoEval"}