---
dataset_info:
  features:
  - name: text
    dtype: string
  - name: stars
    dtype: float64
  - name: text_chunks
    sequence: string
  - name: date
    dtype: string
  - name: user_name
    dtype: string
  - name: business_name
    dtype: string
  - name: domain
    dtype: string
  - name: link
    dtype: string
  - name: prompt
    dtype: string
  - name: prompt_type
    dtype: string
  - name: prompt_comparison
    dtype: string
  - name: title
    dtype: string
  - name: author
    dtype: string
  - name: release_date
    dtype: string
  - name: abstract
    dtype: string
  - name: author_highlights
    dtype: string
  - name: subjareas
    sequence: string
  - name: time
    dtype: timestamp[us]
  - name: type
    dtype: string
  - name: submitter
    dtype: string
  - name: authors
    dtype: string
  - name: journal_ref
    dtype: string
  - name: categories
    dtype: string
  - name: created_date
    dtype: string
  - name: venue
    dtype: string
  - name: year
    dtype: string
  - name: PMID
    dtype: string
  - name: question
    dtype: string
  - name: context
    dtype: string
  - name: label
    dtype: int64
  - name: article
    dtype: string
  - name: highlights
    dtype: string
  - name: summary
    dtype: string
  - name: subreddit
    dtype: string
  - name: url
    dtype: string
  - name: sections
    dtype: string
  splits:
  - name: train
    num_bytes: 29304328138
    num_examples: 1072984
  download_size: 17258777569
  dataset_size: 29304328138
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
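Given the metadata above (a single `default` config with one `train` split stored as Parquet shards under `data/train-*`), the dataset can be loaded with the 🤗 `datasets` library. A minimal sketch follows; the repository id `user/dataset-name` is a placeholder, and since the schema mixes fields from several source corpora (e.g. `stars`/`business_name` vs. `PMID` vs. `subreddit`), individual fields may be null for any given row.

```python
from datasets import load_dataset

# Placeholder repository id: substitute the actual Hub path of this dataset.
REPO_ID = "user/dataset-name"

# Load the single "train" split described in the card above (~17 GB download).
ds = load_dataset(REPO_ID, split="train")

# Streaming avoids downloading all Parquet shards up front.
ds_stream = load_dataset(REPO_ID, split="train", streaming=True)

row = next(iter(ds_stream))
print(row["text"][:200])   # free-text field (string)
print(row["stars"])        # float64; may be None for rows not drawn from reviews
print(row["text_chunks"])  # sequence of strings
```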