orionweller committed
Commit 1851e07 · verified · 1 Parent(s): 4e1988b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +41 -0
  2. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10877-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  3. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10877-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  4. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11435-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  5. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11435-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  6. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14337-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  7. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17296-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  8. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17296-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  9. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18860-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  10. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18860-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  11. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  12. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19975-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  13. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19975-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  14. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22015-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22015-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22380-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22380-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22440-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22731-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24358-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28066-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28066-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31340-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31340-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36578-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36578-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3749-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3802-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38432-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38432-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39383-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39383-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3983-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3983-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40391-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40391-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42571-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42571-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44344-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44344-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44809-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44809-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45078-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45078-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49113-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49113-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
.gitattributes CHANGED
@@ -13588,3 +13588,44 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24105-tokenized-c
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81341-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27136-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65654-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49113-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95891-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14337-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22380-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60044-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_84696-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49113-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22380-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60044-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64657-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61713-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22440-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24358-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39383-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22731-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79921-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72527-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_87636-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80223-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39383-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60999-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60999-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_87636-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66629-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72527-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80223-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66629-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31340-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93538-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31340-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_56988-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10877-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108773, "hashes": {}}, "samples": 44195, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47851784, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11391367, "hashes": {}}, "samples": 7458, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8118611, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10877-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37651506,
+ "num_truncated_tokens": 37621904
+ }
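Each num_tokens.json is a small sidecar recording the split's token counts ("num_tokens" and "num_truncated_tokens"). A quick aggregation over every split, sketched under the assumption that the train/<dataset>/<split>/num_tokens.json layout from this commit is present locally:

```python
import json
from pathlib import Path

# Sum the per-split counters across all num_tokens.json files (layout assumed, see above).
total_tokens = total_truncated = 0
for path in Path("train").glob("*/*/num_tokens.json"):
    counts = json.loads(path.read_text())
    total_tokens += counts["num_tokens"]
    total_truncated += counts["num_truncated_tokens"]

print(f"num_tokens={total_tokens:,} num_truncated_tokens={total_truncated:,}")
```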
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11435-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108313, "hashes": {}}, "samples": 44162, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47608443, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10266251, "hashes": {}}, "samples": 6797, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7312439, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11435-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37110088,
+ "num_truncated_tokens": 37082327
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14337-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:190dd108bb68049033894e9747dd99215cf222c45bee504d837cf0f5a87f21da
+ size 67108756
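The shard.*.mds entries in this commit are Git LFS pointer files rather than the binary shards themselves: three lines giving the pointer spec version, the SHA-256 of the stored object, and its size in bytes. A small sketch (file names are hypothetical) of parsing such a pointer and checking a downloaded shard against it:

```python
import hashlib
from pathlib import Path

def read_lfs_pointer(path: str) -> tuple[str, str, int]:
    # Parse the three "key value" lines of a Git LFS pointer file.
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines() if line)
    algo, digest = fields["oid"].split(":", 1)
    return algo, digest, int(fields["size"])

# Hypothetical paths: the pointer as checked out without LFS, and the fetched object.
algo, digest, size = read_lfs_pointer("shard.00000.mds.pointer")
blob = Path("shard.00000.mds").read_bytes()
assert len(blob) == size, "size mismatch"
assert hashlib.new(algo, blob).hexdigest() == digest, "checksum mismatch"
```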
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17296-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108567, "hashes": {}}, "samples": 44010, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47551961, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10918465, "hashes": {}}, "samples": 7211, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7805300, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17296-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37428237,
+ "num_truncated_tokens": 37399618
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18860-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108445, "hashes": {}}, "samples": 44109, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47810913, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12122533, "hashes": {}}, "samples": 7942, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8677455, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18860-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38004657,
+ "num_truncated_tokens": 37974311
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37390305,
+ "num_truncated_tokens": 37362158
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19975-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108753, "hashes": {}}, "samples": 43561, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47655103, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17057052, "hashes": {}}, "samples": 10774, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12089163, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19975-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40401152,
+ "num_truncated_tokens": 40367700
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22015-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107560, "hashes": {}}, "samples": 43358, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47648684, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17000287, "hashes": {}}, "samples": 10918, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12088032, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22015-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40374046,
+ "num_truncated_tokens": 40340274
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22380-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5722b71fd52606caee99d3a548a32d2fa0d5cd2d1f77f304c3443c32796ed863
+ size 67107823
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22380-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6589bc983d281df51222d55c6ae5fc6a646eb88ef7073c454a39e576049b234
+ size 13594832
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22440-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01d989ef1992bf9505b54d22fee31c959e961b555b3cd3c46a61e586d6a47b18
+ size 67107562
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22731-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45fe699cfc3d159e049d463b162c243dfcf834064f3b22d297f1c34eb6e5d28b
+ size 67107031
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24358-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cbbe31b49858011c30aa0a580fea980bad9eec6fd4af4063bff6444edab0591
+ size 67108058
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5fc4766ff9836721561422ec320b5c99484c40e5af7d171bdd1b5849d739a29
+ size 67108601
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f68e2c2fb701976785d338c4d1d8086faaa8247015ea81f57466ab881bfe5458
+ size 20817076
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28066-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108350, "hashes": {}}, "samples": 44045, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47566748, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10487292, "hashes": {}}, "samples": 6949, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7457295, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28066-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37219591,
+ "num_truncated_tokens": 37191200
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31340-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b76b314068c5a9c5978e461e3205d30871849c8a1ed72599f69e036cc107f33
+ size 67108797
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31340-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ed36b82e895de3691442a4066dc74bd14128531a38b117e261002bb5c6c93e8
+ size 11210433
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64d78dc3743c0693dcce3f2ba14780a51e57b0a6d0f706b86ef4fad5fbeca0da
+ size 67108429
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a719d9708d1b1ac256d78e9a2fd7bfe1f641d6c2c41e09a1f549e1af88fabbce
+ size 19705781
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36578-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107960, "hashes": {}}, "samples": 42939, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47544847, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18871225, "hashes": {}}, "samples": 12143, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13380151, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36578-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41284607,
+ "num_truncated_tokens": 41249647
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3749-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36378561,
+ "num_truncated_tokens": 36351204
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3802-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107786, "hashes": {}}, "samples": 43130, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47730373, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17831956, "hashes": {}}, "samples": 11463, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12755100, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38432-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108495, "hashes": {}}, "samples": 43611, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47577623, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15436628, "hashes": {}}, "samples": 9968, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10935692, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38432-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39614336,
+ "num_truncated_tokens": 39581552
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39383-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9750f16cd8cc5387f8d838073d28501e45b9b37861604f5fddf0ce8cf1c665cf
+ size 67108350
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39383-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d79d4ab57263d6249a78f082cbb8acc9e1e0d21035b517383aa13a3c330971c
+ size 11293019
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3983-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108387, "hashes": {}}, "samples": 44691, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47811680, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9218271, "hashes": {}}, "samples": 6191, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6569357, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3983-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36588831,
+ "num_truncated_tokens": 36559980
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40391-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108863, "hashes": {}}, "samples": 44024, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47816171, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11502020, "hashes": {}}, "samples": 7746, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8260540, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40391-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37703317,
+ "num_truncated_tokens": 37673397
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42571-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106921, "hashes": {}}, "samples": 44290, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47735176, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9135749, "hashes": {}}, "samples": 6060, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6490912, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42571-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36563038,
+ "num_truncated_tokens": 36536021
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44344-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107548, "hashes": {}}, "samples": 42663, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47529325, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21408451, "hashes": {}}, "samples": 13486, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15132034, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44344-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42519868,
+ "num_truncated_tokens": 42482970
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44809-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106925, "hashes": {}}, "samples": 43159, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47479598, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18682233, "hashes": {}}, "samples": 11984, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13258718, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44809-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41187850,
+ "num_truncated_tokens": 41152527
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45078-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107886, "hashes": {}}, "samples": 42605, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47686337, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20178307, "hashes": {}}, "samples": 13007, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14328814, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45078-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41921646,
+ "num_truncated_tokens": 41885758
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49113-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fba47368550018d09e77bc3185b02ab1f4ac802ccb4df104176b340466910d17
+ size 67108640
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49113-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27849a15fec6f3247d80fb4047b9d73793cac2607a5846233d7015f4347f9c37
+ size 10860253