diff --git "a/LLM_quantization.ipynb" "b/LLM_quantization.ipynb"
new file mode 100644
--- /dev/null
+++ "b/LLM_quantization.ipynb"
@@ -0,0 +1,8284 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU",
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "23f3d222dda34579a2d10f9cb0345f42": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_c70d11ef14824f30a5b2cb5211f3fa8f",
+ "IPY_MODEL_890b893d943e43bba871aba4b4228985",
+ "IPY_MODEL_05d47408587045b9a2de09a6d5f98a86"
+ ],
+ "layout": "IPY_MODEL_ed32d6d5a1994b5f869173e2f8720a1b"
+ }
+ },
+ "c70d11ef14824f30a5b2cb5211f3fa8f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_967824760cca4dd383a53742854629c1",
+ "placeholder": "",
+ "style": "IPY_MODEL_cd05c438545d49218989401a63f4c3e0",
+ "value": "Fetching 14 files: 100%"
+ }
+ },
+ "890b893d943e43bba871aba4b4228985": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_71a2ad80c40941fb926901ed6d35781e",
+ "max": 14,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_2c8d1fe46cba4278b12b70befaf538c7",
+ "value": 14
+ }
+ },
+ "05d47408587045b9a2de09a6d5f98a86": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_a8274e16b3bc4defba8bcb99720011cc",
+ "placeholder": "",
+ "style": "IPY_MODEL_75bfc069d9404dd0a64f778a61811bc9",
+ "value": " 14/14 [04:03<00:00, 67.67s/it]"
+ }
+ },
+ "ed32d6d5a1994b5f869173e2f8720a1b": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "967824760cca4dd383a53742854629c1": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "cd05c438545d49218989401a63f4c3e0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "71a2ad80c40941fb926901ed6d35781e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2c8d1fe46cba4278b12b70befaf538c7": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "a8274e16b3bc4defba8bcb99720011cc": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "75bfc069d9404dd0a64f778a61811bc9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "d7748e881b29423392742b2033a5d90a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_2a33b1b6f957453ca4fb859f0e0dfa37",
+ "IPY_MODEL_36eff02ed8b34ec8a7fb466311be758c",
+ "IPY_MODEL_aa3e275af3ad478383d77662ce27e9d6"
+ ],
+ "layout": "IPY_MODEL_848f36c2e49b4e468ded57bf44c2c703"
+ }
+ },
+ "2a33b1b6f957453ca4fb859f0e0dfa37": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_c25b167e65b24069a45a7e14d8d7d425",
+ "placeholder": "",
+ "style": "IPY_MODEL_36a5cf21100044c0811d5edc217c0250",
+ "value": "config.json: 100%"
+ }
+ },
+ "36eff02ed8b34ec8a7fb466311be758c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_beee422233b746959cd1aa9dc7730ad3",
+ "max": 571,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_6ba695bbfbc3415c8ffc700732915036",
+ "value": 571
+ }
+ },
+ "aa3e275af3ad478383d77662ce27e9d6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_84b4af49f7d349589efba60511e9cd81",
+ "placeholder": "",
+ "style": "IPY_MODEL_b96a2123a3bc48d7bf82573726646135",
+ "value": " 571/571 [00:00<00:00, 7.48kB/s]"
+ }
+ },
+ "848f36c2e49b4e468ded57bf44c2c703": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "c25b167e65b24069a45a7e14d8d7d425": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "36a5cf21100044c0811d5edc217c0250": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "beee422233b746959cd1aa9dc7730ad3": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "6ba695bbfbc3415c8ffc700732915036": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "84b4af49f7d349589efba60511e9cd81": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "b96a2123a3bc48d7bf82573726646135": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "6b00291e98f64133812ab66039c744fa": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_c50af39363644a25ad1005495ac0b35f",
+ "IPY_MODEL_ad2ad4dd4e444b9b8e5d3deec082f838",
+ "IPY_MODEL_79b6fbecb1794b1e92e7bba62583ea47"
+ ],
+ "layout": "IPY_MODEL_9077350cfcec46feb1cf02ef8b29c9bb"
+ }
+ },
+ "c50af39363644a25ad1005495ac0b35f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_efbcd5adc0f644a381f65c143b2a2119",
+ "placeholder": "",
+ "style": "IPY_MODEL_9825f53be4fc4452b1503319e380fa77",
+ "value": "README.md: 100%"
+ }
+ },
+ "ad2ad4dd4e444b9b8e5d3deec082f838": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_f723c15cbbde47e6ab57e35671a9a85e",
+ "max": 1392,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_256b926b3b0c4190b8ddc74c2b2f2955",
+ "value": 1392
+ }
+ },
+ "79b6fbecb1794b1e92e7bba62583ea47": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_ae83ef6286a44632a4dab0d90ee5ca39",
+ "placeholder": "",
+ "style": "IPY_MODEL_1610ee9e3e7948139275a21d840565fb",
+ "value": " 1.39k/1.39k [00:00<00:00, 19.5kB/s]"
+ }
+ },
+ "9077350cfcec46feb1cf02ef8b29c9bb": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "efbcd5adc0f644a381f65c143b2a2119": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "9825f53be4fc4452b1503319e380fa77": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "f723c15cbbde47e6ab57e35671a9a85e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "256b926b3b0c4190b8ddc74c2b2f2955": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "ae83ef6286a44632a4dab0d90ee5ca39": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1610ee9e3e7948139275a21d840565fb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "02d36996ff564b8ea2b24acce22bf4c3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_9e9cb229405c447d948032d6a2cc82bb",
+ "IPY_MODEL_13935436da6d4fa8be901ecb7dcb928a",
+ "IPY_MODEL_b3249b5f47e649539ed21841b87d9db2"
+ ],
+ "layout": "IPY_MODEL_8d124f06fd5842d4b99fc011b355312f"
+ }
+ },
+ "9e9cb229405c447d948032d6a2cc82bb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_bd235d7d8f7c4f03bed28d7aa692731e",
+ "placeholder": "",
+ "style": "IPY_MODEL_f2b85643c27f4dcc979ed2757c4c0c86",
+ "value": ".gitattributes: 100%"
+ }
+ },
+ "13935436da6d4fa8be901ecb7dcb928a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_17c7c1af613046f3a50c3097fac7cba4",
+ "max": 1519,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_dd1e36b86f9440eb8cf35a260204d5ae",
+ "value": 1519
+ }
+ },
+ "b3249b5f47e649539ed21841b87d9db2": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_7099ff2f7cf84035b71eee27e7f7f52f",
+ "placeholder": "",
+ "style": "IPY_MODEL_480b82fe42c544429196dc052de582af",
+ "value": " 1.52k/1.52k [00:00<00:00, 19.0kB/s]"
+ }
+ },
+ "8d124f06fd5842d4b99fc011b355312f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "bd235d7d8f7c4f03bed28d7aa692731e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "f2b85643c27f4dcc979ed2757c4c0c86": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "17c7c1af613046f3a50c3097fac7cba4": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "dd1e36b86f9440eb8cf35a260204d5ae": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "7099ff2f7cf84035b71eee27e7f7f52f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "480b82fe42c544429196dc052de582af": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "65893b35c8da4e74af4472d8b9bbb051": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_8cba36d0838849d8933fdf03ec7b1dd9",
+ "IPY_MODEL_bf3e0d482d3e4b0899c1834522c65559",
+ "IPY_MODEL_95533da54ba9498fb280653fb6485af0"
+ ],
+ "layout": "IPY_MODEL_f92494f93252496497635102d83536b6"
+ }
+ },
+ "8cba36d0838849d8933fdf03ec7b1dd9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_4d65c6a547704f249fc318e923f04704",
+ "placeholder": "",
+ "style": "IPY_MODEL_46d9ffcfc0a9468893e2213f561c5a10",
+ "value": "model.safetensors.index.json: 100%"
+ }
+ },
+ "bf3e0d482d3e4b0899c1834522c65559": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_bb9f12685c6a40fdb6bdb29e7a7a05d5",
+ "max": 25125,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_636240a25c6c44a99405ebd05871f442",
+ "value": 25125
+ }
+ },
+ "95533da54ba9498fb280653fb6485af0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_f347c43229fe4e9da44771d749f2e7a2",
+ "placeholder": "",
+ "style": "IPY_MODEL_66afca1c879a49248ff216ece0be1d99",
+ "value": " 25.1k/25.1k [00:00<00:00, 366kB/s]"
+ }
+ },
+ "f92494f93252496497635102d83536b6": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "4d65c6a547704f249fc318e923f04704": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "46d9ffcfc0a9468893e2213f561c5a10": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "bb9f12685c6a40fdb6bdb29e7a7a05d5": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "636240a25c6c44a99405ebd05871f442": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "f347c43229fe4e9da44771d749f2e7a2": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "66afca1c879a49248ff216ece0be1d99": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "bb15d5514a14438db029a6729d5bb175": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_f1d41a43c6ed4589884f0dc1a4182814",
+ "IPY_MODEL_30f3b1c5172a457d8cfcd08d9a3ca523",
+ "IPY_MODEL_bcd12847b1224974aedc61818997d4b6"
+ ],
+ "layout": "IPY_MODEL_8a08c3225d9a464c9359089c416378dd"
+ }
+ },
+ "f1d41a43c6ed4589884f0dc1a4182814": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_32de1fa8fa184cae93fe2338180df84b",
+ "placeholder": "",
+ "style": "IPY_MODEL_97c707df0f61483bb572d02c98cb6cdf",
+ "value": "generation_config.json: 100%"
+ }
+ },
+ "30f3b1c5172a457d8cfcd08d9a3ca523": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_fa49ad10de3c4fc5a2a36cc8b79cf23c",
+ "max": 116,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_9bea0631354b4d7892e2e45e3b2b2724",
+ "value": 116
+ }
+ },
+ "bcd12847b1224974aedc61818997d4b6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_d138e40aa4064fadb0c05c4dc6ee4755",
+ "placeholder": "",
+ "style": "IPY_MODEL_1216882c5608465b9a855b24864c7db9",
+ "value": " 116/116 [00:00<00:00, 1.69kB/s]"
+ }
+ },
+ "8a08c3225d9a464c9359089c416378dd": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "32de1fa8fa184cae93fe2338180df84b": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "97c707df0f61483bb572d02c98cb6cdf": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "fa49ad10de3c4fc5a2a36cc8b79cf23c": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "9bea0631354b4d7892e2e45e3b2b2724": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "d138e40aa4064fadb0c05c4dc6ee4755": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1216882c5608465b9a855b24864c7db9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "839a1da33ddf471bbab5d3b87f51efb4": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_3ffe0bc7dd9d42bc81c94bb106ce65f3",
+ "IPY_MODEL_57ec54a8fbe146fabd90dcd0b59d4bf6",
+ "IPY_MODEL_6e9b3bf6c8e54ec58a12096936b4c9a9"
+ ],
+ "layout": "IPY_MODEL_1bc71f236d3d4669bdf175599941928f"
+ }
+ },
+ "3ffe0bc7dd9d42bc81c94bb106ce65f3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_0871b46e9ca44c099f46b61fd10d8947",
+ "placeholder": "",
+ "style": "IPY_MODEL_b023b10fca444599a58c63881f09157a",
+ "value": "model-00002-of-00002.safetensors: 100%"
+ }
+ },
+ "57ec54a8fbe146fabd90dcd0b59d4bf6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_939ab887808b4926abe9c8526d598b0b",
+ "max": 4540516344,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_5247aa80e65645f9902d5072066f05c6",
+ "value": 4540516344
+ }
+ },
+ "6e9b3bf6c8e54ec58a12096936b4c9a9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_36143a2f1503478db02a4f33a564d07d",
+ "placeholder": "",
+ "style": "IPY_MODEL_663f6d68fbe3445e9af76dad7f794d46",
+ "value": " 4.54G/4.54G [02:12<00:00, 61.0MB/s]"
+ }
+ },
+ "1bc71f236d3d4669bdf175599941928f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "0871b46e9ca44c099f46b61fd10d8947": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "b023b10fca444599a58c63881f09157a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "939ab887808b4926abe9c8526d598b0b": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "5247aa80e65645f9902d5072066f05c6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "36143a2f1503478db02a4f33a564d07d": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "663f6d68fbe3445e9af76dad7f794d46": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "ca4f1098fd464eaf88d8bd79d188e217": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_e111c8e5a1d643e7a612a5379c2d0737",
+ "IPY_MODEL_d4d6095877db4c64b7be15d32dd70fed",
+ "IPY_MODEL_0714e1a064bd4497b9cf67634029beb5"
+ ],
+ "layout": "IPY_MODEL_763d69f218b949a69f83cf1b48b74f25"
+ }
+ },
+ "e111c8e5a1d643e7a612a5379c2d0737": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_13413bde902f482ba409f2a99da9474c",
+ "placeholder": "",
+ "style": "IPY_MODEL_450e88ab3c5640029ca4ac2e0b166212",
+ "value": "pytorch_model.bin.index.json: 100%"
+ }
+ },
+ "d4d6095877db4c64b7be15d32dd70fed": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_fb15d3b6521d4f77b43f4376d7fe9200",
+ "max": 23950,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_4df9f5ca0043412ab8c9d9f8f84eb2b0",
+ "value": 23950
+ }
+ },
+ "0714e1a064bd4497b9cf67634029beb5": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_a797767e3c39496c87e6a151b51f2c8e",
+ "placeholder": "",
+ "style": "IPY_MODEL_065d785517794c718948c65ca43a892c",
+ "value": " 23.9k/23.9k [00:00<00:00, 165kB/s]"
+ }
+ },
+ "763d69f218b949a69f83cf1b48b74f25": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "13413bde902f482ba409f2a99da9474c": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "450e88ab3c5640029ca4ac2e0b166212": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "fb15d3b6521d4f77b43f4376d7fe9200": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "4df9f5ca0043412ab8c9d9f8f84eb2b0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "a797767e3c39496c87e6a151b51f2c8e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "065d785517794c718948c65ca43a892c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "64e68b3bb09f4c8ba731516f8621fbde": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_f0539918726e485db18ccaef32d93b24",
+ "IPY_MODEL_3d98cbe8d4734b288e44a243ad847974",
+ "IPY_MODEL_3c88be08f8f8405a84ddcfadbd5c8a72"
+ ],
+ "layout": "IPY_MODEL_d6218f6c3d6341a2b49780f891ad5dec"
+ }
+ },
+ "f0539918726e485db18ccaef32d93b24": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_c92966c239b643d49816567c885c6845",
+ "placeholder": "",
+ "style": "IPY_MODEL_8aa1c233cc7442d596a7515ee5eedc6e",
+ "value": "model-00001-of-00002.safetensors: 100%"
+ }
+ },
+ "3d98cbe8d4734b288e44a243ad847974": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_e75ae2c6dced4b36946ebdcd4b42fecf",
+ "max": 9942981696,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_d6cb7cf03c6848fb99b87e204e7ab0f7",
+ "value": 9942981696
+ }
+ },
+ "3c88be08f8f8405a84ddcfadbd5c8a72": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_f9e115c82bac41078d45faaaf2e389c2",
+ "placeholder": "",
+ "style": "IPY_MODEL_0226795777744ed6b86f06b1ed5d3561",
+ "value": " 9.94G/9.94G [04:02<00:00, 139MB/s]"
+ }
+ },
+ "d6218f6c3d6341a2b49780f891ad5dec": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "c92966c239b643d49816567c885c6845": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "8aa1c233cc7442d596a7515ee5eedc6e": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "e75ae2c6dced4b36946ebdcd4b42fecf": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "d6cb7cf03c6848fb99b87e204e7ab0f7": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "f9e115c82bac41078d45faaaf2e389c2": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "0226795777744ed6b86f06b1ed5d3561": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "c69e49b2d7ec4f0da3ac058163ecd611": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_be08b53b228649deabde650a6ac02d98",
+ "IPY_MODEL_c4e9e33057374ea6a20a9e05565d4ba1",
+ "IPY_MODEL_96882073eb4f4c1ebaa020fb4a98eaa3"
+ ],
+ "layout": "IPY_MODEL_a301e452137e419da6689f2bf25bbc95"
+ }
+ },
+ "be08b53b228649deabde650a6ac02d98": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_0335a1023ffc47d9afe85139799b183e",
+ "placeholder": "",
+ "style": "IPY_MODEL_8c431fdb80524f8491278c287f8e8a4a",
+ "value": "pytorch_model-00001-of-00002.bin: 100%"
+ }
+ },
+ "c4e9e33057374ea6a20a9e05565d4ba1": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_d82ed86e9dfb44139a8a0b4101893c4f",
+ "max": 9943028044,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_d542f40f9e9e4f418d8dda6d579d2235",
+ "value": 9943028044
+ }
+ },
+ "96882073eb4f4c1ebaa020fb4a98eaa3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_08e66be693594ba294944d1a44c32ea6",
+ "placeholder": "",
+ "style": "IPY_MODEL_36167812e2d84bc2b714e436bba07bbe",
+ "value": " 9.94G/9.94G [04:02<00:00, 119MB/s]"
+ }
+ },
+ "a301e452137e419da6689f2bf25bbc95": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "0335a1023ffc47d9afe85139799b183e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "8c431fdb80524f8491278c287f8e8a4a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "d82ed86e9dfb44139a8a0b4101893c4f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "d542f40f9e9e4f418d8dda6d579d2235": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "08e66be693594ba294944d1a44c32ea6": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "36167812e2d84bc2b714e436bba07bbe": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "26294b2e9780401bbb5f7020563f0f8a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_aa0cd9e2925b4826a5837e09de058f58",
+ "IPY_MODEL_ffb3a24c9299455eaa1f0da453a7db14",
+ "IPY_MODEL_b91cb1561cfc4f3d83c01f34e07ebf69"
+ ],
+ "layout": "IPY_MODEL_396162050ec540e3a078a382f818ec51"
+ }
+ },
+ "aa0cd9e2925b4826a5837e09de058f58": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_48989317a22245ca80c68e3fb1c360f6",
+ "placeholder": "",
+ "style": "IPY_MODEL_2c44aff83f694426bc0273a087b116f3",
+ "value": "special_tokens_map.json: 100%"
+ }
+ },
+ "ffb3a24c9299455eaa1f0da453a7db14": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_85c2eb3f0fb149e1b359d6e41ef338c8",
+ "max": 72,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_79a252d2da6d49dca0dbfe6a35145b01",
+ "value": 72
+ }
+ },
+ "b91cb1561cfc4f3d83c01f34e07ebf69": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_c2a6fb874f754e74a38536637be49875",
+ "placeholder": "",
+ "style": "IPY_MODEL_ddc39fe423434502806381b73e51e332",
+ "value": " 72.0/72.0 [00:00<00:00, 630B/s]"
+ }
+ },
+ "396162050ec540e3a078a382f818ec51": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "48989317a22245ca80c68e3fb1c360f6": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2c44aff83f694426bc0273a087b116f3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "85c2eb3f0fb149e1b359d6e41ef338c8": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "79a252d2da6d49dca0dbfe6a35145b01": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "c2a6fb874f754e74a38536637be49875": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "ddc39fe423434502806381b73e51e332": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "165ea34074754da9a661c0c7fe269a8d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_b69a90e1b86045de937277b8560b4c91",
+ "IPY_MODEL_e0af1ca261094d8d988d1c46d2b3a611",
+ "IPY_MODEL_656126c69ce14f8ab2dad4d22a705c7d"
+ ],
+ "layout": "IPY_MODEL_e2af374c4168420e8ec9ed38dc30c800"
+ }
+ },
+ "b69a90e1b86045de937277b8560b4c91": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_b0d282547da1490ab84c9bee1b0ef9bd",
+ "placeholder": "",
+ "style": "IPY_MODEL_1c22b3ba13854b8ebaa7d2285f726286",
+ "value": "tokenizer.json: 100%"
+ }
+ },
+ "e0af1ca261094d8d988d1c46d2b3a611": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_6a47f38a10f343e0b52f3174b651e0e6",
+ "max": 1795303,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_e540e872147c4316a1e334604565c26b",
+ "value": 1795303
+ }
+ },
+ "656126c69ce14f8ab2dad4d22a705c7d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_3f85b873cef3494a863f756f43f3ed29",
+ "placeholder": "",
+ "style": "IPY_MODEL_1980dbc4cb3c470faa093b0d269ff1ab",
+ "value": " 1.80M/1.80M [00:00<00:00, 7.22MB/s]"
+ }
+ },
+ "e2af374c4168420e8ec9ed38dc30c800": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "b0d282547da1490ab84c9bee1b0ef9bd": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1c22b3ba13854b8ebaa7d2285f726286": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "6a47f38a10f343e0b52f3174b651e0e6": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "e540e872147c4316a1e334604565c26b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "3f85b873cef3494a863f756f43f3ed29": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1980dbc4cb3c470faa093b0d269ff1ab": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "316068d4afd045e08e830863653e1dee": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_6b0c1667256341deb99e683ce1c33aa1",
+ "IPY_MODEL_e2778a5febba4c65a6a1ce6bd2b60a85",
+ "IPY_MODEL_6ebce36fbf8d4873bbf917ede621333a"
+ ],
+ "layout": "IPY_MODEL_727114e11a9d4b269d9bf37556247eed"
+ }
+ },
+ "6b0c1667256341deb99e683ce1c33aa1": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_879f6f1c40814b91aff892e0855dae65",
+ "placeholder": "",
+ "style": "IPY_MODEL_9c216c310d1b4b7d8a8e9ebaf9b5febd",
+ "value": "pytorch_model-00002-of-00002.bin: 100%"
+ }
+ },
+ "e2778a5febba4c65a6a1ce6bd2b60a85": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_5d53bc18976a46c8abcb9a005a769768",
+ "max": 5064823659,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_91dfef3a31a24aadb87e3b8c2517e3b3",
+ "value": 5064823659
+ }
+ },
+ "6ebce36fbf8d4873bbf917ede621333a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_9985ddbbfa174ed7bf1170c380bde598",
+ "placeholder": "",
+ "style": "IPY_MODEL_0edd137a6ee74e4cb56b9d6b8c2ef771",
+ "value": " 5.06G/5.06G [02:26<00:00, 77.4MB/s]"
+ }
+ },
+ "727114e11a9d4b269d9bf37556247eed": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "879f6f1c40814b91aff892e0855dae65": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "9c216c310d1b4b7d8a8e9ebaf9b5febd": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "5d53bc18976a46c8abcb9a005a769768": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "91dfef3a31a24aadb87e3b8c2517e3b3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "9985ddbbfa174ed7bf1170c380bde598": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "0edd137a6ee74e4cb56b9d6b8c2ef771": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "30d42641169f4e3aa61912fb85f12259": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_6cdaeed0cedb45b6874e96af6536cab3",
+ "IPY_MODEL_2aa3cd9638774138a64367a6626f2c63",
+ "IPY_MODEL_43526c0e02ee427e8badc79f18527860"
+ ],
+ "layout": "IPY_MODEL_562c7b208ba04dc980b0714324561c50"
+ }
+ },
+ "6cdaeed0cedb45b6874e96af6536cab3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_6aee271546b54d1286d1ff6af8402862",
+ "placeholder": "",
+ "style": "IPY_MODEL_4fa6b455191845ef8ae07453bb0750a0",
+ "value": "tokenizer_config.json: 100%"
+ }
+ },
+ "2aa3cd9638774138a64367a6626f2c63": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_291ee14374cd4a68828f418eca2218ea",
+ "max": 967,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_10399c4305004ced8a33c2de8fb0ec5c",
+ "value": 967
+ }
+ },
+ "43526c0e02ee427e8badc79f18527860": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_aceb2b19d29f42edb6014d7184267ffe",
+ "placeholder": "",
+ "style": "IPY_MODEL_29a2f5b301e84cb69a9d1747254554ea",
+ "value": " 967/967 [00:00<00:00, 6.58kB/s]"
+ }
+ },
+ "562c7b208ba04dc980b0714324561c50": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "6aee271546b54d1286d1ff6af8402862": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "4fa6b455191845ef8ae07453bb0750a0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "291ee14374cd4a68828f418eca2218ea": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "10399c4305004ced8a33c2de8fb0ec5c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "aceb2b19d29f42edb6014d7184267ffe": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "29a2f5b301e84cb69a9d1747254554ea": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "c2c7359f900c4380b6cc12956ea0a442": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_efdcb1bf19874703b3d26d2ce46353be",
+ "IPY_MODEL_35f49255dff64bb7b22852ec6465d801",
+ "IPY_MODEL_626c2341558a4cef8b96d914bfba23ca"
+ ],
+ "layout": "IPY_MODEL_5bf60326dfea44d298093e81db160593"
+ }
+ },
+ "efdcb1bf19874703b3d26d2ce46353be": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_5af079a85a5449d3b5f8250df69ff056",
+ "placeholder": "",
+ "style": "IPY_MODEL_a51e5049b7e3428fb3741ba308cbb86f",
+ "value": "tokenizer.model: 100%"
+ }
+ },
+ "35f49255dff64bb7b22852ec6465d801": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_9f0b4b1716e2488f82a651bcff1db433",
+ "max": 493443,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_b011eeeeb8a644c3bb65d728c7a8eb08",
+ "value": 493443
+ }
+ },
+ "626c2341558a4cef8b96d914bfba23ca": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_c1e86b1385c64c1aa74f11fc7dd65a39",
+ "placeholder": "",
+ "style": "IPY_MODEL_7ca4621df5a847f49cc4082b6a1824b9",
+ "value": " 493k/493k [00:00<00:00, 3.39MB/s]"
+ }
+ },
+ "5bf60326dfea44d298093e81db160593": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "5af079a85a5449d3b5f8250df69ff056": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "a51e5049b7e3428fb3741ba308cbb86f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "9f0b4b1716e2488f82a651bcff1db433": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "b011eeeeb8a644c3bb65d728c7a8eb08": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "c1e86b1385c64c1aa74f11fc7dd65a39": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "7ca4621df5a847f49cc4082b6a1824b9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "8b582650a2bb4f0c8b23fec484455145": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "VBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "VBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "VBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_b887ba97ceaa4ea6b5248ef24f001bee",
+ "IPY_MODEL_fbe702574326465d9373ccee2bc10776",
+ "IPY_MODEL_2e6564e983604376ab875e12654704f9",
+ "IPY_MODEL_43372cd04dfc45a3a1d11130d5c569f6"
+ ],
+ "layout": "IPY_MODEL_c476742c654444bba8bf3256c57e17d4"
+ }
+ },
+ "c0650664063e4ddda7614aa58f3b263e": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_44aaaaace1d7494a88a36e4d99301e4a",
+ "placeholder": "",
+ "style": "IPY_MODEL_446cc3d328b74f6ea0ef83dc521fc43b",
+ "value": "
Copy a token from your Hugging Face\ntokens page and paste it below.
Immediately click login after copying\nyour token or it might be stored in plain text in this notebook file. "
+ }
+ },
+ "3f7ca1727e2044eebfd9c624d0cb0a6b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "PasswordModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "PasswordModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "PasswordView",
+ "continuous_update": true,
+ "description": "Token:",
+ "description_tooltip": null,
+ "disabled": false,
+ "layout": "IPY_MODEL_7ef3d07ae70c48889f523e70b9674df6",
+ "placeholder": "",
+ "style": "IPY_MODEL_0c80d3ed00f5438f85c211c2f8f94fae",
+ "value": ""
+ }
+ },
+ "ebd75acc454c439cbee44f87aad9d90f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "CheckboxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "CheckboxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "CheckboxView",
+ "description": "Add token as git credential?",
+ "description_tooltip": null,
+ "disabled": false,
+ "indent": true,
+ "layout": "IPY_MODEL_20fcc68df8044924a0534082febdc904",
+ "style": "IPY_MODEL_45c73e3c2c9b4a12b108d92a737da4dc",
+ "value": true
+ }
+ },
+ "ec10867538bc44b08e669eebc7acc587": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ButtonModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ButtonModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ButtonView",
+ "button_style": "",
+ "description": "Login",
+ "disabled": false,
+ "icon": "",
+ "layout": "IPY_MODEL_d2a042df2217420291701d88945b66a9",
+ "style": "IPY_MODEL_e8e425e4155949569e452e930b8149ab",
+ "tooltip": ""
+ }
+ },
+ "41640c4d962743cf8b2c1d350f4088ed": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_ee5ae591712b4b0eb10763cbf819bf80",
+ "placeholder": "",
+ "style": "IPY_MODEL_89ebd0e461b2425890446cf78a5d7581",
+ "value": "\nPro Tip: If you don't already have one, you can create a dedicated\n'notebooks' token with 'write' access, that you can then easily reuse for all\nnotebooks. "
+ }
+ },
+ "c476742c654444bba8bf3256c57e17d4": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": "center",
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": "flex",
+ "flex": null,
+ "flex_flow": "column",
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": "50%"
+ }
+ },
+ "44aaaaace1d7494a88a36e4d99301e4a": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "446cc3d328b74f6ea0ef83dc521fc43b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "7ef3d07ae70c48889f523e70b9674df6": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "0c80d3ed00f5438f85c211c2f8f94fae": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "20fcc68df8044924a0534082febdc904": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "45c73e3c2c9b4a12b108d92a737da4dc": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "d2a042df2217420291701d88945b66a9": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "e8e425e4155949569e452e930b8149ab": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ButtonStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ButtonStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "button_color": null,
+ "font_weight": ""
+ }
+ },
+ "ee5ae591712b4b0eb10763cbf819bf80": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "89ebd0e461b2425890446cf78a5d7581": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "a52f35475a4a423ebdb07c4ea20a64eb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "LabelModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "LabelModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "LabelView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_3a516fa148244be3a0e520ebfac77f6f",
+ "placeholder": "",
+ "style": "IPY_MODEL_9bee710e7117458c9cf4891c67730e15",
+ "value": "Connecting..."
+ }
+ },
+ "3a516fa148244be3a0e520ebfac77f6f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "9bee710e7117458c9cf4891c67730e15": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "b887ba97ceaa4ea6b5248ef24f001bee": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "LabelModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "LabelModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "LabelView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_720855a488684d54a69f6a2936e0dece",
+ "placeholder": "",
+ "style": "IPY_MODEL_848c73ab6eca424f832ab7c8b8cecc9f",
+ "value": "Token is valid (permission: write)."
+ }
+ },
+ "fbe702574326465d9373ccee2bc10776": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "LabelModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "LabelModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "LabelView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_a152922e7048481eb6ced042f9adcc50",
+ "placeholder": "",
+ "style": "IPY_MODEL_7629bacd5e454eed8cf4de5f7c09f9ae",
+ "value": "Your token has been saved in your configured git credential helpers (store)."
+ }
+ },
+ "2e6564e983604376ab875e12654704f9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "LabelModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "LabelModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "LabelView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_f3c16713eda743cda445ee81b3e5a105",
+ "placeholder": "",
+ "style": "IPY_MODEL_e60dcb146fb84525be11e6f0b4d4c3cb",
+ "value": "Your token has been saved to /root/.cache/huggingface/token"
+ }
+ },
+ "43372cd04dfc45a3a1d11130d5c569f6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "LabelModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "LabelModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "LabelView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_9e97f9d91ca14e70afc15b2964f106a3",
+ "placeholder": "",
+ "style": "IPY_MODEL_214ac890bb5a49f5bb2aa76de81ef351",
+ "value": "Login successful"
+ }
+ },
+ "720855a488684d54a69f6a2936e0dece": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "848c73ab6eca424f832ab7c8b8cecc9f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "a152922e7048481eb6ced042f9adcc50": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "7629bacd5e454eed8cf4de5f7c09f9ae": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "f3c16713eda743cda445ee81b3e5a105": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "e60dcb146fb84525be11e6f0b4d4c3cb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "9e97f9d91ca14e70afc15b2964f106a3": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "214ac890bb5a49f5bb2aa76de81ef351": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "0f34055941fb4529a69a42bb1c3ae612": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_6d19cf6ef2d14dc78bceeebbaf18029f",
+ "IPY_MODEL_fb5f030e4e404aca92c685bd209f51b7",
+ "IPY_MODEL_1b85406f639840d5833e9b79b1f0758a"
+ ],
+ "layout": "IPY_MODEL_1a448fdc57ee41d1835eaa43f7f302a6"
+ }
+ },
+ "6d19cf6ef2d14dc78bceeebbaf18029f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_dd3f9660334b437e8b4ae8f51fedf4c0",
+ "placeholder": "",
+ "style": "IPY_MODEL_de5b5bb583324d37b3c75951af5a4d19",
+ "value": "Q4_K_M.gguf: 100%"
+ }
+ },
+ "fb5f030e4e404aca92c685bd209f51b7": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_4e0f463c2e3c4a10af6aa0639734a453",
+ "max": 4368439008,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_09224dc62a524deca1e28efe1a2ce117",
+ "value": 4368439008
+ }
+ },
+ "1b85406f639840d5833e9b79b1f0758a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_9752afa43c7a436088881032deb3fba6",
+ "placeholder": "",
+ "style": "IPY_MODEL_ae832d4490b344fcbf42ed3476ffd8d0",
+ "value": " 4.37G/4.37G [01:52<00:00, 48.8MB/s]"
+ }
+ },
+ "1a448fdc57ee41d1835eaa43f7f302a6": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "dd3f9660334b437e8b4ae8f51fedf4c0": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "de5b5bb583324d37b3c75951af5a4d19": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "4e0f463c2e3c4a10af6aa0639734a453": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "09224dc62a524deca1e28efe1a2ce117": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "9752afa43c7a436088881032deb3fba6": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "ae832d4490b344fcbf42ed3476ffd8d0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ }
+ }
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "!pip install git+https://github.com/huggingface/transformers@72958fc\n"
+ ],
+ "metadata": {
+ "id": "DQq_5HvXGjSL",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "outputId": "9721ddf4-6f50-4b5e-f681-b3c7e7db4ddd"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Collecting git+https://github.com/huggingface/transformers@72958fc\n",
+ " Cloning https://github.com/huggingface/transformers (to revision 72958fc) to /tmp/pip-req-build-a0e3oczz\n",
+ " Running command git clone --filter=blob:none --quiet https://github.com/huggingface/transformers /tmp/pip-req-build-a0e3oczz\n",
+ "\u001b[33m WARNING: Did not find branch or tag '72958fc', assuming revision or ref.\u001b[0m\u001b[33m\n",
+ "\u001b[0m Running command git checkout -q 72958fc\n",
+ " Resolved https://github.com/huggingface/transformers to commit 72958fc\n",
+ " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
+ " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (3.13.1)\n",
+ "Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (0.20.3)\n",
+ "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (1.25.2)\n",
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (24.0)\n",
+ "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (6.0.1)\n",
+ "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (2023.12.25)\n",
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (2.31.0)\n",
+ "Collecting tokenizers<0.15,>=0.14 (from transformers==4.34.0.dev0)\n",
+ " Downloading tokenizers-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.8 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (0.4.2)\n",
+ "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0.dev0) (4.66.2)\n",
+ "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers==4.34.0.dev0) (2023.6.0)\n",
+ "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers==4.34.0.dev0) (4.10.0)\n",
+ "Collecting huggingface-hub<1.0,>=0.16.4 (from transformers==4.34.0.dev0)\n",
+ " Downloading huggingface_hub-0.17.3-py3-none-any.whl (295 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m295.0/295.0 kB\u001b[0m \u001b[31m29.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.34.0.dev0) (3.3.2)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.34.0.dev0) (3.6)\n",
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.34.0.dev0) (2.0.7)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.34.0.dev0) (2024.2.2)\n",
+ "Building wheels for collected packages: transformers\n",
+ " Building wheel for transformers (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
+ " Created wheel for transformers: filename=transformers-4.34.0.dev0-py3-none-any.whl size=7746153 sha256=5d06c444fc6a74d8d62306e9861068122d6355bd1d841f08cfaee24756fc3010\n",
+ " Stored in directory: /tmp/pip-ephem-wheel-cache-sfex8g8c/wheels/23/ce/0d/6a09b63fdbb78ba584b74faff8b6a4443da3fdc573090a07f3\n",
+ "Successfully built transformers\n",
+ "Installing collected packages: huggingface-hub, tokenizers, transformers\n",
+ " Attempting uninstall: huggingface-hub\n",
+ " Found existing installation: huggingface-hub 0.20.3\n",
+ " Uninstalling huggingface-hub-0.20.3:\n",
+ " Successfully uninstalled huggingface-hub-0.20.3\n",
+ " Attempting uninstall: tokenizers\n",
+ " Found existing installation: tokenizers 0.15.2\n",
+ " Uninstalling tokenizers-0.15.2:\n",
+ " Successfully uninstalled tokenizers-0.15.2\n",
+ " Attempting uninstall: transformers\n",
+ " Found existing installation: transformers 4.38.2\n",
+ " Uninstalling transformers-4.38.2:\n",
+ " Successfully uninstalled transformers-4.38.2\n",
+ "Successfully installed huggingface-hub-0.17.3 tokenizers-0.14.1 transformers-4.34.0.dev0\n"
+ ]
+ }
+ ]
+ },
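+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Pinning `transformers` to commit `72958fc` keeps the conversion environment reproducible. A minimal sanity check after the install (a sketch; the install log above shows the pinned commit reporting itself as a `4.34.0.dev0` build):\n",
+ "\n",
+ "```python\n",
+ "import transformers\n",
+ "\n",
+ "# The pinned commit should report a 4.34.0.dev0 version string\n",
+ "print(transformers.__version__)\n",
+ "```"
+ ]
+ },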
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "N_Lc1X1YNdqe",
+ "outputId": "13bbc6f5-b79f-49df-b835-40fa2ef5c47b"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Cloning into 'llama.cpp'...\n",
+ "remote: Enumerating objects: 20779, done.\u001b[K\n",
+ "remote: Counting objects: 100% (6454/6454), done.\u001b[K\n",
+ "remote: Compressing objects: 100% (348/348), done.\u001b[K\n",
+ "remote: Total 20779 (delta 6280), reused 6166 (delta 6106), pack-reused 14325\u001b[K\n",
+ "Receiving objects: 100% (20779/20779), 23.80 MiB | 8.72 MiB/s, done.\n",
+ "Resolving deltas: 100% (14678/14678), done.\n"
+ ]
+ }
+ ],
+ "source": [
+ "!git clone https://github.com/ggerganov/llama.cpp\n"
+ ]
+ },
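+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The next cell compiles llama.cpp with `LLAMA_CUBLAS=1`, which enables the CUDA (cuBLAS) backend so tools such as `main`, `quantize`, and `perplexity` can offload work to the GPU, and then installs the Python packages the HF-to-GGUF conversion script needs (numpy, gguf, torch, and friends, per the requirements file). A minimal sketch for checking the build afterwards; the binary names match the `-o` targets in the build log below:\n",
+ "\n",
+ "```python\n",
+ "import os\n",
+ "\n",
+ "# After `make` finishes, the tools are emitted at the repo root\n",
+ "for tool in (\"main\", \"quantize\", \"perplexity\"):\n",
+ "    print(tool, \"built:\", os.path.exists(os.path.join(\"llama.cpp\", tool)))\n",
+ "```"
+ ]
+ },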
+ {
+ "cell_type": "code",
+ "source": [
+ "!cd llama.cpp && LLAMA_CUBLAS=1 make && pip install -r requirements/requirements-convert-hf-to-gguf.txt"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "V3sUZjqnOIJg",
+ "outputId": "a8599618-4426-47ab-ba8d-9764ceb3e5a4"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "I ccache not found. Consider installing it for faster compilation.\n",
+ "I llama.cpp build info: \n",
+ "I UNAME_S: Linux\n",
+ "I UNAME_P: x86_64\n",
+ "I UNAME_M: x86_64\n",
+ "I CFLAGS: -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wdouble-promotion \n",
+ "I CXXFLAGS: -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include \n",
+ "I NVCCFLAGS: -std=c++11 -O3 -use_fast_math --forward-unknown-to-host-compiler -arch=native -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_MMV_Y=1 -DK_QUANTS_PER_ITERATION=2 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 \n",
+ "I LDFLAGS: -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "I CC: cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\n",
+ "I CXX: g++ (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\n",
+ "I NVCC: Build cuda_12.2.r12.2/compiler.33191640_0\n",
+ "\n",
+ "cc -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wdouble-promotion -c ggml.c -o ggml.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c llama.cpp -o llama.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c common/common.cpp -o common.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c common/sampling.cpp -o sampling.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c common/grammar-parser.cpp -o grammar-parser.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c common/build-info.cpp -o build-info.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c common/console.cpp -o console.o\n",
+ "nvcc -std=c++11 -O3 -use_fast_math --forward-unknown-to-host-compiler -arch=native -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_MMV_Y=1 -DK_QUANTS_PER_ITERATION=2 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -Xcompiler \"-std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Wno-pedantic\" -c ggml-cuda.cu -o ggml-cuda.o\n",
+ "cc -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wdouble-promotion -c ggml-alloc.c -o ggml-alloc.o\n",
+ "cc -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wdouble-promotion -c ggml-backend.c -o ggml-backend.o\n",
+ "cc -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wdouble-promotion -c ggml-quants.c -o ggml-quants.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c unicode.cpp -o unicode.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/main/main.cpp -o examples/main/main.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o console.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/main/main.o -o main -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "\n",
+ "==== Run ./main -h for help. ====\n",
+ "\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/quantize/quantize.cpp -o examples/quantize/quantize.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include build-info.o ggml.o llama.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/quantize/quantize.o -o quantize -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/quantize-stats/quantize-stats.cpp -o examples/quantize-stats/quantize-stats.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include build-info.o ggml.o llama.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/quantize-stats/quantize-stats.o -o quantize-stats -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/perplexity/perplexity.cpp -o examples/perplexity/perplexity.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/perplexity/perplexity.o -o perplexity -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/imatrix/imatrix.cpp -o examples/imatrix/imatrix.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/imatrix/imatrix.o -o imatrix -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/embedding/embedding.cpp -o examples/embedding/embedding.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/embedding/embedding.o -o embedding -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c pocs/vdot/vdot.cpp -o pocs/vdot/vdot.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o pocs/vdot/vdot.o -o vdot -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c pocs/vdot/q8dot.cpp -o pocs/vdot/q8dot.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o pocs/vdot/q8dot.o -o q8dot -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c common/train.cpp -o train.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/train-text-from-scratch/train-text-from-scratch.cpp -o examples/train-text-from-scratch/train-text-from-scratch.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o train.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/train-text-from-scratch/train-text-from-scratch.o -o train-text-from-scratch -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp -o examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.o -o convert-llama2c-to-ggml -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/simple/simple.cpp -o examples/simple/simple.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/simple/simple.o -o simple -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/batched/batched.cpp -o examples/batched/batched.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/batched/batched.o -o batched -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/batched-bench/batched-bench.cpp -o examples/batched-bench/batched-bench.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include build-info.o ggml.o llama.o common.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/batched-bench/batched-bench.o -o batched-bench -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/save-load-state/save-load-state.cpp -o examples/save-load-state/save-load-state.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/save-load-state/save-load-state.o -o save-load-state -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c common/json-schema-to-grammar.cpp -o json-schema-to-grammar.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/server/server.cpp -o examples/server/server.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include json-schema-to-grammar.o ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o -Iexamples/server examples/server/server.o -o server -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/gguf/gguf.cpp -o examples/gguf/gguf.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/gguf/gguf.o -o gguf -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/llama-bench/llama-bench.cpp -o examples/llama-bench/llama-bench.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/llama-bench/llama-bench.o -o llama-bench -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -static -fPIC -c examples/llava/llava.cpp -o libllava.a -Wno-cast-qual\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/llava/llava-cli.cpp -o examples/llava/llava-cli.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/llava/clip.cpp -o examples/llava/clip.o -Wno-cast-qual\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/llava/llava.cpp -o examples/llava/llava.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/llava/llava-cli.o examples/llava/clip.o examples/llava/llava.o -o llava-cli -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/baby-llama/baby-llama.cpp -o examples/baby-llama/baby-llama.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o train.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/baby-llama/baby-llama.o -o baby-llama -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/beam-search/beam-search.cpp -o examples/beam-search/beam-search.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/beam-search/beam-search.o -o beam-search -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/speculative/speculative.cpp -o examples/speculative/speculative.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/speculative/speculative.o -o speculative -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/infill/infill.cpp -o examples/infill/infill.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o console.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/infill/infill.o -o infill -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/tokenize/tokenize.cpp -o examples/tokenize/tokenize.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/tokenize/tokenize.o -o tokenize -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/benchmark/benchmark-matmult.cpp -o examples/benchmark/benchmark-matmult.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include build-info.o ggml.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/benchmark/benchmark-matmult.o -o benchmark-matmult -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/parallel/parallel.cpp -o examples/parallel/parallel.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/parallel/parallel.o -o parallel -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/finetune/finetune.cpp -o examples/finetune/finetune.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o train.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/finetune/finetune.o -o finetune -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/export-lora/export-lora.cpp -o examples/export-lora/export-lora.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/export-lora/export-lora.o -o export-lora -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/lookahead/lookahead.cpp -o examples/lookahead/lookahead.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/lookahead/lookahead.o -o lookahead -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/lookup/lookup.cpp -o examples/lookup/lookup.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/lookup/lookup.o -o lookup -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/passkey/passkey.cpp -o examples/passkey/passkey.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/passkey/passkey.o -o passkey -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -c examples/gritlm/gritlm.cpp -o examples/gritlm/gritlm.o\n",
+ "g++ -std=c++11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -pthread -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include ggml.o llama.o common.o sampling.o grammar-parser.o build-info.o ggml-cuda.o ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o examples/gritlm/gritlm.o -o gritlm -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/usr/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/lib/wsl/lib \n",
+ "cc -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/usr/local/cuda/targets/x86_64-linux/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wdouble-promotion -c tests/test-c.c -o tests/test-c.o\n",
+ "Collecting numpy~=1.24.4 (from -r requirements/./requirements-convert.txt (line 1))\n",
+ " Downloading numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (17.3 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m23.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: sentencepiece~=0.1.98 in /usr/local/lib/python3.10/dist-packages (from -r requirements/./requirements-convert.txt (line 2)) (0.1.99)\n",
+ "Collecting transformers<5.0.0,>=4.35.2 (from -r requirements/./requirements-convert.txt (line 3))\n",
+ " Downloading transformers-4.39.0-py3-none-any.whl (8.8 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.8/8.8 MB\u001b[0m \u001b[31m91.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting gguf>=0.1.0 (from -r requirements/./requirements-convert.txt (line 4))\n",
+ " Downloading gguf-0.6.0-py3-none-any.whl (23 kB)\n",
+ "Collecting protobuf<5.0.0,>=4.21.0 (from -r requirements/./requirements-convert.txt (line 5))\n",
+ " Downloading protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl (294 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m294.6/294.6 kB\u001b[0m \u001b[31m29.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting torch~=2.1.1 (from -r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading torch-2.1.2-cp310-cp310-manylinux1_x86_64.whl (670.2 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m670.2/670.2 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting einops~=0.7.0 (from -r requirements/requirements-convert-hf-to-gguf.txt (line 3))\n",
+ " Downloading einops-0.7.0-py3-none-any.whl (44 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.6/44.6 kB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (3.13.1)\n",
+ "Collecting huggingface-hub<1.0,>=0.19.3 (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3))\n",
+ " Downloading huggingface_hub-0.21.4-py3-none-any.whl (346 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m346.4/346.4 kB\u001b[0m \u001b[31m42.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (24.0)\n",
+ "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (6.0.1)\n",
+ "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (2023.12.25)\n",
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (2.31.0)\n",
+ "Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (0.14.1)\n",
+ "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (0.4.2)\n",
+ "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (4.66.2)\n",
+ "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2)) (4.10.0)\n",
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2)) (1.12)\n",
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2)) (3.2.1)\n",
+ "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2)) (3.1.3)\n",
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2)) (2023.6.0)\n",
+ "Collecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m23.7/23.7 MB\u001b[0m \u001b[31m65.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-cuda-runtime-cu12==12.1.105 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m823.6/823.6 kB\u001b[0m \u001b[31m68.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-cuda-cupti-cu12==12.1.105 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m14.1/14.1 MB\u001b[0m \u001b[31m97.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-cudnn-cu12==8.9.2.26 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl (731.7 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m731.7/731.7 MB\u001b[0m \u001b[31m1.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-cublas-cu12==12.1.3.1 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m410.6/410.6 MB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-cufft-cu12==11.0.2.54 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m121.6/121.6 MB\u001b[0m \u001b[31m8.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-curand-cu12==10.3.2.106 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.5/56.5 MB\u001b[0m \u001b[31m10.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-cusolver-cu12==11.4.5.107 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m124.2/124.2 MB\u001b[0m \u001b[31m8.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-cusparse-cu12==12.1.0.106 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m196.0/196.0 MB\u001b[0m \u001b[31m6.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-nccl-cu12==2.18.1 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_nccl_cu12-2.18.1-py3-none-manylinux1_x86_64.whl (209.8 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m209.8/209.8 MB\u001b[0m \u001b[31m2.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-nvtx-cu12==12.1.105 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m99.1/99.1 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting triton==2.1.0 (from torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading triton-2.1.0-0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (89.2 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m89.2/89.2 MB\u001b[0m \u001b[31m8.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting nvidia-nvjitlink-cu12 (from nvidia-cusolver-cu12==11.4.5.107->torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2))\n",
+ " Downloading nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl (21.1 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.1/21.1 MB\u001b[0m \u001b[31m75.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hINFO: pip is looking at multiple versions of tokenizers to determine which version is compatible with other requirements. This could take a while.\n",
+ "Collecting tokenizers<0.19,>=0.14 (from transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3))\n",
+ " Downloading tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.6 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.6/3.6 MB\u001b[0m \u001b[31m97.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2)) (2.1.5)\n",
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (3.3.2)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (3.6)\n",
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (2.0.7)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers<5.0.0,>=4.35.2->-r requirements/./requirements-convert.txt (line 3)) (2024.2.2)\n",
+ "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch~=2.1.1->-r requirements/requirements-convert-hf-to-gguf.txt (line 2)) (1.3.0)\n",
+ "Installing collected packages: triton, protobuf, nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, numpy, einops, nvidia-cusparse-cu12, nvidia-cudnn-cu12, huggingface-hub, gguf, tokenizers, nvidia-cusolver-cu12, transformers, torch\n",
+ " Attempting uninstall: triton\n",
+ " Found existing installation: triton 2.2.0\n",
+ " Uninstalling triton-2.2.0:\n",
+ " Successfully uninstalled triton-2.2.0\n",
+ " Attempting uninstall: protobuf\n",
+ " Found existing installation: protobuf 3.20.3\n",
+ " Uninstalling protobuf-3.20.3:\n",
+ " Successfully uninstalled protobuf-3.20.3\n",
+ " Attempting uninstall: numpy\n",
+ " Found existing installation: numpy 1.25.2\n",
+ " Uninstalling numpy-1.25.2:\n",
+ " Successfully uninstalled numpy-1.25.2\n",
+ " Attempting uninstall: huggingface-hub\n",
+ " Found existing installation: huggingface-hub 0.17.3\n",
+ " Uninstalling huggingface-hub-0.17.3:\n",
+ " Successfully uninstalled huggingface-hub-0.17.3\n",
+ " Attempting uninstall: tokenizers\n",
+ " Found existing installation: tokenizers 0.14.1\n",
+ " Uninstalling tokenizers-0.14.1:\n",
+ " Successfully uninstalled tokenizers-0.14.1\n",
+ " Attempting uninstall: transformers\n",
+ " Found existing installation: transformers 4.34.0.dev0\n",
+ " Uninstalling transformers-4.34.0.dev0:\n",
+ " Successfully uninstalled transformers-4.34.0.dev0\n",
+ " Attempting uninstall: torch\n",
+ " Found existing installation: torch 2.2.1+cu121\n",
+ " Uninstalling torch-2.2.1+cu121:\n",
+ " Successfully uninstalled torch-2.2.1+cu121\n",
+ "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
+ "tensorflow-metadata 1.14.0 requires protobuf<4.21,>=3.20.3, but you have protobuf 4.25.3 which is incompatible.\n",
+ "torchaudio 2.2.1+cu121 requires torch==2.2.1, but you have torch 2.1.2 which is incompatible.\n",
+ "torchtext 0.17.1 requires torch==2.2.1, but you have torch 2.1.2 which is incompatible.\n",
+ "torchvision 0.17.1+cu121 requires torch==2.2.1, but you have torch 2.1.2 which is incompatible.\u001b[0m\u001b[31m\n",
+ "\u001b[0mSuccessfully installed einops-0.7.0 gguf-0.6.0 huggingface-hub-0.21.4 numpy-1.24.4 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-8.9.2.26 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.18.1 nvidia-nvjitlink-cu12-12.4.99 nvidia-nvtx-cu12-12.1.105 protobuf-4.25.3 tokenizers-0.15.2 torch-2.1.2 transformers-4.39.0 triton-2.1.0\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from huggingface_hub import snapshot_download"
+ ],
+ "metadata": {
+ "id": "ZgZt7o9aOQcO"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "model_name = \"mistralai/Mistral-7B-v0.1\"#\"Qwen/Qwen1.5-1.8B\""
+ ],
+ "metadata": {
+ "id": "ULQLpp-IOc_2"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "methods = ['q4_k_m']"
+ ],
+ "metadata": {
+ "id": "WsX-vKWROc7t"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
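+ {
+ "cell_type": "markdown",
+ "source": [
+ "The `q4_k_m` entry above selects llama.cpp's 4-bit K-quant (medium) scheme. As a rough, optional check that is not part of the original workflow, the next cell estimates the resulting file sizes, assuming roughly 7.24B parameters for Mistral-7B and an average of about 4.85 bits per weight for Q4_K_M; the real GGUF files will differ slightly because some tensors are kept at higher precision."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Back-of-the-envelope size estimate (illustrative only; both constants are approximations)\n",
+ "n_params = 7.24e9          # assumed parameter count for Mistral-7B-v0.1\n",
+ "bits_per_weight = 4.85     # assumed average bits per weight for Q4_K_M\n",
+ "fp16_gb = n_params * 2 / 1e9\n",
+ "q4_gb = n_params * bits_per_weight / 8 / 1e9\n",
+ "print(f\"FP16 GGUF: ~{fp16_gb:.1f} GB, Q4_K_M GGUF: ~{q4_gb:.1f} GB\")"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },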
+ {
+ "cell_type": "code",
+ "source": [
+ "base_model = \"./original_model/\"\n",
+ "quantized_path = \"./quantized_model/\""
+ ],
+ "metadata": {
+ "id": "afmpVpuQOc4i"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "snapshot_download(repo_id=model_name, local_dir=base_model , local_dir_use_symlinks=False)\n",
+ "original_model = quantized_path+'/FP16.gguf'"
+ ],
+ "metadata": {
+ "id": "EIddgL5SOc1G",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 621,
+ "referenced_widgets": [
+ "23f3d222dda34579a2d10f9cb0345f42",
+ "c70d11ef14824f30a5b2cb5211f3fa8f",
+ "890b893d943e43bba871aba4b4228985",
+ "05d47408587045b9a2de09a6d5f98a86",
+ "ed32d6d5a1994b5f869173e2f8720a1b",
+ "967824760cca4dd383a53742854629c1",
+ "cd05c438545d49218989401a63f4c3e0",
+ "71a2ad80c40941fb926901ed6d35781e",
+ "2c8d1fe46cba4278b12b70befaf538c7",
+ "a8274e16b3bc4defba8bcb99720011cc",
+ "75bfc069d9404dd0a64f778a61811bc9",
+ "d7748e881b29423392742b2033a5d90a",
+ "2a33b1b6f957453ca4fb859f0e0dfa37",
+ "36eff02ed8b34ec8a7fb466311be758c",
+ "aa3e275af3ad478383d77662ce27e9d6",
+ "848f36c2e49b4e468ded57bf44c2c703",
+ "c25b167e65b24069a45a7e14d8d7d425",
+ "36a5cf21100044c0811d5edc217c0250",
+ "beee422233b746959cd1aa9dc7730ad3",
+ "6ba695bbfbc3415c8ffc700732915036",
+ "84b4af49f7d349589efba60511e9cd81",
+ "b96a2123a3bc48d7bf82573726646135",
+ "6b00291e98f64133812ab66039c744fa",
+ "c50af39363644a25ad1005495ac0b35f",
+ "ad2ad4dd4e444b9b8e5d3deec082f838",
+ "79b6fbecb1794b1e92e7bba62583ea47",
+ "9077350cfcec46feb1cf02ef8b29c9bb",
+ "efbcd5adc0f644a381f65c143b2a2119",
+ "9825f53be4fc4452b1503319e380fa77",
+ "f723c15cbbde47e6ab57e35671a9a85e",
+ "256b926b3b0c4190b8ddc74c2b2f2955",
+ "ae83ef6286a44632a4dab0d90ee5ca39",
+ "1610ee9e3e7948139275a21d840565fb",
+ "02d36996ff564b8ea2b24acce22bf4c3",
+ "9e9cb229405c447d948032d6a2cc82bb",
+ "13935436da6d4fa8be901ecb7dcb928a",
+ "b3249b5f47e649539ed21841b87d9db2",
+ "8d124f06fd5842d4b99fc011b355312f",
+ "bd235d7d8f7c4f03bed28d7aa692731e",
+ "f2b85643c27f4dcc979ed2757c4c0c86",
+ "17c7c1af613046f3a50c3097fac7cba4",
+ "dd1e36b86f9440eb8cf35a260204d5ae",
+ "7099ff2f7cf84035b71eee27e7f7f52f",
+ "480b82fe42c544429196dc052de582af",
+ "65893b35c8da4e74af4472d8b9bbb051",
+ "8cba36d0838849d8933fdf03ec7b1dd9",
+ "bf3e0d482d3e4b0899c1834522c65559",
+ "95533da54ba9498fb280653fb6485af0",
+ "f92494f93252496497635102d83536b6",
+ "4d65c6a547704f249fc318e923f04704",
+ "46d9ffcfc0a9468893e2213f561c5a10",
+ "bb9f12685c6a40fdb6bdb29e7a7a05d5",
+ "636240a25c6c44a99405ebd05871f442",
+ "f347c43229fe4e9da44771d749f2e7a2",
+ "66afca1c879a49248ff216ece0be1d99",
+ "bb15d5514a14438db029a6729d5bb175",
+ "f1d41a43c6ed4589884f0dc1a4182814",
+ "30f3b1c5172a457d8cfcd08d9a3ca523",
+ "bcd12847b1224974aedc61818997d4b6",
+ "8a08c3225d9a464c9359089c416378dd",
+ "32de1fa8fa184cae93fe2338180df84b",
+ "97c707df0f61483bb572d02c98cb6cdf",
+ "fa49ad10de3c4fc5a2a36cc8b79cf23c",
+ "9bea0631354b4d7892e2e45e3b2b2724",
+ "d138e40aa4064fadb0c05c4dc6ee4755",
+ "1216882c5608465b9a855b24864c7db9",
+ "839a1da33ddf471bbab5d3b87f51efb4",
+ "3ffe0bc7dd9d42bc81c94bb106ce65f3",
+ "57ec54a8fbe146fabd90dcd0b59d4bf6",
+ "6e9b3bf6c8e54ec58a12096936b4c9a9",
+ "1bc71f236d3d4669bdf175599941928f",
+ "0871b46e9ca44c099f46b61fd10d8947",
+ "b023b10fca444599a58c63881f09157a",
+ "939ab887808b4926abe9c8526d598b0b",
+ "5247aa80e65645f9902d5072066f05c6",
+ "36143a2f1503478db02a4f33a564d07d",
+ "663f6d68fbe3445e9af76dad7f794d46",
+ "ca4f1098fd464eaf88d8bd79d188e217",
+ "e111c8e5a1d643e7a612a5379c2d0737",
+ "d4d6095877db4c64b7be15d32dd70fed",
+ "0714e1a064bd4497b9cf67634029beb5",
+ "763d69f218b949a69f83cf1b48b74f25",
+ "13413bde902f482ba409f2a99da9474c",
+ "450e88ab3c5640029ca4ac2e0b166212",
+ "fb15d3b6521d4f77b43f4376d7fe9200",
+ "4df9f5ca0043412ab8c9d9f8f84eb2b0",
+ "a797767e3c39496c87e6a151b51f2c8e",
+ "065d785517794c718948c65ca43a892c",
+ "64e68b3bb09f4c8ba731516f8621fbde",
+ "f0539918726e485db18ccaef32d93b24",
+ "3d98cbe8d4734b288e44a243ad847974",
+ "3c88be08f8f8405a84ddcfadbd5c8a72",
+ "d6218f6c3d6341a2b49780f891ad5dec",
+ "c92966c239b643d49816567c885c6845",
+ "8aa1c233cc7442d596a7515ee5eedc6e",
+ "e75ae2c6dced4b36946ebdcd4b42fecf",
+ "d6cb7cf03c6848fb99b87e204e7ab0f7",
+ "f9e115c82bac41078d45faaaf2e389c2",
+ "0226795777744ed6b86f06b1ed5d3561",
+ "c69e49b2d7ec4f0da3ac058163ecd611",
+ "be08b53b228649deabde650a6ac02d98",
+ "c4e9e33057374ea6a20a9e05565d4ba1",
+ "96882073eb4f4c1ebaa020fb4a98eaa3",
+ "a301e452137e419da6689f2bf25bbc95",
+ "0335a1023ffc47d9afe85139799b183e",
+ "8c431fdb80524f8491278c287f8e8a4a",
+ "d82ed86e9dfb44139a8a0b4101893c4f",
+ "d542f40f9e9e4f418d8dda6d579d2235",
+ "08e66be693594ba294944d1a44c32ea6",
+ "36167812e2d84bc2b714e436bba07bbe",
+ "26294b2e9780401bbb5f7020563f0f8a",
+ "aa0cd9e2925b4826a5837e09de058f58",
+ "ffb3a24c9299455eaa1f0da453a7db14",
+ "b91cb1561cfc4f3d83c01f34e07ebf69",
+ "396162050ec540e3a078a382f818ec51",
+ "48989317a22245ca80c68e3fb1c360f6",
+ "2c44aff83f694426bc0273a087b116f3",
+ "85c2eb3f0fb149e1b359d6e41ef338c8",
+ "79a252d2da6d49dca0dbfe6a35145b01",
+ "c2a6fb874f754e74a38536637be49875",
+ "ddc39fe423434502806381b73e51e332",
+ "165ea34074754da9a661c0c7fe269a8d",
+ "b69a90e1b86045de937277b8560b4c91",
+ "e0af1ca261094d8d988d1c46d2b3a611",
+ "656126c69ce14f8ab2dad4d22a705c7d",
+ "e2af374c4168420e8ec9ed38dc30c800",
+ "b0d282547da1490ab84c9bee1b0ef9bd",
+ "1c22b3ba13854b8ebaa7d2285f726286",
+ "6a47f38a10f343e0b52f3174b651e0e6",
+ "e540e872147c4316a1e334604565c26b",
+ "3f85b873cef3494a863f756f43f3ed29",
+ "1980dbc4cb3c470faa093b0d269ff1ab",
+ "316068d4afd045e08e830863653e1dee",
+ "6b0c1667256341deb99e683ce1c33aa1",
+ "e2778a5febba4c65a6a1ce6bd2b60a85",
+ "6ebce36fbf8d4873bbf917ede621333a",
+ "727114e11a9d4b269d9bf37556247eed",
+ "879f6f1c40814b91aff892e0855dae65",
+ "9c216c310d1b4b7d8a8e9ebaf9b5febd",
+ "5d53bc18976a46c8abcb9a005a769768",
+ "91dfef3a31a24aadb87e3b8c2517e3b3",
+ "9985ddbbfa174ed7bf1170c380bde598",
+ "0edd137a6ee74e4cb56b9d6b8c2ef771",
+ "30d42641169f4e3aa61912fb85f12259",
+ "6cdaeed0cedb45b6874e96af6536cab3",
+ "2aa3cd9638774138a64367a6626f2c63",
+ "43526c0e02ee427e8badc79f18527860",
+ "562c7b208ba04dc980b0714324561c50",
+ "6aee271546b54d1286d1ff6af8402862",
+ "4fa6b455191845ef8ae07453bb0750a0",
+ "291ee14374cd4a68828f418eca2218ea",
+ "10399c4305004ced8a33c2de8fb0ec5c",
+ "aceb2b19d29f42edb6014d7184267ffe",
+ "29a2f5b301e84cb69a9d1747254554ea",
+ "c2c7359f900c4380b6cc12956ea0a442",
+ "efdcb1bf19874703b3d26d2ce46353be",
+ "35f49255dff64bb7b22852ec6465d801",
+ "626c2341558a4cef8b96d914bfba23ca",
+ "5bf60326dfea44d298093e81db160593",
+ "5af079a85a5449d3b5f8250df69ff056",
+ "a51e5049b7e3428fb3741ba308cbb86f",
+ "9f0b4b1716e2488f82a651bcff1db433",
+ "b011eeeeb8a644c3bb65d728c7a8eb08",
+ "c1e86b1385c64c1aa74f11fc7dd65a39",
+ "7ca4621df5a847f49cc4082b6a1824b9"
+ ]
+ },
+ "outputId": "82a57b6e-8988-4988-a2aa-3532bf8444c8"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_token.py:88: UserWarning: \n",
+ "The secret `HF_TOKEN` does not exist in your Colab secrets.\n",
+ "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n",
+ "You will be able to reuse this secret in all of your notebooks.\n",
+ "Please note that authentication is recommended but still optional to access public models or datasets.\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "Fetching 14 files: 0%| | 0/14 [00:00, ?it/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "23f3d222dda34579a2d10f9cb0345f42"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "config.json: 0%| | 0.00/571 [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "d7748e881b29423392742b2033a5d90a"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "README.md: 0%| | 0.00/1.39k [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "6b00291e98f64133812ab66039c744fa"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ ".gitattributes: 0%| | 0.00/1.52k [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "02d36996ff564b8ea2b24acce22bf4c3"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "model.safetensors.index.json: 0%| | 0.00/25.1k [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "65893b35c8da4e74af4472d8b9bbb051"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "generation_config.json: 0%| | 0.00/116 [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "bb15d5514a14438db029a6729d5bb175"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "model-00002-of-00002.safetensors: 0%| | 0.00/4.54G [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "839a1da33ddf471bbab5d3b87f51efb4"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "pytorch_model.bin.index.json: 0%| | 0.00/23.9k [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "ca4f1098fd464eaf88d8bd79d188e217"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "model-00001-of-00002.safetensors: 0%| | 0.00/9.94G [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "64e68b3bb09f4c8ba731516f8621fbde"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "pytorch_model-00001-of-00002.bin: 0%| | 0.00/9.94G [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "c69e49b2d7ec4f0da3ac058163ecd611"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "special_tokens_map.json: 0%| | 0.00/72.0 [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "26294b2e9780401bbb5f7020563f0f8a"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "tokenizer.json: 0%| | 0.00/1.80M [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "165ea34074754da9a661c0c7fe269a8d"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "pytorch_model-00002-of-00002.bin: 0%| | 0.00/5.06G [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "316068d4afd045e08e830863653e1dee"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "tokenizer_config.json: 0%| | 0.00/967 [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "30d42641169f4e3aa61912fb85f12259"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "tokenizer.model: 0%| | 0.00/493k [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "c2c7359f900c4380b6cc12956ea0a442"
+ }
+ },
+ "metadata": {}
+ }
+ ]
+ },
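+ {
+ "cell_type": "markdown",
+ "source": [
+ "Optionally (this check is not in the original notebook), the cell below walks the download directory and reports the files and their total size, so a partial or failed `snapshot_download` is caught before the conversion step."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Optional sanity check: list the downloaded files and their total size\n",
+ "import os\n",
+ "\n",
+ "total_bytes = 0\n",
+ "for root, _, files in os.walk(base_model):\n",
+ "    for name in sorted(files):\n",
+ "        path = os.path.join(root, name)\n",
+ "        size = os.path.getsize(path)\n",
+ "        total_bytes += size\n",
+ "        print(f\"{size / 1e9:8.2f} GB  {path}\")\n",
+ "print(f\"Total on disk: {total_bytes / 1e9:.2f} GB\")"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },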
+ {
+ "cell_type": "code",
+ "source": [
+ "!mkdir ./quantized_model/"
+ ],
+ "metadata": {
+ "id": "WLSnlifnP5My"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!python /content/llama.cpp/convert.py ./original_model/ --outtype f16 --outfile ./quantized_model/FP16.gguf"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "uDfo8wduQJM4",
+ "outputId": "a418d692-f362-4c08-94d7-eca847a2d3b5"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Loading model file original_model/model-00001-of-00002.safetensors\n",
+ "Loading model file original_model/model-00001-of-00002.safetensors\n",
+ "Loading model file original_model/model-00002-of-00002.safetensors\n",
+ "params = Params(n_vocab=32000, n_embd=4096, n_layer=32, n_ctx=32768, n_ff=14336, n_head=32, n_head_kv=8, n_experts=None, n_experts_used=None, f_norm_eps=1e-05, rope_scaling_type=None, f_rope_freq_base=10000.0, f_rope_scale=None, n_orig_ctx=None, rope_finetuned=None, ftype=, path_model=PosixPath('original_model'))\n",
+ "Found vocab files: {'spm': PosixPath('original_model/tokenizer.model'), 'bpe': None, 'hfft': PosixPath('original_model/tokenizer.json')}\n",
+ "Loading vocab file PosixPath('original_model/tokenizer.model'), type 'spm'\n",
+ "Vocab info: \n",
+ "Special vocab info: \n",
+ "Permuting layer 0\n",
+ "Permuting layer 1\n",
+ "Permuting layer 2\n",
+ "Permuting layer 3\n",
+ "Permuting layer 4\n",
+ "Permuting layer 5\n",
+ "Permuting layer 6\n",
+ "Permuting layer 7\n",
+ "Permuting layer 8\n",
+ "Permuting layer 9\n",
+ "Permuting layer 10\n",
+ "Permuting layer 11\n",
+ "Permuting layer 12\n",
+ "Permuting layer 13\n",
+ "Permuting layer 14\n",
+ "Permuting layer 15\n",
+ "Permuting layer 16\n",
+ "Permuting layer 17\n",
+ "Permuting layer 18\n",
+ "Permuting layer 19\n",
+ "Permuting layer 20\n",
+ "Permuting layer 21\n",
+ "Permuting layer 22\n",
+ "Permuting layer 23\n",
+ "Permuting layer 24\n",
+ "Permuting layer 25\n",
+ "Permuting layer 26\n",
+ "Permuting layer 27\n",
+ "Permuting layer 28\n",
+ "Permuting layer 29\n",
+ "Permuting layer 30\n",
+ "Permuting layer 31\n",
+ "model.embed_tokens.weight -> token_embd.weight | BF16 | [32000, 4096]\n",
+ "model.layers.0.input_layernorm.weight -> blk.0.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.0.mlp.down_proj.weight -> blk.0.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.0.mlp.gate_proj.weight -> blk.0.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.0.mlp.up_proj.weight -> blk.0.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.0.post_attention_layernorm.weight -> blk.0.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.0.self_attn.k_proj.weight -> blk.0.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.0.self_attn.o_proj.weight -> blk.0.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.0.self_attn.q_proj.weight -> blk.0.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.0.self_attn.v_proj.weight -> blk.0.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.1.input_layernorm.weight -> blk.1.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.1.mlp.down_proj.weight -> blk.1.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.1.mlp.gate_proj.weight -> blk.1.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.1.mlp.up_proj.weight -> blk.1.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.1.post_attention_layernorm.weight -> blk.1.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.1.self_attn.k_proj.weight -> blk.1.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.1.self_attn.o_proj.weight -> blk.1.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.1.self_attn.q_proj.weight -> blk.1.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.1.self_attn.v_proj.weight -> blk.1.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.10.input_layernorm.weight -> blk.10.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.10.mlp.down_proj.weight -> blk.10.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.10.mlp.gate_proj.weight -> blk.10.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.10.mlp.up_proj.weight -> blk.10.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.10.post_attention_layernorm.weight -> blk.10.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.10.self_attn.k_proj.weight -> blk.10.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.10.self_attn.o_proj.weight -> blk.10.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.10.self_attn.q_proj.weight -> blk.10.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.10.self_attn.v_proj.weight -> blk.10.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.11.input_layernorm.weight -> blk.11.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.11.mlp.down_proj.weight -> blk.11.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.11.mlp.gate_proj.weight -> blk.11.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.11.mlp.up_proj.weight -> blk.11.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.11.post_attention_layernorm.weight -> blk.11.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.11.self_attn.k_proj.weight -> blk.11.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.11.self_attn.o_proj.weight -> blk.11.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.11.self_attn.q_proj.weight -> blk.11.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.11.self_attn.v_proj.weight -> blk.11.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.12.input_layernorm.weight -> blk.12.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.12.mlp.down_proj.weight -> blk.12.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.12.mlp.gate_proj.weight -> blk.12.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.12.mlp.up_proj.weight -> blk.12.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.12.post_attention_layernorm.weight -> blk.12.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.12.self_attn.k_proj.weight -> blk.12.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.12.self_attn.o_proj.weight -> blk.12.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.12.self_attn.q_proj.weight -> blk.12.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.12.self_attn.v_proj.weight -> blk.12.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.13.input_layernorm.weight -> blk.13.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.13.mlp.down_proj.weight -> blk.13.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.13.mlp.gate_proj.weight -> blk.13.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.13.mlp.up_proj.weight -> blk.13.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.13.post_attention_layernorm.weight -> blk.13.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.13.self_attn.k_proj.weight -> blk.13.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.13.self_attn.o_proj.weight -> blk.13.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.13.self_attn.q_proj.weight -> blk.13.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.13.self_attn.v_proj.weight -> blk.13.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.14.input_layernorm.weight -> blk.14.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.14.mlp.down_proj.weight -> blk.14.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.14.mlp.gate_proj.weight -> blk.14.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.14.mlp.up_proj.weight -> blk.14.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.14.post_attention_layernorm.weight -> blk.14.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.14.self_attn.k_proj.weight -> blk.14.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.14.self_attn.o_proj.weight -> blk.14.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.14.self_attn.q_proj.weight -> blk.14.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.14.self_attn.v_proj.weight -> blk.14.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.15.input_layernorm.weight -> blk.15.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.15.mlp.down_proj.weight -> blk.15.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.15.mlp.gate_proj.weight -> blk.15.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.15.mlp.up_proj.weight -> blk.15.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.15.post_attention_layernorm.weight -> blk.15.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.15.self_attn.k_proj.weight -> blk.15.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.15.self_attn.o_proj.weight -> blk.15.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.15.self_attn.q_proj.weight -> blk.15.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.15.self_attn.v_proj.weight -> blk.15.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.16.input_layernorm.weight -> blk.16.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.16.mlp.down_proj.weight -> blk.16.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.16.mlp.gate_proj.weight -> blk.16.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.16.mlp.up_proj.weight -> blk.16.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.16.post_attention_layernorm.weight -> blk.16.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.16.self_attn.k_proj.weight -> blk.16.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.16.self_attn.o_proj.weight -> blk.16.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.16.self_attn.q_proj.weight -> blk.16.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.16.self_attn.v_proj.weight -> blk.16.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.17.input_layernorm.weight -> blk.17.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.17.mlp.down_proj.weight -> blk.17.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.17.mlp.gate_proj.weight -> blk.17.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.17.mlp.up_proj.weight -> blk.17.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.17.post_attention_layernorm.weight -> blk.17.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.17.self_attn.k_proj.weight -> blk.17.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.17.self_attn.o_proj.weight -> blk.17.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.17.self_attn.q_proj.weight -> blk.17.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.17.self_attn.v_proj.weight -> blk.17.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.18.input_layernorm.weight -> blk.18.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.18.mlp.down_proj.weight -> blk.18.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.18.mlp.gate_proj.weight -> blk.18.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.18.mlp.up_proj.weight -> blk.18.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.18.post_attention_layernorm.weight -> blk.18.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.18.self_attn.k_proj.weight -> blk.18.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.18.self_attn.o_proj.weight -> blk.18.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.18.self_attn.q_proj.weight -> blk.18.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.18.self_attn.v_proj.weight -> blk.18.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.19.input_layernorm.weight -> blk.19.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.19.mlp.down_proj.weight -> blk.19.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.19.mlp.gate_proj.weight -> blk.19.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.19.mlp.up_proj.weight -> blk.19.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.19.post_attention_layernorm.weight -> blk.19.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.19.self_attn.k_proj.weight -> blk.19.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.19.self_attn.o_proj.weight -> blk.19.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.19.self_attn.q_proj.weight -> blk.19.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.19.self_attn.v_proj.weight -> blk.19.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.2.input_layernorm.weight -> blk.2.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.2.mlp.down_proj.weight -> blk.2.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.2.mlp.gate_proj.weight -> blk.2.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.2.mlp.up_proj.weight -> blk.2.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.2.post_attention_layernorm.weight -> blk.2.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.2.self_attn.k_proj.weight -> blk.2.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.2.self_attn.o_proj.weight -> blk.2.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.2.self_attn.q_proj.weight -> blk.2.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.2.self_attn.v_proj.weight -> blk.2.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.20.input_layernorm.weight -> blk.20.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.20.mlp.down_proj.weight -> blk.20.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.20.mlp.gate_proj.weight -> blk.20.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.20.mlp.up_proj.weight -> blk.20.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.20.post_attention_layernorm.weight -> blk.20.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.20.self_attn.k_proj.weight -> blk.20.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.20.self_attn.o_proj.weight -> blk.20.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.20.self_attn.q_proj.weight -> blk.20.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.20.self_attn.v_proj.weight -> blk.20.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.21.input_layernorm.weight -> blk.21.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.21.mlp.down_proj.weight -> blk.21.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.21.mlp.gate_proj.weight -> blk.21.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.21.mlp.up_proj.weight -> blk.21.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.21.post_attention_layernorm.weight -> blk.21.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.21.self_attn.k_proj.weight -> blk.21.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.21.self_attn.o_proj.weight -> blk.21.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.21.self_attn.q_proj.weight -> blk.21.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.21.self_attn.v_proj.weight -> blk.21.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.22.self_attn.k_proj.weight -> blk.22.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.22.self_attn.o_proj.weight -> blk.22.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.22.self_attn.q_proj.weight -> blk.22.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.22.self_attn.v_proj.weight -> blk.22.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.3.input_layernorm.weight -> blk.3.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.3.mlp.down_proj.weight -> blk.3.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.3.mlp.gate_proj.weight -> blk.3.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.3.mlp.up_proj.weight -> blk.3.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.3.post_attention_layernorm.weight -> blk.3.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.3.self_attn.k_proj.weight -> blk.3.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.3.self_attn.o_proj.weight -> blk.3.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.3.self_attn.q_proj.weight -> blk.3.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.3.self_attn.v_proj.weight -> blk.3.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.4.input_layernorm.weight -> blk.4.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.4.mlp.down_proj.weight -> blk.4.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.4.mlp.gate_proj.weight -> blk.4.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.4.mlp.up_proj.weight -> blk.4.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.4.post_attention_layernorm.weight -> blk.4.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.4.self_attn.k_proj.weight -> blk.4.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.4.self_attn.o_proj.weight -> blk.4.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.4.self_attn.q_proj.weight -> blk.4.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.4.self_attn.v_proj.weight -> blk.4.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.5.input_layernorm.weight -> blk.5.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.5.mlp.down_proj.weight -> blk.5.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.5.mlp.gate_proj.weight -> blk.5.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.5.mlp.up_proj.weight -> blk.5.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.5.post_attention_layernorm.weight -> blk.5.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.5.self_attn.k_proj.weight -> blk.5.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.5.self_attn.o_proj.weight -> blk.5.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.5.self_attn.q_proj.weight -> blk.5.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.5.self_attn.v_proj.weight -> blk.5.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.6.input_layernorm.weight -> blk.6.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.6.mlp.down_proj.weight -> blk.6.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.6.mlp.gate_proj.weight -> blk.6.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.6.mlp.up_proj.weight -> blk.6.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.6.post_attention_layernorm.weight -> blk.6.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.6.self_attn.k_proj.weight -> blk.6.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.6.self_attn.o_proj.weight -> blk.6.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.6.self_attn.q_proj.weight -> blk.6.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.6.self_attn.v_proj.weight -> blk.6.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.7.input_layernorm.weight -> blk.7.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.7.mlp.down_proj.weight -> blk.7.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.7.mlp.gate_proj.weight -> blk.7.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.7.mlp.up_proj.weight -> blk.7.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.7.post_attention_layernorm.weight -> blk.7.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.7.self_attn.k_proj.weight -> blk.7.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.7.self_attn.o_proj.weight -> blk.7.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.7.self_attn.q_proj.weight -> blk.7.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.7.self_attn.v_proj.weight -> blk.7.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.8.input_layernorm.weight -> blk.8.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.8.mlp.down_proj.weight -> blk.8.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.8.mlp.gate_proj.weight -> blk.8.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.8.mlp.up_proj.weight -> blk.8.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.8.post_attention_layernorm.weight -> blk.8.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.8.self_attn.k_proj.weight -> blk.8.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.8.self_attn.o_proj.weight -> blk.8.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.8.self_attn.q_proj.weight -> blk.8.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.8.self_attn.v_proj.weight -> blk.8.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.9.input_layernorm.weight -> blk.9.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.9.mlp.down_proj.weight -> blk.9.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.9.mlp.gate_proj.weight -> blk.9.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.9.mlp.up_proj.weight -> blk.9.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.9.post_attention_layernorm.weight -> blk.9.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.9.self_attn.k_proj.weight -> blk.9.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.9.self_attn.o_proj.weight -> blk.9.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.9.self_attn.q_proj.weight -> blk.9.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.9.self_attn.v_proj.weight -> blk.9.attn_v.weight | BF16 | [1024, 4096]\n",
+ "lm_head.weight -> output.weight | BF16 | [32000, 4096]\n",
+ "model.layers.22.input_layernorm.weight -> blk.22.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.22.mlp.down_proj.weight -> blk.22.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.22.mlp.gate_proj.weight -> blk.22.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.22.mlp.up_proj.weight -> blk.22.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.22.post_attention_layernorm.weight -> blk.22.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.23.input_layernorm.weight -> blk.23.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.23.mlp.down_proj.weight -> blk.23.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.23.mlp.gate_proj.weight -> blk.23.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.23.mlp.up_proj.weight -> blk.23.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.23.post_attention_layernorm.weight -> blk.23.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.23.self_attn.k_proj.weight -> blk.23.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.23.self_attn.o_proj.weight -> blk.23.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.23.self_attn.q_proj.weight -> blk.23.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.23.self_attn.v_proj.weight -> blk.23.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.24.input_layernorm.weight -> blk.24.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.24.mlp.down_proj.weight -> blk.24.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.24.mlp.gate_proj.weight -> blk.24.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.24.mlp.up_proj.weight -> blk.24.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.24.post_attention_layernorm.weight -> blk.24.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.24.self_attn.k_proj.weight -> blk.24.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.24.self_attn.o_proj.weight -> blk.24.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.24.self_attn.q_proj.weight -> blk.24.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.24.self_attn.v_proj.weight -> blk.24.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.25.input_layernorm.weight -> blk.25.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.25.mlp.down_proj.weight -> blk.25.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.25.mlp.gate_proj.weight -> blk.25.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.25.mlp.up_proj.weight -> blk.25.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.25.post_attention_layernorm.weight -> blk.25.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.25.self_attn.k_proj.weight -> blk.25.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.25.self_attn.o_proj.weight -> blk.25.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.25.self_attn.q_proj.weight -> blk.25.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.25.self_attn.v_proj.weight -> blk.25.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.26.input_layernorm.weight -> blk.26.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.26.mlp.down_proj.weight -> blk.26.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.26.mlp.gate_proj.weight -> blk.26.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.26.mlp.up_proj.weight -> blk.26.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.26.post_attention_layernorm.weight -> blk.26.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.26.self_attn.k_proj.weight -> blk.26.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.26.self_attn.o_proj.weight -> blk.26.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.26.self_attn.q_proj.weight -> blk.26.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.26.self_attn.v_proj.weight -> blk.26.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.27.input_layernorm.weight -> blk.27.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.27.mlp.down_proj.weight -> blk.27.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.27.mlp.gate_proj.weight -> blk.27.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.27.mlp.up_proj.weight -> blk.27.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.27.post_attention_layernorm.weight -> blk.27.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.27.self_attn.k_proj.weight -> blk.27.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.27.self_attn.o_proj.weight -> blk.27.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.27.self_attn.q_proj.weight -> blk.27.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.27.self_attn.v_proj.weight -> blk.27.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.28.input_layernorm.weight -> blk.28.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.28.mlp.down_proj.weight -> blk.28.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.28.mlp.gate_proj.weight -> blk.28.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.28.mlp.up_proj.weight -> blk.28.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.28.post_attention_layernorm.weight -> blk.28.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.28.self_attn.k_proj.weight -> blk.28.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.28.self_attn.o_proj.weight -> blk.28.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.28.self_attn.q_proj.weight -> blk.28.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.28.self_attn.v_proj.weight -> blk.28.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.29.input_layernorm.weight -> blk.29.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.29.mlp.down_proj.weight -> blk.29.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.29.mlp.gate_proj.weight -> blk.29.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.29.mlp.up_proj.weight -> blk.29.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.29.post_attention_layernorm.weight -> blk.29.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.29.self_attn.k_proj.weight -> blk.29.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.29.self_attn.o_proj.weight -> blk.29.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.29.self_attn.q_proj.weight -> blk.29.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.29.self_attn.v_proj.weight -> blk.29.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.30.input_layernorm.weight -> blk.30.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.30.mlp.down_proj.weight -> blk.30.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.30.mlp.gate_proj.weight -> blk.30.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.30.mlp.up_proj.weight -> blk.30.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.30.post_attention_layernorm.weight -> blk.30.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.30.self_attn.k_proj.weight -> blk.30.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.30.self_attn.o_proj.weight -> blk.30.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.30.self_attn.q_proj.weight -> blk.30.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.30.self_attn.v_proj.weight -> blk.30.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.layers.31.input_layernorm.weight -> blk.31.attn_norm.weight | BF16 | [4096]\n",
+ "model.layers.31.mlp.down_proj.weight -> blk.31.ffn_down.weight | BF16 | [4096, 14336]\n",
+ "model.layers.31.mlp.gate_proj.weight -> blk.31.ffn_gate.weight | BF16 | [14336, 4096]\n",
+ "model.layers.31.mlp.up_proj.weight -> blk.31.ffn_up.weight | BF16 | [14336, 4096]\n",
+ "model.layers.31.post_attention_layernorm.weight -> blk.31.ffn_norm.weight | BF16 | [4096]\n",
+ "model.layers.31.self_attn.k_proj.weight -> blk.31.attn_k.weight | BF16 | [1024, 4096]\n",
+ "model.layers.31.self_attn.o_proj.weight -> blk.31.attn_output.weight | BF16 | [4096, 4096]\n",
+ "model.layers.31.self_attn.q_proj.weight -> blk.31.attn_q.weight | BF16 | [4096, 4096]\n",
+ "model.layers.31.self_attn.v_proj.weight -> blk.31.attn_v.weight | BF16 | [1024, 4096]\n",
+ "model.norm.weight -> output_norm.weight | BF16 | [4096]\n",
+ "Writing quantized_model/FP16.gguf, format 1\n",
+ "Ignoring added_tokens.json since model matches vocab size without it.\n",
+ "gguf: This GGUF file is for Little Endian only\n",
+ "gguf: Setting special token type bos to 1\n",
+ "gguf: Setting special token type eos to 2\n",
+ "gguf: Setting special token type unk to 0\n",
+ "gguf: Setting add_bos_token to True\n",
+ "gguf: Setting add_eos_token to False\n",
+ "[ 1/291] Writing tensor token_embd.weight | size 32000 x 4096 | type F16 | T+ 6\n",
+ "[ 2/291] Writing tensor blk.0.attn_norm.weight | size 4096 | type F32 | T+ 6\n",
+ "[ 3/291] Writing tensor blk.0.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 6\n",
+ "[ 4/291] Writing tensor blk.0.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 7\n",
+ "[ 5/291] Writing tensor blk.0.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 7\n",
+ "[ 6/291] Writing tensor blk.0.ffn_norm.weight | size 4096 | type F32 | T+ 8\n",
+ "[ 7/291] Writing tensor blk.0.attn_k.weight | size 1024 x 4096 | type F16 | T+ 8\n",
+ "[ 8/291] Writing tensor blk.0.attn_output.weight | size 4096 x 4096 | type F16 | T+ 8\n",
+ "[ 9/291] Writing tensor blk.0.attn_q.weight | size 4096 x 4096 | type F16 | T+ 8\n",
+ "[ 10/291] Writing tensor blk.0.attn_v.weight | size 1024 x 4096 | type F16 | T+ 8\n",
+ "[ 11/291] Writing tensor blk.1.attn_norm.weight | size 4096 | type F32 | T+ 8\n",
+ "[ 12/291] Writing tensor blk.1.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 11\n",
+ "[ 13/291] Writing tensor blk.1.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 11\n",
+ "[ 14/291] Writing tensor blk.1.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 12\n",
+ "[ 15/291] Writing tensor blk.1.ffn_norm.weight | size 4096 | type F32 | T+ 12\n",
+ "[ 16/291] Writing tensor blk.1.attn_k.weight | size 1024 x 4096 | type F16 | T+ 12\n",
+ "[ 17/291] Writing tensor blk.1.attn_output.weight | size 4096 x 4096 | type F16 | T+ 12\n",
+ "[ 18/291] Writing tensor blk.1.attn_q.weight | size 4096 x 4096 | type F16 | T+ 12\n",
+ "[ 19/291] Writing tensor blk.1.attn_v.weight | size 1024 x 4096 | type F16 | T+ 13\n",
+ "[ 20/291] Writing tensor blk.10.attn_norm.weight | size 4096 | type F32 | T+ 13\n",
+ "[ 21/291] Writing tensor blk.10.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 16\n",
+ "[ 22/291] Writing tensor blk.10.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 16\n",
+ "[ 23/291] Writing tensor blk.10.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 17\n",
+ "[ 24/291] Writing tensor blk.10.ffn_norm.weight | size 4096 | type F32 | T+ 17\n",
+ "[ 25/291] Writing tensor blk.10.attn_k.weight | size 1024 x 4096 | type F16 | T+ 17\n",
+ "[ 26/291] Writing tensor blk.10.attn_output.weight | size 4096 x 4096 | type F16 | T+ 17\n",
+ "[ 27/291] Writing tensor blk.10.attn_q.weight | size 4096 x 4096 | type F16 | T+ 17\n",
+ "[ 28/291] Writing tensor blk.10.attn_v.weight | size 1024 x 4096 | type F16 | T+ 18\n",
+ "[ 29/291] Writing tensor blk.11.attn_norm.weight | size 4096 | type F32 | T+ 18\n",
+ "[ 30/291] Writing tensor blk.11.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 20\n",
+ "[ 31/291] Writing tensor blk.11.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 21\n",
+ "[ 32/291] Writing tensor blk.11.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 21\n",
+ "[ 33/291] Writing tensor blk.11.ffn_norm.weight | size 4096 | type F32 | T+ 21\n",
+ "[ 34/291] Writing tensor blk.11.attn_k.weight | size 1024 x 4096 | type F16 | T+ 21\n",
+ "[ 35/291] Writing tensor blk.11.attn_output.weight | size 4096 x 4096 | type F16 | T+ 21\n",
+ "[ 36/291] Writing tensor blk.11.attn_q.weight | size 4096 x 4096 | type F16 | T+ 22\n",
+ "[ 37/291] Writing tensor blk.11.attn_v.weight | size 1024 x 4096 | type F16 | T+ 22\n",
+ "[ 38/291] Writing tensor blk.12.attn_norm.weight | size 4096 | type F32 | T+ 22\n",
+ "[ 39/291] Writing tensor blk.12.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 28\n",
+ "[ 40/291] Writing tensor blk.12.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 29\n",
+ "[ 41/291] Writing tensor blk.12.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 29\n",
+ "[ 42/291] Writing tensor blk.12.ffn_norm.weight | size 4096 | type F32 | T+ 33\n",
+ "[ 43/291] Writing tensor blk.12.attn_k.weight | size 1024 x 4096 | type F16 | T+ 33\n",
+ "[ 44/291] Writing tensor blk.12.attn_output.weight | size 4096 x 4096 | type F16 | T+ 33\n",
+ "[ 45/291] Writing tensor blk.12.attn_q.weight | size 4096 x 4096 | type F16 | T+ 33\n",
+ "[ 46/291] Writing tensor blk.12.attn_v.weight | size 1024 x 4096 | type F16 | T+ 33\n",
+ "[ 47/291] Writing tensor blk.13.attn_norm.weight | size 4096 | type F32 | T+ 38\n",
+ "[ 48/291] Writing tensor blk.13.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 45\n",
+ "[ 49/291] Writing tensor blk.13.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 46\n",
+ "[ 50/291] Writing tensor blk.13.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 46\n",
+ "[ 51/291] Writing tensor blk.13.ffn_norm.weight | size 4096 | type F32 | T+ 46\n",
+ "[ 52/291] Writing tensor blk.13.attn_k.weight | size 1024 x 4096 | type F16 | T+ 46\n",
+ "[ 53/291] Writing tensor blk.13.attn_output.weight | size 4096 x 4096 | type F16 | T+ 46\n",
+ "[ 54/291] Writing tensor blk.13.attn_q.weight | size 4096 x 4096 | type F16 | T+ 47\n",
+ "[ 55/291] Writing tensor blk.13.attn_v.weight | size 1024 x 4096 | type F16 | T+ 47\n",
+ "[ 56/291] Writing tensor blk.14.attn_norm.weight | size 4096 | type F32 | T+ 47\n",
+ "[ 57/291] Writing tensor blk.14.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 50\n",
+ "[ 58/291] Writing tensor blk.14.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 50\n",
+ "[ 59/291] Writing tensor blk.14.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 50\n",
+ "[ 60/291] Writing tensor blk.14.ffn_norm.weight | size 4096 | type F32 | T+ 51\n",
+ "[ 61/291] Writing tensor blk.14.attn_k.weight | size 1024 x 4096 | type F16 | T+ 51\n",
+ "[ 62/291] Writing tensor blk.14.attn_output.weight | size 4096 x 4096 | type F16 | T+ 51\n",
+ "[ 63/291] Writing tensor blk.14.attn_q.weight | size 4096 x 4096 | type F16 | T+ 51\n",
+ "[ 64/291] Writing tensor blk.14.attn_v.weight | size 1024 x 4096 | type F16 | T+ 51\n",
+ "[ 65/291] Writing tensor blk.15.attn_norm.weight | size 4096 | type F32 | T+ 51\n",
+ "[ 66/291] Writing tensor blk.15.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 54\n",
+ "[ 67/291] Writing tensor blk.15.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 54\n",
+ "[ 68/291] Writing tensor blk.15.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 57\n",
+ "[ 69/291] Writing tensor blk.15.ffn_norm.weight | size 4096 | type F32 | T+ 57\n",
+ "[ 70/291] Writing tensor blk.15.attn_k.weight | size 1024 x 4096 | type F16 | T+ 57\n",
+ "[ 71/291] Writing tensor blk.15.attn_output.weight | size 4096 x 4096 | type F16 | T+ 58\n",
+ "[ 72/291] Writing tensor blk.15.attn_q.weight | size 4096 x 4096 | type F16 | T+ 61\n",
+ "[ 73/291] Writing tensor blk.15.attn_v.weight | size 1024 x 4096 | type F16 | T+ 61\n",
+ "[ 74/291] Writing tensor blk.16.attn_norm.weight | size 4096 | type F32 | T+ 61\n",
+ "[ 75/291] Writing tensor blk.16.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 64\n",
+ "[ 76/291] Writing tensor blk.16.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 65\n",
+ "[ 77/291] Writing tensor blk.16.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 65\n",
+ "[ 78/291] Writing tensor blk.16.ffn_norm.weight | size 4096 | type F32 | T+ 65\n",
+ "[ 79/291] Writing tensor blk.16.attn_k.weight | size 1024 x 4096 | type F16 | T+ 65\n",
+ "[ 80/291] Writing tensor blk.16.attn_output.weight | size 4096 x 4096 | type F16 | T+ 65\n",
+ "[ 81/291] Writing tensor blk.16.attn_q.weight | size 4096 x 4096 | type F16 | T+ 66\n",
+ "[ 82/291] Writing tensor blk.16.attn_v.weight | size 1024 x 4096 | type F16 | T+ 66\n",
+ "[ 83/291] Writing tensor blk.17.attn_norm.weight | size 4096 | type F32 | T+ 66\n",
+ "[ 84/291] Writing tensor blk.17.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 69\n",
+ "[ 85/291] Writing tensor blk.17.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 69\n",
+ "[ 86/291] Writing tensor blk.17.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 69\n",
+ "[ 87/291] Writing tensor blk.17.ffn_norm.weight | size 4096 | type F32 | T+ 70\n",
+ "[ 88/291] Writing tensor blk.17.attn_k.weight | size 1024 x 4096 | type F16 | T+ 70\n",
+ "[ 89/291] Writing tensor blk.17.attn_output.weight | size 4096 x 4096 | type F16 | T+ 70\n",
+ "[ 90/291] Writing tensor blk.17.attn_q.weight | size 4096 x 4096 | type F16 | T+ 70\n",
+ "[ 91/291] Writing tensor blk.17.attn_v.weight | size 1024 x 4096 | type F16 | T+ 70\n",
+ "[ 92/291] Writing tensor blk.18.attn_norm.weight | size 4096 | type F32 | T+ 70\n",
+ "[ 93/291] Writing tensor blk.18.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 73\n",
+ "[ 94/291] Writing tensor blk.18.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 73\n",
+ "[ 95/291] Writing tensor blk.18.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 74\n",
+ "[ 96/291] Writing tensor blk.18.ffn_norm.weight | size 4096 | type F32 | T+ 88\n",
+ "[ 97/291] Writing tensor blk.18.attn_k.weight | size 1024 x 4096 | type F16 | T+ 88\n",
+ "[ 98/291] Writing tensor blk.18.attn_output.weight | size 4096 x 4096 | type F16 | T+ 90\n",
+ "[ 99/291] Writing tensor blk.18.attn_q.weight | size 4096 x 4096 | type F16 | T+ 95\n",
+ "[100/291] Writing tensor blk.18.attn_v.weight | size 1024 x 4096 | type F16 | T+ 95\n",
+ "[101/291] Writing tensor blk.19.attn_norm.weight | size 4096 | type F32 | T+ 95\n",
+ "[102/291] Writing tensor blk.19.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 97\n",
+ "[103/291] Writing tensor blk.19.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 97\n",
+ "[104/291] Writing tensor blk.19.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 98\n",
+ "[105/291] Writing tensor blk.19.ffn_norm.weight | size 4096 | type F32 | T+ 98\n",
+ "[106/291] Writing tensor blk.19.attn_k.weight | size 1024 x 4096 | type F16 | T+ 98\n",
+ "[107/291] Writing tensor blk.19.attn_output.weight | size 4096 x 4096 | type F16 | T+ 98\n",
+ "[108/291] Writing tensor blk.19.attn_q.weight | size 4096 x 4096 | type F16 | T+ 98\n",
+ "[109/291] Writing tensor blk.19.attn_v.weight | size 1024 x 4096 | type F16 | T+ 99\n",
+ "[110/291] Writing tensor blk.2.attn_norm.weight | size 4096 | type F32 | T+ 99\n",
+ "[111/291] Writing tensor blk.2.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 102\n",
+ "[112/291] Writing tensor blk.2.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 102\n",
+ "[113/291] Writing tensor blk.2.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 103\n",
+ "[114/291] Writing tensor blk.2.ffn_norm.weight | size 4096 | type F32 | T+ 103\n",
+ "[115/291] Writing tensor blk.2.attn_k.weight | size 1024 x 4096 | type F16 | T+ 103\n",
+ "[116/291] Writing tensor blk.2.attn_output.weight | size 4096 x 4096 | type F16 | T+ 103\n",
+ "[117/291] Writing tensor blk.2.attn_q.weight | size 4096 x 4096 | type F16 | T+ 103\n",
+ "[118/291] Writing tensor blk.2.attn_v.weight | size 1024 x 4096 | type F16 | T+ 103\n",
+ "[119/291] Writing tensor blk.20.attn_norm.weight | size 4096 | type F32 | T+ 103\n",
+ "[120/291] Writing tensor blk.20.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 106\n",
+ "[121/291] Writing tensor blk.20.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 106\n",
+ "[122/291] Writing tensor blk.20.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 107\n",
+ "[123/291] Writing tensor blk.20.ffn_norm.weight | size 4096 | type F32 | T+ 107\n",
+ "[124/291] Writing tensor blk.20.attn_k.weight | size 1024 x 4096 | type F16 | T+ 107\n",
+ "[125/291] Writing tensor blk.20.attn_output.weight | size 4096 x 4096 | type F16 | T+ 107\n",
+ "[126/291] Writing tensor blk.20.attn_q.weight | size 4096 x 4096 | type F16 | T+ 107\n",
+ "[127/291] Writing tensor blk.20.attn_v.weight | size 1024 x 4096 | type F16 | T+ 108\n",
+ "[128/291] Writing tensor blk.21.attn_norm.weight | size 4096 | type F32 | T+ 108\n",
+ "[129/291] Writing tensor blk.21.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 111\n",
+ "[130/291] Writing tensor blk.21.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 111\n",
+ "[131/291] Writing tensor blk.21.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 113\n",
+ "[132/291] Writing tensor blk.21.ffn_norm.weight | size 4096 | type F32 | T+ 113\n",
+ "[133/291] Writing tensor blk.21.attn_k.weight | size 1024 x 4096 | type F16 | T+ 113\n",
+ "[134/291] Writing tensor blk.21.attn_output.weight | size 4096 x 4096 | type F16 | T+ 113\n",
+ "[135/291] Writing tensor blk.21.attn_q.weight | size 4096 x 4096 | type F16 | T+ 116\n",
+ "[136/291] Writing tensor blk.21.attn_v.weight | size 1024 x 4096 | type F16 | T+ 116\n",
+ "[137/291] Writing tensor blk.22.attn_k.weight | size 1024 x 4096 | type F16 | T+ 116\n",
+ "[138/291] Writing tensor blk.22.attn_output.weight | size 4096 x 4096 | type F16 | T+ 116\n",
+ "[139/291] Writing tensor blk.22.attn_q.weight | size 4096 x 4096 | type F16 | T+ 118\n",
+ "[140/291] Writing tensor blk.22.attn_v.weight | size 1024 x 4096 | type F16 | T+ 118\n",
+ "[141/291] Writing tensor blk.3.attn_norm.weight | size 4096 | type F32 | T+ 118\n",
+ "[142/291] Writing tensor blk.3.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 120\n",
+ "[143/291] Writing tensor blk.3.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 120\n",
+ "[144/291] Writing tensor blk.3.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 121\n",
+ "[145/291] Writing tensor blk.3.ffn_norm.weight | size 4096 | type F32 | T+ 121\n",
+ "[146/291] Writing tensor blk.3.attn_k.weight | size 1024 x 4096 | type F16 | T+ 121\n",
+ "[147/291] Writing tensor blk.3.attn_output.weight | size 4096 x 4096 | type F16 | T+ 121\n",
+ "[148/291] Writing tensor blk.3.attn_q.weight | size 4096 x 4096 | type F16 | T+ 121\n",
+ "[149/291] Writing tensor blk.3.attn_v.weight | size 1024 x 4096 | type F16 | T+ 121\n",
+ "[150/291] Writing tensor blk.4.attn_norm.weight | size 4096 | type F32 | T+ 122\n",
+ "[151/291] Writing tensor blk.4.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 125\n",
+ "[152/291] Writing tensor blk.4.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 125\n",
+ "[153/291] Writing tensor blk.4.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 126\n",
+ "[154/291] Writing tensor blk.4.ffn_norm.weight | size 4096 | type F32 | T+ 131\n",
+ "[155/291] Writing tensor blk.4.attn_k.weight | size 1024 x 4096 | type F16 | T+ 131\n",
+ "[156/291] Writing tensor blk.4.attn_output.weight | size 4096 x 4096 | type F16 | T+ 131\n",
+ "[157/291] Writing tensor blk.4.attn_q.weight | size 4096 x 4096 | type F16 | T+ 131\n",
+ "[158/291] Writing tensor blk.4.attn_v.weight | size 1024 x 4096 | type F16 | T+ 131\n",
+ "[159/291] Writing tensor blk.5.attn_norm.weight | size 4096 | type F32 | T+ 131\n",
+ "[160/291] Writing tensor blk.5.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 139\n",
+ "[161/291] Writing tensor blk.5.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 139\n",
+ "[162/291] Writing tensor blk.5.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 140\n",
+ "[163/291] Writing tensor blk.5.ffn_norm.weight | size 4096 | type F32 | T+ 140\n",
+ "[164/291] Writing tensor blk.5.attn_k.weight | size 1024 x 4096 | type F16 | T+ 140\n",
+ "[165/291] Writing tensor blk.5.attn_output.weight | size 4096 x 4096 | type F16 | T+ 140\n",
+ "[166/291] Writing tensor blk.5.attn_q.weight | size 4096 x 4096 | type F16 | T+ 140\n",
+ "[167/291] Writing tensor blk.5.attn_v.weight | size 1024 x 4096 | type F16 | T+ 140\n",
+ "[168/291] Writing tensor blk.6.attn_norm.weight | size 4096 | type F32 | T+ 141\n",
+ "[169/291] Writing tensor blk.6.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 143\n",
+ "[170/291] Writing tensor blk.6.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 144\n",
+ "[171/291] Writing tensor blk.6.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 144\n",
+ "[172/291] Writing tensor blk.6.ffn_norm.weight | size 4096 | type F32 | T+ 144\n",
+ "[173/291] Writing tensor blk.6.attn_k.weight | size 1024 x 4096 | type F16 | T+ 144\n",
+ "[174/291] Writing tensor blk.6.attn_output.weight | size 4096 x 4096 | type F16 | T+ 144\n",
+ "[175/291] Writing tensor blk.6.attn_q.weight | size 4096 x 4096 | type F16 | T+ 145\n",
+ "[176/291] Writing tensor blk.6.attn_v.weight | size 1024 x 4096 | type F16 | T+ 145\n",
+ "[177/291] Writing tensor blk.7.attn_norm.weight | size 4096 | type F32 | T+ 145\n",
+ "[178/291] Writing tensor blk.7.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 148\n",
+ "[179/291] Writing tensor blk.7.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 151\n",
+ "[180/291] Writing tensor blk.7.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 152\n",
+ "[181/291] Writing tensor blk.7.ffn_norm.weight | size 4096 | type F32 | T+ 152\n",
+ "[182/291] Writing tensor blk.7.attn_k.weight | size 1024 x 4096 | type F16 | T+ 152\n",
+ "[183/291] Writing tensor blk.7.attn_output.weight | size 4096 x 4096 | type F16 | T+ 152\n",
+ "[184/291] Writing tensor blk.7.attn_q.weight | size 4096 x 4096 | type F16 | T+ 153\n",
+ "[185/291] Writing tensor blk.7.attn_v.weight | size 1024 x 4096 | type F16 | T+ 156\n",
+ "[186/291] Writing tensor blk.8.attn_norm.weight | size 4096 | type F32 | T+ 156\n",
+ "[187/291] Writing tensor blk.8.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 158\n",
+ "[188/291] Writing tensor blk.8.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 159\n",
+ "[189/291] Writing tensor blk.8.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 159\n",
+ "[190/291] Writing tensor blk.8.ffn_norm.weight | size 4096 | type F32 | T+ 162\n",
+ "[191/291] Writing tensor blk.8.attn_k.weight | size 1024 x 4096 | type F16 | T+ 162\n",
+ "[192/291] Writing tensor blk.8.attn_output.weight | size 4096 x 4096 | type F16 | T+ 162\n",
+ "[193/291] Writing tensor blk.8.attn_q.weight | size 4096 x 4096 | type F16 | T+ 162\n",
+ "[194/291] Writing tensor blk.8.attn_v.weight | size 1024 x 4096 | type F16 | T+ 163\n",
+ "[195/291] Writing tensor blk.9.attn_norm.weight | size 4096 | type F32 | T+ 163\n",
+ "[196/291] Writing tensor blk.9.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 165\n",
+ "[197/291] Writing tensor blk.9.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 168\n",
+ "[198/291] Writing tensor blk.9.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 168\n",
+ "[199/291] Writing tensor blk.9.ffn_norm.weight | size 4096 | type F32 | T+ 169\n",
+ "[200/291] Writing tensor blk.9.attn_k.weight | size 1024 x 4096 | type F16 | T+ 169\n",
+ "[201/291] Writing tensor blk.9.attn_output.weight | size 4096 x 4096 | type F16 | T+ 169\n",
+ "[202/291] Writing tensor blk.9.attn_q.weight | size 4096 x 4096 | type F16 | T+ 169\n",
+ "[203/291] Writing tensor blk.9.attn_v.weight | size 1024 x 4096 | type F16 | T+ 169\n",
+ "[204/291] Writing tensor output.weight | size 32000 x 4096 | type F16 | T+ 175\n",
+ "[205/291] Writing tensor blk.22.attn_norm.weight | size 4096 | type F32 | T+ 175\n",
+ "[206/291] Writing tensor blk.22.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 175\n",
+ "[207/291] Writing tensor blk.22.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 176\n",
+ "[208/291] Writing tensor blk.22.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 176\n",
+ "[209/291] Writing tensor blk.22.ffn_norm.weight | size 4096 | type F32 | T+ 177\n",
+ "[210/291] Writing tensor blk.23.attn_norm.weight | size 4096 | type F32 | T+ 177\n",
+ "[211/291] Writing tensor blk.23.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 177\n",
+ "[212/291] Writing tensor blk.23.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 179\n",
+ "[213/291] Writing tensor blk.23.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 179\n",
+ "[214/291] Writing tensor blk.23.ffn_norm.weight | size 4096 | type F32 | T+ 181\n",
+ "[215/291] Writing tensor blk.23.attn_k.weight | size 1024 x 4096 | type F16 | T+ 181\n",
+ "[216/291] Writing tensor blk.23.attn_output.weight | size 4096 x 4096 | type F16 | T+ 181\n",
+ "[217/291] Writing tensor blk.23.attn_q.weight | size 4096 x 4096 | type F16 | T+ 182\n",
+ "[218/291] Writing tensor blk.23.attn_v.weight | size 1024 x 4096 | type F16 | T+ 182\n",
+ "[219/291] Writing tensor blk.24.attn_norm.weight | size 4096 | type F32 | T+ 182\n",
+ "[220/291] Writing tensor blk.24.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 184\n",
+ "[221/291] Writing tensor blk.24.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 185\n",
+ "[222/291] Writing tensor blk.24.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 185\n",
+ "[223/291] Writing tensor blk.24.ffn_norm.weight | size 4096 | type F32 | T+ 185\n",
+ "[224/291] Writing tensor blk.24.attn_k.weight | size 1024 x 4096 | type F16 | T+ 185\n",
+ "[225/291] Writing tensor blk.24.attn_output.weight | size 4096 x 4096 | type F16 | T+ 185\n",
+ "[226/291] Writing tensor blk.24.attn_q.weight | size 4096 x 4096 | type F16 | T+ 185\n",
+ "[227/291] Writing tensor blk.24.attn_v.weight | size 1024 x 4096 | type F16 | T+ 186\n",
+ "[228/291] Writing tensor blk.25.attn_norm.weight | size 4096 | type F32 | T+ 186\n",
+ "[229/291] Writing tensor blk.25.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 189\n",
+ "[230/291] Writing tensor blk.25.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 189\n",
+ "[231/291] Writing tensor blk.25.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 190\n",
+ "[232/291] Writing tensor blk.25.ffn_norm.weight | size 4096 | type F32 | T+ 191\n",
+ "[233/291] Writing tensor blk.25.attn_k.weight | size 1024 x 4096 | type F16 | T+ 191\n",
+ "[234/291] Writing tensor blk.25.attn_output.weight | size 4096 x 4096 | type F16 | T+ 191\n",
+ "[235/291] Writing tensor blk.25.attn_q.weight | size 4096 x 4096 | type F16 | T+ 191\n",
+ "[236/291] Writing tensor blk.25.attn_v.weight | size 1024 x 4096 | type F16 | T+ 191\n",
+ "[237/291] Writing tensor blk.26.attn_norm.weight | size 4096 | type F32 | T+ 191\n",
+ "[238/291] Writing tensor blk.26.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 193\n",
+ "[239/291] Writing tensor blk.26.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 194\n",
+ "[240/291] Writing tensor blk.26.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 196\n",
+ "[241/291] Writing tensor blk.26.ffn_norm.weight | size 4096 | type F32 | T+ 197\n",
+ "[242/291] Writing tensor blk.26.attn_k.weight | size 1024 x 4096 | type F16 | T+ 197\n",
+ "[243/291] Writing tensor blk.26.attn_output.weight | size 4096 x 4096 | type F16 | T+ 197\n",
+ "[244/291] Writing tensor blk.26.attn_q.weight | size 4096 x 4096 | type F16 | T+ 197\n",
+ "[245/291] Writing tensor blk.26.attn_v.weight | size 1024 x 4096 | type F16 | T+ 197\n",
+ "[246/291] Writing tensor blk.27.attn_norm.weight | size 4096 | type F32 | T+ 197\n",
+ "[247/291] Writing tensor blk.27.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 204\n",
+ "[248/291] Writing tensor blk.27.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 204\n",
+ "[249/291] Writing tensor blk.27.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 205\n",
+ "[250/291] Writing tensor blk.27.ffn_norm.weight | size 4096 | type F32 | T+ 205\n",
+ "[251/291] Writing tensor blk.27.attn_k.weight | size 1024 x 4096 | type F16 | T+ 205\n",
+ "[252/291] Writing tensor blk.27.attn_output.weight | size 4096 x 4096 | type F16 | T+ 205\n",
+ "[253/291] Writing tensor blk.27.attn_q.weight | size 4096 x 4096 | type F16 | T+ 205\n",
+ "[254/291] Writing tensor blk.27.attn_v.weight | size 1024 x 4096 | type F16 | T+ 205\n",
+ "[255/291] Writing tensor blk.28.attn_norm.weight | size 4096 | type F32 | T+ 205\n",
+ "[256/291] Writing tensor blk.28.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 208\n",
+ "[257/291] Writing tensor blk.28.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 208\n",
+ "[258/291] Writing tensor blk.28.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 209\n",
+ "[259/291] Writing tensor blk.28.ffn_norm.weight | size 4096 | type F32 | T+ 209\n",
+ "[260/291] Writing tensor blk.28.attn_k.weight | size 1024 x 4096 | type F16 | T+ 209\n",
+ "[261/291] Writing tensor blk.28.attn_output.weight | size 4096 x 4096 | type F16 | T+ 209\n",
+ "[262/291] Writing tensor blk.28.attn_q.weight | size 4096 x 4096 | type F16 | T+ 209\n",
+ "[263/291] Writing tensor blk.28.attn_v.weight | size 1024 x 4096 | type F16 | T+ 209\n",
+ "[264/291] Writing tensor blk.29.attn_norm.weight | size 4096 | type F32 | T+ 209\n",
+ "[265/291] Writing tensor blk.29.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 213\n",
+ "[266/291] Writing tensor blk.29.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 213\n",
+ "[267/291] Writing tensor blk.29.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 214\n",
+ "[268/291] Writing tensor blk.29.ffn_norm.weight | size 4096 | type F32 | T+ 214\n",
+ "[269/291] Writing tensor blk.29.attn_k.weight | size 1024 x 4096 | type F16 | T+ 214\n",
+ "[270/291] Writing tensor blk.29.attn_output.weight | size 4096 x 4096 | type F16 | T+ 214\n",
+ "[271/291] Writing tensor blk.29.attn_q.weight | size 4096 x 4096 | type F16 | T+ 214\n",
+ "[272/291] Writing tensor blk.29.attn_v.weight | size 1024 x 4096 | type F16 | T+ 215\n",
+ "[273/291] Writing tensor blk.30.attn_norm.weight | size 4096 | type F32 | T+ 215\n",
+ "[274/291] Writing tensor blk.30.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 217\n",
+ "[275/291] Writing tensor blk.30.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 217\n",
+ "[276/291] Writing tensor blk.30.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 219\n",
+ "[277/291] Writing tensor blk.30.ffn_norm.weight | size 4096 | type F32 | T+ 219\n",
+ "[278/291] Writing tensor blk.30.attn_k.weight | size 1024 x 4096 | type F16 | T+ 219\n",
+ "[279/291] Writing tensor blk.30.attn_output.weight | size 4096 x 4096 | type F16 | T+ 220\n",
+ "[280/291] Writing tensor blk.30.attn_q.weight | size 4096 x 4096 | type F16 | T+ 220\n",
+ "[281/291] Writing tensor blk.30.attn_v.weight | size 1024 x 4096 | type F16 | T+ 220\n",
+ "[282/291] Writing tensor blk.31.attn_norm.weight | size 4096 | type F32 | T+ 220\n",
+ "[283/291] Writing tensor blk.31.ffn_down.weight | size 4096 x 14336 | type F16 | T+ 222\n",
+ "[284/291] Writing tensor blk.31.ffn_gate.weight | size 14336 x 4096 | type F16 | T+ 222\n",
+ "[285/291] Writing tensor blk.31.ffn_up.weight | size 14336 x 4096 | type F16 | T+ 223\n",
+ "[286/291] Writing tensor blk.31.ffn_norm.weight | size 4096 | type F32 | T+ 223\n",
+ "[287/291] Writing tensor blk.31.attn_k.weight | size 1024 x 4096 | type F16 | T+ 223\n",
+ "[288/291] Writing tensor blk.31.attn_output.weight | size 4096 x 4096 | type F16 | T+ 223\n",
+ "[289/291] Writing tensor blk.31.attn_q.weight | size 4096 x 4096 | type F16 | T+ 223\n",
+ "[290/291] Writing tensor blk.31.attn_v.weight | size 1024 x 4096 | type F16 | T+ 224\n",
+ "[291/291] Writing tensor output_norm.weight | size 4096 | type F32 | T+ 224\n",
+ "Wrote quantized_model/FP16.gguf\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import os"
+ ],
+ "metadata": {
+ "id": "EoRDFCnGQjYn"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "for m in methods:\n",
+ " qtype = f\"{quantized_path}/{m.upper()}.gguf\"\n",
+ " os.system(\"./llama.cpp/quantize \"+quantized_path+\"/FP16.gguf \"+qtype+\" \"+m)"
+ ],
+ "metadata": {
+ "id": "C_LwqQabQMuc"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
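+ {
+ "cell_type": "code",
+ "source": [
+ "# Optional sanity check (a sketch, not part of the original run): list the\n",
+ "# GGUF files written above and their on-disk sizes, so each quantized variant\n",
+ "# can be compared against the FP16 baseline. Assumes only `quantized_path`\n",
+ "# and the `os` import from the previous cells.\n",
+ "for f in sorted(os.listdir(quantized_path)):\n",
+ "    if not f.endswith(\".gguf\"):\n",
+ "        continue\n",
+ "    size_gb = os.path.getsize(os.path.join(quantized_path, f)) / 1024**3\n",
+ "    print(f\"{f}: {size_gb:.2f} GB\")"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },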
+ {
+ "cell_type": "code",
+ "source": [
+ "! ./llama.cpp/main -m ./quantized_model/Q4_K_M.gguf -n 90 --repeat_penalty 1.0 --color -i -r \"User:\" -f llama.cpp/prompts/chat-with-bob.txt"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "WYMk-4YdQZih",
+ "outputId": "a75105df-3dc5-4d76-ec2c-5301c1b09f4f"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Log start\n",
+ "main: build = 2491 (fa046eaf)\n",
+ "main: built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu\n",
+ "main: seed = 1711078618\n",
+ "llama_model_loader: loaded meta data with 23 key-value pairs and 291 tensors from ./quantized_model/Q4_K_M.gguf (version GGUF V3 (latest))\n",
+ "llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
+ "llama_model_loader: - kv 0: general.architecture str = llama\n",
+ "llama_model_loader: - kv 1: general.name str = .\n",
+ "llama_model_loader: - kv 2: llama.vocab_size u32 = 32000\n",
+ "llama_model_loader: - kv 3: llama.context_length u32 = 32768\n",
+ "llama_model_loader: - kv 4: llama.embedding_length u32 = 4096\n",
+ "llama_model_loader: - kv 5: llama.block_count u32 = 32\n",
+ "llama_model_loader: - kv 6: llama.feed_forward_length u32 = 14336\n",
+ "llama_model_loader: - kv 7: llama.rope.dimension_count u32 = 128\n",
+ "llama_model_loader: - kv 8: llama.attention.head_count u32 = 32\n",
+ "llama_model_loader: - kv 9: llama.attention.head_count_kv u32 = 8\n",
+ "llama_model_loader: - kv 10: llama.attention.layer_norm_rms_epsilon f32 = 0.000010\n",
+ "llama_model_loader: - kv 11: llama.rope.freq_base f32 = 10000.000000\n",
+ "llama_model_loader: - kv 12: general.file_type u32 = 15\n",
+ "llama_model_loader: - kv 13: tokenizer.ggml.model str = llama\n",
+ "llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,32000] = [\"\", \"\", \"\", \"<0x00>\", \"<...\n",
+ "llama_model_loader: - kv 15: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n",
+ "llama_model_loader: - kv 16: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n",
+ "llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 1\n",
+ "llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 2\n",
+ "llama_model_loader: - kv 19: tokenizer.ggml.unknown_token_id u32 = 0\n",
+ "llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true\n",
+ "llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false\n",
+ "llama_model_loader: - kv 22: general.quantization_version u32 = 2\n",
+ "llama_model_loader: - type f32: 65 tensors\n",
+ "llama_model_loader: - type q4_K: 193 tensors\n",
+ "llama_model_loader: - type q6_K: 33 tensors\n",
+ "llm_load_vocab: special tokens definition check successful ( 259/32000 ).\n",
+ "llm_load_print_meta: format = GGUF V3 (latest)\n",
+ "llm_load_print_meta: arch = llama\n",
+ "llm_load_print_meta: vocab type = SPM\n",
+ "llm_load_print_meta: n_vocab = 32000\n",
+ "llm_load_print_meta: n_merges = 0\n",
+ "llm_load_print_meta: n_ctx_train = 32768\n",
+ "llm_load_print_meta: n_embd = 4096\n",
+ "llm_load_print_meta: n_head = 32\n",
+ "llm_load_print_meta: n_head_kv = 8\n",
+ "llm_load_print_meta: n_layer = 32\n",
+ "llm_load_print_meta: n_rot = 128\n",
+ "llm_load_print_meta: n_embd_head_k = 128\n",
+ "llm_load_print_meta: n_embd_head_v = 128\n",
+ "llm_load_print_meta: n_gqa = 4\n",
+ "llm_load_print_meta: n_embd_k_gqa = 1024\n",
+ "llm_load_print_meta: n_embd_v_gqa = 1024\n",
+ "llm_load_print_meta: f_norm_eps = 0.0e+00\n",
+ "llm_load_print_meta: f_norm_rms_eps = 1.0e-05\n",
+ "llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
+ "llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
+ "llm_load_print_meta: f_logit_scale = 0.0e+00\n",
+ "llm_load_print_meta: n_ff = 14336\n",
+ "llm_load_print_meta: n_expert = 0\n",
+ "llm_load_print_meta: n_expert_used = 0\n",
+ "llm_load_print_meta: causal attn = 1\n",
+ "llm_load_print_meta: pooling type = 0\n",
+ "llm_load_print_meta: rope type = 0\n",
+ "llm_load_print_meta: rope scaling = linear\n",
+ "llm_load_print_meta: freq_base_train = 10000.0\n",
+ "llm_load_print_meta: freq_scale_train = 1\n",
+ "llm_load_print_meta: n_yarn_orig_ctx = 32768\n",
+ "llm_load_print_meta: rope_finetuned = unknown\n",
+ "llm_load_print_meta: ssm_d_conv = 0\n",
+ "llm_load_print_meta: ssm_d_inner = 0\n",
+ "llm_load_print_meta: ssm_d_state = 0\n",
+ "llm_load_print_meta: ssm_dt_rank = 0\n",
+ "llm_load_print_meta: model type = 7B\n",
+ "llm_load_print_meta: model ftype = Q4_K - Medium\n",
+ "llm_load_print_meta: model params = 7.24 B\n",
+ "llm_load_print_meta: model size = 4.07 GiB (4.83 BPW) \n",
+ "llm_load_print_meta: general.name = .\n",
+ "llm_load_print_meta: BOS token = 1 ''\n",
+ "llm_load_print_meta: EOS token = 2 ''\n",
+ "llm_load_print_meta: UNK token = 0 ''\n",
+ "llm_load_print_meta: LF token = 13 '<0x0A>'\n",
+ "ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no\n",
+ "ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes\n",
+ "ggml_cuda_init: found 1 CUDA devices:\n",
+ " Device 0: Tesla T4, compute capability 7.5, VMM: yes\n",
+ "llm_load_tensors: ggml ctx size = 0.11 MiB\n",
+ "llm_load_tensors: offloading 0 repeating layers to GPU\n",
+ "llm_load_tensors: offloaded 0/33 layers to GPU\n",
+ "llm_load_tensors: CPU buffer size = 4165.37 MiB\n",
+ "...............................................................................................\n",
+ "llama_new_context_with_model: n_ctx = 512\n",
+ "llama_new_context_with_model: n_batch = 512\n",
+ "llama_new_context_with_model: n_ubatch = 512\n",
+ "llama_new_context_with_model: freq_base = 10000.0\n",
+ "llama_new_context_with_model: freq_scale = 1\n",
+ "llama_kv_cache_init: CUDA_Host KV buffer size = 64.00 MiB\n",
+ "llama_new_context_with_model: KV self size = 64.00 MiB, K (f16): 32.00 MiB, V (f16): 32.00 MiB\n",
+ "llama_new_context_with_model: CUDA_Host output buffer size = 62.50 MiB\n",
+ "llama_new_context_with_model: CUDA0 compute buffer size = 173.04 MiB\n",
+ "llama_new_context_with_model: CUDA_Host compute buffer size = 9.00 MiB\n",
+ "llama_new_context_with_model: graph nodes = 1060\n",
+ "llama_new_context_with_model: graph splits = 356\n",
+ "\n",
+ "system_info: n_threads = 2 / 2 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | \n",
+ "main: interactive mode on.\n",
+ "Reverse prompt: 'User:'\n",
+ "sampling: \n",
+ "\trepeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000\n",
+ "\ttop_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800\n",
+ "\tmirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\n",
+ "sampling order: \n",
+ "CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature \n",
+ "generate: n_ctx = 512, n_batch = 2048, n_predict = 90, n_keep = 1\n",
+ "\n",
+ "\n",
+ "== Running in interactive mode. ==\n",
+ " - Press Ctrl+C to interject at any time.\n",
+ " - Press Return to return control to LLaMa.\n",
+ " - To return control without starting a new line, end your input with '/'.\n",
+ " - If you want to submit another line, end your input with '\\'.\n",
+ "\n",
+ "\u001b[33m Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\n",
+ "\n",
+ "User: Hello, Bob.\n",
+ "Bob: Hello. How may I help you today?\n",
+ "User: Please tell me the largest city in Europe.\n",
+ "Bob: Sure. The largest city in Europe is Moscow, the capital of Russia.\n",
+ "User:\u001b[0m\u001b[1m\u001b[32m|\n",
+ "\u001b[0mUser:\u001b[1m\u001b[32m\\\u001b[33m\b\\\b \b\n",
+ "\u001b[1m\u001b[32m/\u001b[33m\b/\b \b\u001b[0m Bob, I'm sorry.\n",
+ "Bob: No need to be sorry.\n",
+ "User:\u001b[1m\u001b[32m/\u001b[33m\b/\b \b\u001b[0m I'm from Russia.\n",
+ "Bob: Oh, I'm sorry then.\n",
+ "User:\u001b[1m\u001b[32m\u001b[0m\n",
+ "\n",
+ "\n",
+ "llama_print_timings: load time = 19409.42 ms\n",
+ "llama_print_timings: sample time = 2.15 ms / 43 runs ( 0.05 ms per token, 20027.95 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 551252.95 ms / 102 tokens ( 5404.44 ms per token, 0.19 tokens per second)\n",
+ "llama_print_timings: eval time = 27006.92 ms / 42 runs ( 643.02 ms per token, 1.56 tokens per second)\n",
+ "llama_print_timings: total time = 633744.17 ms / 144 tokens\n"
+ ]
+ }
+ ]
+ },
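+ {
+ "cell_type": "code",
+ "source": [
+ "# Alternative check (a sketch, not from the original run): a single\n",
+ "# non-interactive generation with the same Q4_K_M model instead of chat mode.\n",
+ "# -ngl 33 offloads all layers to the T4 GPU, which speeds up evaluation.\n",
+ "! ./llama.cpp/main -m ./quantized_model/Q4_K_M.gguf -n 64 -ngl 33 -p \"What is LLM quantization?\""
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },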
+ {
+ "cell_type": "code",
+ "source": [
+ "from huggingface_hub import notebook_login\n",
+ "notebook_login()"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 145,
+ "referenced_widgets": [
+ "8b582650a2bb4f0c8b23fec484455145",
+ "c0650664063e4ddda7614aa58f3b263e",
+ "3f7ca1727e2044eebfd9c624d0cb0a6b",
+ "ebd75acc454c439cbee44f87aad9d90f",
+ "ec10867538bc44b08e669eebc7acc587",
+ "41640c4d962743cf8b2c1d350f4088ed",
+ "c476742c654444bba8bf3256c57e17d4",
+ "44aaaaace1d7494a88a36e4d99301e4a",
+ "446cc3d328b74f6ea0ef83dc521fc43b",
+ "7ef3d07ae70c48889f523e70b9674df6",
+ "0c80d3ed00f5438f85c211c2f8f94fae",
+ "20fcc68df8044924a0534082febdc904",
+ "45c73e3c2c9b4a12b108d92a737da4dc",
+ "d2a042df2217420291701d88945b66a9",
+ "e8e425e4155949569e452e930b8149ab",
+ "ee5ae591712b4b0eb10763cbf819bf80",
+ "89ebd0e461b2425890446cf78a5d7581",
+ "a52f35475a4a423ebdb07c4ea20a64eb",
+ "3a516fa148244be3a0e520ebfac77f6f",
+ "9bee710e7117458c9cf4891c67730e15",
+ "b887ba97ceaa4ea6b5248ef24f001bee",
+ "fbe702574326465d9373ccee2bc10776",
+ "2e6564e983604376ab875e12654704f9",
+ "43372cd04dfc45a3a1d11130d5c569f6",
+ "720855a488684d54a69f6a2936e0dece",
+ "848c73ab6eca424f832ab7c8b8cecc9f",
+ "a152922e7048481eb6ced042f9adcc50",
+ "7629bacd5e454eed8cf4de5f7c09f9ae",
+ "f3c16713eda743cda445ee81b3e5a105",
+ "e60dcb146fb84525be11e6f0b4d4c3cb",
+ "9e97f9d91ca14e70afc15b2964f106a3",
+ "214ac890bb5a49f5bb2aa76de81ef351"
+ ]
+ },
+ "id": "fDqmhobmQ5zS",
+ "outputId": "c231e07f-1cd6-4182-d89b-469506a16adf"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "VBox(children=(HTML(value='