Upload granite-20b-code-instruct.Q8_0.gguf with huggingface_hub
- .gitattributes +1 -0
- granite-20b-code-instruct.Q8_0.gguf +3 -0
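For context, commits like this are typically produced with the huggingface_hub Python client; the commit title above matches the default message that client generates for file uploads. A minimal sketch follows, assuming a local copy of the GGUF file and a placeholder repo id (neither is taken from this commit):

    # Sketch: upload a GGUF file to a Hugging Face repo with huggingface_hub.
    # The local path and repo_id are placeholders, not read from this commit.
    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login` by default
    api.upload_file(
        path_or_fileobj="granite-20b-code-instruct.Q8_0.gguf",   # local file to upload
        path_in_repo="granite-20b-code-instruct.Q8_0.gguf",      # destination path in the repo
        repo_id="your-username/granite-20b-code-instruct-GGUF",  # placeholder repo id
        repo_type="model",
    )

Because the file is large, the Hub stores it through Git LFS, which is why the upload also touches .gitattributes, as shown in the diff below.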
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 imatrix.dat filter=lfs diff=lfs merge=lfs -text
 granite-20b-code-instruct.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+granite-20b-code-instruct.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
granite-20b-code-instruct.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d16c217e648bfc5b1f2ec78ab18430a5a20b56d4393f47bb446d5ae0315986b6
+size 21481183872
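The three added lines are a Git LFS pointer, not the model weights themselves: they record the LFS spec version, the SHA-256 digest of the actual file, and its size in bytes (about 21.5 GB). A small sketch for checking a locally downloaded copy against this pointer (the local filename is an assumption; the oid and size are copied from the pointer above):

    # Sketch: verify a downloaded GGUF against the LFS pointer's oid and size.
    # The local filename is a placeholder; expected values come from the pointer above.
    import hashlib
    import os

    path = "granite-20b-code-instruct.Q8_0.gguf"
    expected_oid = "d16c217e648bfc5b1f2ec78ab18430a5a20b56d4393f47bb446d5ae0315986b6"
    expected_size = 21481183872

    assert os.path.getsize(path) == expected_size, "size mismatch"

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    assert h.hexdigest() == expected_oid, "sha256 mismatch"
    print("local file matches the LFS pointer")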